From a5eda3267ad2ba46a0c38058637b06e545c399eb Mon Sep 17 00:00:00 2001 From: Uzziah <120019273+uzziahlin@users.noreply.github.com> Date: Mon, 14 Aug 2023 14:25:26 +0800 Subject: [PATCH 1/9] fix: refactor UpdateBatch method (#5295) --- CHANGELOG.md | 1 + client/orm/db.go | 157 +++++++++++----- client/orm/db_test.go | 419 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 526 insertions(+), 51 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b46f2239..1747197a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - [refactor cache/redis: Use redisConfig to receive incoming JSON (previously using a map)](https://github.com/beego/beego/pull/5268) - [fix: refactor DeleteSQL method](https://github.com/beego/beego/pull/5271) - [fix: refactor UpdateSQL method](https://github.com/beego/beego/pull/5274) +- [fix: refactor UpdateBatch method](https://github.com/beego/beego/pull/5295) ## ORM refactoring - [introducing internal/models pkg](https://github.com/beego/beego/pull/5238) diff --git a/client/orm/db.go b/client/orm/db.go index 8bb4bec4..bded065f 100644 --- a/client/orm/db.go +++ b/client/orm/db.go @@ -819,58 +819,8 @@ func (d *dbBase) UpdateBatch(ctx context.Context, q dbQuerier, qs *querySet, mi join := tables.getJoinSQL() - var query, T string + query := d.UpdateBatchSQL(mi, columns, values, specifyIndexes, join, where) - Q := d.ins.TableQuote() - - if d.ins.SupportUpdateJoin() { - T = "T0." 
- } - - cols := make([]string, 0, len(columns)) - - for i, v := range columns { - col := fmt.Sprintf("%s%s%s%s", T, Q, v, Q) - if c, ok := values[i].(colValue); ok { - switch c.opt { - case ColAdd: - cols = append(cols, col+" = "+col+" + ?") - case ColMinus: - cols = append(cols, col+" = "+col+" - ?") - case ColMultiply: - cols = append(cols, col+" = "+col+" * ?") - case ColExcept: - cols = append(cols, col+" = "+col+" / ?") - case ColBitAnd: - cols = append(cols, col+" = "+col+" & ?") - case ColBitRShift: - cols = append(cols, col+" = "+col+" >> ?") - case ColBitLShift: - cols = append(cols, col+" = "+col+" << ?") - case ColBitXOR: - cols = append(cols, col+" = "+col+" ^ ?") - case ColBitOr: - cols = append(cols, col+" = "+col+" | ?") - } - values[i] = c.value - } else { - cols = append(cols, col+" = ?") - } - } - - sets := strings.Join(cols, ", ") + " " - - if d.ins.SupportUpdateJoin() { - query = fmt.Sprintf("UPDATE %s%s%s T0 %s%sSET %s%s", Q, mi.Table, Q, specifyIndexes, join, sets, where) - } else { - supQuery := fmt.Sprintf("SELECT T0.%s%s%s FROM %s%s%s T0 %s%s%s", - Q, mi.Fields.Pk.Column, Q, - Q, mi.Table, Q, - specifyIndexes, join, where) - query = fmt.Sprintf("UPDATE %s%s%s SET %sWHERE %s%s%s IN ( %s )", Q, mi.Table, Q, sets, Q, mi.Fields.Pk.Column, Q, supQuery) - } - - d.ins.ReplaceMarks(&query) res, err := q.ExecContext(ctx, query, values...) 
if err == nil { return res.RowsAffected() @@ -878,6 +828,111 @@ func (d *dbBase) UpdateBatch(ctx context.Context, q dbQuerier, qs *querySet, mi return 0, err } +func (d *dbBase) UpdateBatchSQL(mi *models.ModelInfo, cols []string, values []interface{}, specifyIndexes, join, where string) string { + quote := d.ins.TableQuote() + + buf := buffers.Get() + defer buffers.Put(buf) + + _, _ = buf.WriteString("UPDATE ") + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(mi.Table) + _, _ = buf.WriteString(quote) + + if d.ins.SupportUpdateJoin() { + _, _ = buf.WriteString(" T0 ") + _, _ = buf.WriteString(specifyIndexes) + _, _ = buf.WriteString(join) + + d.buildSetSQL(buf, cols, values) + + _, _ = buf.WriteString(" ") + _, _ = buf.WriteString(where) + } else { + _, _ = buf.WriteString(" ") + + d.buildSetSQL(buf, cols, values) + + _, _ = buf.WriteString(" WHERE ") + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(mi.Fields.Pk.Column) + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(" IN ( ") + _, _ = buf.WriteString("SELECT T0.") + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(mi.Fields.Pk.Column) + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(mi.Table) + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(" T0 ") + _, _ = buf.WriteString(specifyIndexes) + _, _ = buf.WriteString(join) + _, _ = buf.WriteString(where) + _, _ = buf.WriteString(" )") + } + + query := buf.String() + + d.ins.ReplaceMarks(&query) + + return query +} + +func (d *dbBase) buildSetSQL(buf buffers.Buffer, cols []string, values []interface{}) { + + var owner string + + quote := d.ins.TableQuote() + + if d.ins.SupportUpdateJoin() { + owner = "T0." 
+ } + + _, _ = buf.WriteString("SET ") + + for i, v := range cols { + if i > 0 { + _, _ = buf.WriteString(", ") + } + _, _ = buf.WriteString(owner) + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(v) + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(" = ") + if c, ok := values[i].(colValue); ok { + _, _ = buf.WriteString(owner) + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(v) + _, _ = buf.WriteString(quote) + switch c.opt { + case ColAdd: + _, _ = buf.WriteString(" + ?") + case ColMinus: + _, _ = buf.WriteString(" - ?") + case ColMultiply: + _, _ = buf.WriteString(" * ?") + case ColExcept: + _, _ = buf.WriteString(" / ?") + case ColBitAnd: + _, _ = buf.WriteString(" & ?") + case ColBitRShift: + _, _ = buf.WriteString(" >> ?") + case ColBitLShift: + _, _ = buf.WriteString(" << ?") + case ColBitXOR: + _, _ = buf.WriteString(" ^ ?") + case ColBitOr: + _, _ = buf.WriteString(" | ?") + } + values[i] = c.value + } else { + _, _ = buf.WriteString("?") + } + } +} + // delete related records. // do UpdateBanch or DeleteBanch by condition of tables' relationship. 
func (d *dbBase) deleteRels(ctx context.Context, q dbQuerier, mi *models.ModelInfo, args []interface{}, tz *time.Location) error { diff --git a/client/orm/db_test.go b/client/orm/db_test.go index 43fa3798..6a61f380 100644 --- a/client/orm/db_test.go +++ b/client/orm/db_test.go @@ -15,6 +15,7 @@ package orm import ( + "github.com/beego/beego/v2/client/orm/internal/buffers" "testing" "github.com/stretchr/testify/assert" @@ -229,3 +230,421 @@ func TestDbBase_DeleteSQL(t *testing.T) { }) } } + +func TestDbBase_buildSetSQL(t *testing.T) { + + testCases := []struct { + name string + + db *dbBase + + columns []string + values []interface{} + + wantRes string + wantValues []interface{} + }{ + { + name: "set add/mul operator by dbBase", + db: &dbBase{ + ins: &dbBase{}, + }, + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColAdd, + value: 12, + }, + colValue{ + opt: ColMultiply, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: "SET T0.`name` = ?, T0.`age` = T0.`age` + ?, T0.`score` = T0.`score` * ?", + wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set min/except operator by dbBase", + db: &dbBase{ + ins: &dbBase{}, + }, + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColMinus, + value: 12, + }, + colValue{ + opt: ColExcept, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: "SET T0.`name` = ?, T0.`age` = T0.`age` - ?, T0.`score` = T0.`score` / ?", + wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set bitRShift/bitLShift operator by dbBase", + db: &dbBase{ + ins: &dbBase{}, + }, + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColBitRShift, + value: 12, + }, + colValue{ + opt: ColBitLShift, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: "SET T0.`name` = ?, T0.`age` = 
T0.`age` >> ?, T0.`score` = T0.`score` << ?", + wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set bitAnd/bitOr/bitXOR operator by dbBase", + db: &dbBase{ + ins: &dbBase{}, + }, + columns: []string{"count", "age", "score"}, + values: []interface{}{ + colValue{ + opt: ColBitAnd, + value: 28, + }, + colValue{ + opt: ColBitOr, + value: 12, + }, + colValue{ + opt: ColBitXOR, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: "SET T0.`count` = T0.`count` & ?, T0.`age` = T0.`age` | ?, T0.`score` = T0.`score` ^ ?", + wantValues: []interface{}{int64(28), int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set add/mul operator by dbBasePostgres", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColAdd, + value: 12, + }, + colValue{ + opt: ColMultiply, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: `SET "name" = ?, "age" = "age" + ?, "score" = "score" * ?`, + wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set min/except operator by dbBasePostgres", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColMinus, + value: 12, + }, + colValue{ + opt: ColExcept, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: `SET "name" = ?, "age" = "age" - ?, "score" = "score" / ?`, + wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set bitRShift/bitLShift operator by dbBasePostgres", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColBitRShift, + value: 12, + }, + colValue{ + opt: ColBitLShift, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: `SET "name" = ?, "age" = 
"age" >> ?, "score" = "score" << ?`, + wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set bitAnd/bitOr/bitXOR operator by dbBasePostgres", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + columns: []string{"count", "age", "score"}, + values: []interface{}{ + colValue{ + opt: ColBitAnd, + value: 28, + }, + colValue{ + opt: ColBitOr, + value: 12, + }, + colValue{ + opt: ColBitXOR, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: `SET "count" = "count" & ?, "age" = "age" | ?, "score" = "score" ^ ?`, + wantValues: []interface{}{int64(28), int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set add/mul operator by dbBaseSqlite", + db: &dbBase{ + ins: newdbBaseSqlite(), + }, + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColAdd, + value: 12, + }, + colValue{ + opt: ColMultiply, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: "SET `name` = ?, `age` = `age` + ?, `score` = `score` * ?", + wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set min/except operator by dbBaseSqlite", + db: &dbBase{ + ins: newdbBaseSqlite(), + }, + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColMinus, + value: 12, + }, + colValue{ + opt: ColExcept, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: "SET `name` = ?, `age` = `age` - ?, `score` = `score` / ?", + wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set bitRShift/bitLShift operator by dbBaseSqlite", + db: &dbBase{ + ins: newdbBaseSqlite(), + }, + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColBitRShift, + value: 12, + }, + colValue{ + opt: ColBitLShift, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: "SET `name` = ?, `age` = `age` >> ?, `score` = 
`score` << ?", + wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, + }, + { + name: "set bitAnd/bitOr/bitXOR operator by dbBaseSqlite", + db: &dbBase{ + ins: newdbBaseSqlite(), + }, + columns: []string{"count", "age", "score"}, + values: []interface{}{ + colValue{ + opt: ColBitAnd, + value: 28, + }, + colValue{ + opt: ColBitOr, + value: 12, + }, + colValue{ + opt: ColBitXOR, + value: 2, + }, + "test_origin_name", + 18, + }, + wantRes: "SET `count` = `count` & ?, `age` = `age` | ?, `score` = `score` ^ ?", + wantValues: []interface{}{int64(28), int64(12), int64(2), "test_origin_name", 18}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + buf := buffers.Get() + defer buffers.Put(buf) + + tc.db.buildSetSQL(buf, tc.columns, tc.values) + + assert.Equal(t, tc.wantRes, buf.String()) + assert.Equal(t, tc.wantValues, tc.values) + }) + } +} + +func TestDbBase_UpdateBatchSQL(t *testing.T) { + mi := &models.ModelInfo{ + Table: "test_tab", + Fields: &models.Fields{ + Pk: &models.FieldInfo{ + Column: "test_id", + }, + }, + } + + testCases := []struct { + name string + db *dbBase + + columns []string + values []interface{} + + specifyIndexes string + join string + where string + + wantRes string + }{ + { + name: "update batch by dbBase", + db: &dbBase{ + ins: &dbBase{}, + }, + + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColAdd, + value: 12, + }, + colValue{ + opt: ColMultiply, + value: 2, + }, + "test_origin_name", + 18, + }, + + specifyIndexes: " USE INDEX(`name`) ", + join: "LEFT OUTER JOIN `test_tab_2` T1 ON T1.`id` = T0.`test_id` ", + where: "WHERE T0.`name` = ? AND T1.`age` = ?", + + wantRes: "UPDATE `test_tab` T0 USE INDEX(`name`) LEFT OUTER JOIN `test_tab_2` T1 ON T1.`id` = T0.`test_id` SET T0.`name` = ?, T0.`age` = T0.`age` + ?, T0.`score` = T0.`score` * ? WHERE T0.`name` = ? 
AND T1.`age` = ?", + }, + { + name: "update batch by dbBasePostgres", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColAdd, + value: 12, + }, + colValue{ + opt: ColMultiply, + value: 2, + }, + "test_origin_name", + 18, + }, + + specifyIndexes: ` USE INDEX("name") `, + join: `LEFT OUTER JOIN "test_tab_2" T1 ON T1."id" = T0."test_id" `, + where: `WHERE T0."name" = ? AND T1."age" = ?`, + + wantRes: `UPDATE "test_tab" SET "name" = $1, "age" = "age" + $2, "score" = "score" * $3 WHERE "test_id" IN ( SELECT T0."test_id" FROM "test_tab" T0 USE INDEX("name") LEFT OUTER JOIN "test_tab_2" T1 ON T1."id" = T0."test_id" WHERE T0."name" = $4 AND T1."age" = $5 )`, + }, + { + name: "update batch by dbBaseSqlite", + db: &dbBase{ + ins: newdbBaseSqlite(), + }, + + columns: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + colValue{ + opt: ColAdd, + value: 12, + }, + colValue{ + opt: ColMultiply, + value: 2, + }, + "test_origin_name", + 18, + }, + + specifyIndexes: " USE INDEX(`name`) ", + join: "LEFT OUTER JOIN `test_tab_2` T1 ON T1.`id` = T0.`test_id` ", + where: "WHERE T0.`name` = ? AND T1.`age` = ?", + + wantRes: "UPDATE `test_tab` SET `name` = ?, `age` = `age` + ?, `score` = `score` * ? WHERE `test_id` IN ( SELECT T0.`test_id` FROM `test_tab` T0 USE INDEX(`name`) LEFT OUTER JOIN `test_tab_2` T1 ON T1.`id` = T0.`test_id` WHERE T0.`name` = ? AND T1.`age` = ? 
)", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + res := tc.db.UpdateBatchSQL(mi, tc.columns, tc.values, tc.specifyIndexes, tc.join, tc.where) + + assert.Equal(t, tc.wantRes, res) + }) + } +} From 46a00d3592e925327e00639c7213cf5801ffc2d0 Mon Sep 17 00:00:00 2001 From: Uzziah <120019273+uzziahlin@users.noreply.github.com> Date: Fri, 18 Aug 2023 20:47:24 +0800 Subject: [PATCH 2/9] fix: refactor InsertOrUpdate method in dbBase (#5296) * fix: refactor InsertOrUpdate method in dbBase and add the test * fix: add the change record to the CHANGELOG.md --- CHANGELOG.md | 1 + client/orm/db.go | 181 ++++++++++++++++++++------------ client/orm/db_test.go | 238 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 355 insertions(+), 65 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1747197a..964545a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - [fix: refactor DeleteSQL method](https://github.com/beego/beego/pull/5271) - [fix: refactor UpdateSQL method](https://github.com/beego/beego/pull/5274) - [fix: refactor UpdateBatch method](https://github.com/beego/beego/pull/5295) +- [fix: refactor InsertOrUpdate method](https://github.com/beego/beego/pull/5296) ## ORM refactoring - [introducing internal/models pkg](https://github.com/beego/beego/pull/5238) diff --git a/client/orm/db.go b/client/orm/db.go index bded065f..15db2f77 100644 --- a/client/orm/db.go +++ b/client/orm/db.go @@ -528,80 +528,20 @@ func (d *dbBase) InsertValueSQL(names []string, values []interface{}, isMulti bo // If your primary key or unique column conflict will update // If no will insert func (d *dbBase) InsertOrUpdate(ctx context.Context, q dbQuerier, mi *models.ModelInfo, ind reflect.Value, a *alias, args ...string) (int64, error) { - args0 := "" - iouStr := "" - argsMap := map[string]string{} - switch a.Driver { - case DRMySQL: - iouStr = "ON DUPLICATE KEY UPDATE" - case DRPostgres: - if len(args) == 0 { - return 0, 
fmt.Errorf("`%s` use InsertOrUpdate must have a conflict column", a.DriverName) - } - args0 = strings.ToLower(args[0]) - iouStr = fmt.Sprintf("ON CONFLICT (%s) DO UPDATE SET", args0) - default: - return 0, fmt.Errorf("`%s` nonsupport InsertOrUpdate in beego", a.DriverName) - } - - // Get on the key-value pairs - for _, v := range args { - kv := strings.Split(v, "=") - if len(kv) == 2 { - argsMap[strings.ToLower(kv[0])] = kv[1] - } - } names := make([]string, 0, len(mi.Fields.DBcols)-1) - Q := d.ins.TableQuote() + values, _, err := d.collectValues(mi, ind, mi.Fields.DBcols, true, true, &names, a.TZ) if err != nil { return 0, err } - marks := make([]string, len(names)) - updateValues := make([]interface{}, 0) - updates := make([]string, len(names)) - var conflitValue interface{} - for i, v := range names { - // identifier in database may not be case-sensitive, so quote it - v = fmt.Sprintf("%s%s%s", Q, v, Q) - marks[i] = "?" - valueStr := argsMap[strings.ToLower(v)] - if v == args0 { - conflitValue = values[i] - } - if valueStr != "" { - switch a.Driver { - case DRMySQL: - updates[i] = v + "=" + valueStr - case DRPostgres: - if conflitValue != nil { - // postgres ON CONFLICT DO UPDATE SET can`t use colu=colu+values - updates[i] = fmt.Sprintf("%s=(select %s from %s where %s = ? )", v, valueStr, mi.Table, args0) - updateValues = append(updateValues, conflitValue) - } else { - return 0, fmt.Errorf("`%s` must be in front of `%s` in your struct", args0, v) - } - } - } else { - updates[i] = v + "=?" - updateValues = append(updateValues, values[i]) - } + query, err := d.InsertOrUpdateSQL(names, &values, mi, a, args...) + + if err != nil { + return 0, err } - values = append(values, updateValues...) 
- - sep := fmt.Sprintf("%s, %s", Q, Q) - qmarks := strings.Join(marks, ", ") - qupdates := strings.Join(updates, ", ") - columns := strings.Join(names, sep) - - // conflitValue maybe is a int,can`t use fmt.Sprintf - query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s) %s "+qupdates, Q, mi.Table, Q, Q, columns, Q, qmarks, iouStr) - - d.ins.ReplaceMarks(&query) - if !d.ins.HasReturningID(mi, &query) { res, err := q.ExecContext(ctx, query, values...) if err == nil { @@ -625,6 +565,117 @@ func (d *dbBase) InsertOrUpdate(ctx context.Context, q dbQuerier, mi *models.Mod return id, err } +func (d *dbBase) InsertOrUpdateSQL(names []string, values *[]interface{}, mi *models.ModelInfo, a *alias, args ...string) (string, error) { + + args0 := "" + + switch a.Driver { + case DRMySQL: + case DRPostgres: + if len(args) == 0 { + return "", fmt.Errorf("`%s` use InsertOrUpdate must have a conflict column", a.DriverName) + } + args0 = strings.ToLower(args[0]) + default: + return "", fmt.Errorf("`%s` nonsupport InsertOrUpdate in beego", a.DriverName) + } + + argsMap := map[string]string{} + // Get on the key-value pairs + for _, v := range args { + kv := strings.Split(v, "=") + if len(kv) == 2 { + argsMap[strings.ToLower(kv[0])] = kv[1] + } + } + + quote := d.ins.TableQuote() + + buf := buffers.Get() + defer buffers.Put(buf) + + _, _ = buf.WriteString("INSERT INTO ") + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(mi.Table) + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(" (") + + for i, name := range names { + if i > 0 { + _, _ = buf.WriteString(", ") + } + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(name) + _, _ = buf.WriteString(quote) + } + + _, _ = buf.WriteString(") VALUES (") + + for i := 0; i < len(names); i++ { + if i > 0 { + _, _ = buf.WriteString(", ") + } + _, _ = buf.WriteString("?") + } + + _, _ = buf.WriteString(") ") + + switch a.Driver { + case DRMySQL: + _, _ = buf.WriteString("ON DUPLICATE KEY UPDATE ") + case DRPostgres: + _, 
_ = buf.WriteString("ON CONFLICT (") + _, _ = buf.WriteString(args0) + _, _ = buf.WriteString(") DO UPDATE SET ") + } + + var conflitValue interface{} + for i, v := range names { + if i > 0 { + _, _ = buf.WriteString(", ") + } + // identifier in database may not be case-sensitive, so quote it + v = fmt.Sprintf("%s%s%s", quote, v, quote) + valueStr := argsMap[strings.ToLower(v)] + if v == args0 { + conflitValue = (*values)[i] + } + if valueStr != "" { + switch a.Driver { + case DRMySQL: + _, _ = buf.WriteString(v) + _, _ = buf.WriteString("=") + _, _ = buf.WriteString(valueStr) + case DRPostgres: + if conflitValue != nil { + // postgres ON CONFLICT DO UPDATE SET can`t use colu=colu+values + _, _ = buf.WriteString(v) + _, _ = buf.WriteString("=(select ") + _, _ = buf.WriteString(valueStr) + _, _ = buf.WriteString(" from ") + _, _ = buf.WriteString(mi.Table) + _, _ = buf.WriteString(" where ") + _, _ = buf.WriteString(args0) + _, _ = buf.WriteString(" = ? )") + *values = append(*values, conflitValue) + } else { + return "", fmt.Errorf("`%s` must be in front of `%s` in your struct", args0, v) + } + } + } else { + _, _ = buf.WriteString(v) + _, _ = buf.WriteString("=?") + *values = append(*values, (*values)[i]) + } + } + + query := buf.String() + + d.ins.ReplaceMarks(&query) + + return query, nil +} + // Update execute update sql dbQuerier with given struct reflect.Value. 
func (d *dbBase) Update(ctx context.Context, q dbQuerier, mi *models.ModelInfo, ind reflect.Value, tz *time.Location, cols []string) (int64, error) { pkName, pkValue, ok := getExistPk(mi, ind) diff --git a/client/orm/db_test.go b/client/orm/db_test.go index 6a61f380..cc79b108 100644 --- a/client/orm/db_test.go +++ b/client/orm/db_test.go @@ -15,6 +15,7 @@ package orm import ( + "errors" "github.com/beego/beego/v2/client/orm/internal/buffers" "testing" @@ -648,3 +649,240 @@ func TestDbBase_UpdateBatchSQL(t *testing.T) { }) } } + +func TestDbBase_InsertOrUpdateSQL(t *testing.T) { + + mi := &models.ModelInfo{ + Table: "test_tab", + } + + testCases := []struct { + name string + db *dbBase + + names []string + values []interface{} + a *alias + args []string + + wantRes string + wantErr error + wantValues []interface{} + }{ + { + name: "test nonsupport driver", + db: &dbBase{ + ins: newdbBaseSqlite(), + }, + + names: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + 18, + 12, + }, + a: &alias{ + Driver: DRSqlite, + DriverName: "sqlite3", + }, + args: []string{ + "`age`=20", + "`score`=`score`+1", + }, + + wantErr: errors.New("`sqlite3` nonsupport InsertOrUpdate in beego"), + wantValues: []interface{}{ + "test_name", + 18, + 12, + }, + }, + { + name: "insert or update with MySQL", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + + names: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + 18, + 12, + }, + a: &alias{ + Driver: DRMySQL, + DriverName: "mysql", + }, + args: []string{ + "`age`=20", + "`score`=`score`+1", + }, + + wantRes: "INSERT INTO `test_tab` (`name`, `age`, `score`) VALUES (?, ?, ?) 
ON DUPLICATE KEY UPDATE `name`=?, `age`=20, `score`=`score`+1", + wantValues: []interface{}{ + "test_name", + 18, + 12, + "test_name", + }, + }, + { + name: "insert or update with MySQL with no args", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + + names: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + 18, + 12, + }, + a: &alias{ + Driver: DRMySQL, + DriverName: "mysql", + }, + + wantRes: "INSERT INTO `test_tab` (`name`, `age`, `score`) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE `name`=?, `age`=?, `score`=?", + wantValues: []interface{}{ + "test_name", + 18, + 12, + "test_name", + 18, + 12, + }, + }, + { + name: "insert or update with PostgreSQL normal", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + + names: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + 18, + 12, + }, + a: &alias{ + Driver: DRPostgres, + DriverName: "postgres", + }, + args: []string{ + `"name"`, + `"score"="score_1"`, + }, + + wantRes: `INSERT INTO "test_tab" ("name", "age", "score") VALUES ($1, $2, $3) ON CONFLICT ("name") DO UPDATE SET "name"=$4, "age"=$5, "score"=(select "score_1" from test_tab where "name" = $6 )`, + wantValues: []interface{}{ + "test_name", + 18, + 12, + "test_name", + 18, + "test_name", + }, + }, + { + name: "insert or update with PostgreSQL without conflict column", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + + names: []string{"name", "age", "score"}, + values: []interface{}{ + "test_name", + 18, + 12, + }, + a: &alias{ + Driver: DRPostgres, + DriverName: "postgres", + }, + + wantErr: errors.New("`postgres` use InsertOrUpdate must have a conflict column"), + wantValues: []interface{}{ + "test_name", + 18, + 12, + }, + }, + { + name: "insert or update with PostgreSQL the conflict column is not in front of the specified column", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + + names: []string{"score", "name", "age"}, + values: []interface{}{ + 12, + "test_name", + 18, + }, + a: &alias{ + Driver: DRPostgres, 
+ DriverName: "postgres", + }, + args: []string{ + `"name"`, + `"score"="score_1"`, + }, + + wantErr: errors.New("`\"name\"` must be in front of `\"score\"` in your struct"), + wantValues: []interface{}{ + 12, + "test_name", + 18, + }, + }, + { + name: "insert or update with PostgreSQL the conflict column is in front of the specified column", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + + names: []string{"age", "name", "score"}, + values: []interface{}{ + 18, + "test_name", + 12, + }, + a: &alias{ + Driver: DRPostgres, + DriverName: "postgres", + }, + args: []string{ + `"name"`, + `"score"="score_1"`, + }, + + wantRes: `INSERT INTO "test_tab" ("age", "name", "score") VALUES ($1, $2, $3) ON CONFLICT ("name") DO UPDATE SET "age"=$4, "name"=$5, "score"=(select "score_1" from test_tab where "name" = $6 )`, + wantValues: []interface{}{ + 18, + "test_name", + 12, + 18, + "test_name", + "test_name", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + res, err := tc.db.InsertOrUpdateSQL(tc.names, &tc.values, mi, tc.a, tc.args...) 
+ + assert.Equal(t, tc.wantValues, tc.values) + + assert.Equal(t, tc.wantErr, err) + if err != nil { + return + } + + assert.Equal(t, tc.wantRes, res) + }) + } + +} From e9d3357643d3df214fadb5062abe53bf0b8e3153 Mon Sep 17 00:00:00 2001 From: Uzziah <120019273+uzziahlin@users.noreply.github.com> Date: Wed, 23 Aug 2023 18:35:17 +0800 Subject: [PATCH 3/9] fix: refactor ReadBatch method (#5298) --- CHANGELOG.md | 1 + client/orm/db.go | 108 +++++++++----- client/orm/db_test.go | 324 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 400 insertions(+), 33 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 964545a6..0d83cd95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ - [fix: refactor UpdateSQL method](https://github.com/beego/beego/pull/5274) - [fix: refactor UpdateBatch method](https://github.com/beego/beego/pull/5295) - [fix: refactor InsertOrUpdate method](https://github.com/beego/beego/pull/5296) +- [fix: refactor ReadBatch method](https://github.com/beego/beego/pull/5298) ## ORM refactoring - [introducing internal/models pkg](https://github.com/beego/beego/pull/5238) diff --git a/client/orm/db.go b/client/orm/db.go index 15db2f77..87e4a96c 100644 --- a/client/orm/db.go +++ b/client/orm/db.go @@ -1127,11 +1127,6 @@ func (d *dbBase) ReadBatch(ctx context.Context, q dbQuerier, qs *querySet, mi *m RegisterModel(container) } - rlimit := qs.limit - offset := qs.offset - - Q := d.ins.TableQuote() - var tCols []string if len(cols) > 0 { hasRel := len(qs.related) > 0 || qs.relDepth > 0 @@ -1163,44 +1158,18 @@ func (d *dbBase) ReadBatch(ctx context.Context, q dbQuerier, qs *querySet, mi *m tCols = mi.Fields.DBcols } - colsNum := len(tCols) - sep := fmt.Sprintf("%s, T0.%s", Q, Q) - sels := fmt.Sprintf("T0.%s%s%s", Q, strings.Join(tCols, sep), Q) - tables := newDbTables(mi, d.ins) tables.parseRelated(qs.related, qs.relDepth) - where, args := tables.getCondSQL(cond, false, tz) - groupBy := tables.getGroupSQL(qs.groups) - orderBy := 
tables.getOrderSQL(qs.orders) - limit := tables.getLimitSQL(mi, offset, rlimit) - join := tables.getJoinSQL() - specifyIndexes := tables.getIndexSql(mi.Table, qs.useIndex, qs.indexes) + colsNum := len(tCols) for _, tbl := range tables.tables { if tbl.sel { colsNum += len(tbl.mi.Fields.DBcols) - sep := fmt.Sprintf("%s, %s.%s", Q, tbl.index, Q) - sels += fmt.Sprintf(", %s.%s%s%s", tbl.index, Q, strings.Join(tbl.mi.Fields.DBcols, sep), Q) } } - sqlSelect := "SELECT" - if qs.distinct { - sqlSelect += " DISTINCT" - } - if qs.aggregate != "" { - sels = qs.aggregate - } - query := fmt.Sprintf("%s %s FROM %s%s%s T0 %s%s%s%s%s%s", - sqlSelect, sels, Q, mi.Table, Q, - specifyIndexes, join, where, groupBy, orderBy, limit) - - if qs.forUpdate { - query += " FOR UPDATE" - } - - d.ins.ReplaceMarks(&query) + query, args := d.readBatchSQL(tables, tCols, cond, qs, mi, tz) rs, err := q.QueryContext(ctx, query, args...) if err != nil { @@ -1322,6 +1291,79 @@ func (d *dbBase) ReadBatch(ctx context.Context, q dbQuerier, qs *querySet, mi *m return cnt, nil } +func (d *dbBase) readBatchSQL(tables *dbTables, tCols []string, cond *Condition, qs *querySet, mi *models.ModelInfo, tz *time.Location) (string, []interface{}) { + + quote := d.ins.TableQuote() + + where, args := tables.getCondSQL(cond, false, tz) + groupBy := tables.getGroupSQL(qs.groups) + orderBy := tables.getOrderSQL(qs.orders) + limit := tables.getLimitSQL(mi, qs.offset, qs.limit) + join := tables.getJoinSQL() + specifyIndexes := tables.getIndexSql(mi.Table, qs.useIndex, qs.indexes) + + buf := buffers.Get() + defer buffers.Put(buf) + + _, _ = buf.WriteString("SELECT ") + + if qs.distinct { + _, _ = buf.WriteString("DISTINCT ") + } + + if qs.aggregate == "" { + for i, tCol := range tCols { + if i > 0 { + _, _ = buf.WriteString(", ") + } + _, _ = buf.WriteString("T0.") + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(tCol) + _, _ = buf.WriteString(quote) + } + + for _, tbl := range tables.tables { + if tbl.sel { + _, _ = 
buf.WriteString(", ") + for i, DBcol := range tbl.mi.Fields.DBcols { + if i > 0 { + _, _ = buf.WriteString(", ") + } + _, _ = buf.WriteString(tbl.index) + _, _ = buf.WriteString(".") + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(DBcol) + _, _ = buf.WriteString(quote) + } + } + } + } else { + _, _ = buf.WriteString(qs.aggregate) + } + + _, _ = buf.WriteString(" FROM ") + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(mi.Table) + _, _ = buf.WriteString(quote) + _, _ = buf.WriteString(" T0 ") + _, _ = buf.WriteString(specifyIndexes) + _, _ = buf.WriteString(join) + _, _ = buf.WriteString(where) + _, _ = buf.WriteString(groupBy) + _, _ = buf.WriteString(orderBy) + _, _ = buf.WriteString(limit) + + if qs.forUpdate { + _, _ = buf.WriteString(" FOR UPDATE") + } + + query := buf.String() + + d.ins.ReplaceMarks(&query) + + return query, args +} + // Count excute count sql and return count result int64. func (d *dbBase) Count(ctx context.Context, q dbQuerier, qs *querySet, mi *models.ModelInfo, cond *Condition, tz *time.Location) (cnt int64, err error) { tables := newDbTables(mi, d.ins) diff --git a/client/orm/db_test.go b/client/orm/db_test.go index cc79b108..f7553b08 100644 --- a/client/orm/db_test.go +++ b/client/orm/db_test.go @@ -16,8 +16,10 @@ package orm import ( "errors" + "github.com/beego/beego/v2/client/orm/clauses/order_clause" "github.com/beego/beego/v2/client/orm/internal/buffers" "testing" + "time" "github.com/stretchr/testify/assert" @@ -886,3 +888,325 @@ func TestDbBase_InsertOrUpdateSQL(t *testing.T) { } } + +func TestDbBase_readBatchSQL(t *testing.T) { + + tCols := []string{"name", "score"} + + mc := &modelCache{ + cache: make(map[string]*models.ModelInfo), + cacheByFullName: make(map[string]*models.ModelInfo), + } + + err := mc.register("", false, new(testTab), new(testTab1), new(testTab2)) + + assert.Nil(t, err) + + mc.bootstrap() + + mi, ok := mc.getByMd(new(testTab)) + + assert.True(t, ok) + + cond := NewCondition().And("name", 
"test_name"). + OrCond(NewCondition().And("age__gt", 18).And("score__lt", 60)) + + tz := time.Local + + testCases := []struct { + name string + db *dbBase + + qs *querySet + + wantRes string + wantArgs []interface{} + }{ + { + name: "read batch with MySQL", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + useIndex: 1, + indexes: []string{"name", "score"}, + related: make([]string, 0), + relDepth: 2, + }, + wantRes: "SELECT T0.`name`, T0.`score`, T1.`id`, T1.`name_1`, T1.`age_1`, T1.`score_1`, T1.`test_tab_2_id`, T2.`id`, T2.`name_2`, T2.`age_2`, T2.`score_2` FROM `test_tab` T0 USE INDEX(`name`,`score`) INNER JOIN `test_tab1` T1 ON T1.`id` = T0.`test_tab_1_id` INNER JOIN `test_tab2` T2 ON T2.`id` = T1.`test_tab_2_id` WHERE T0.`name` = ? OR ( T0.`age` > ? AND T0.`score` < ? 
) GROUP BY T0.`name`, T0.`age` ORDER BY T0.`score` DESC, T0.`age` ASC LIMIT 10 OFFSET 100", + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read batch with MySQL and distinct", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + useIndex: 1, + indexes: []string{"name", "score"}, + distinct: true, + related: make([]string, 0), + relDepth: 2, + }, + wantRes: "SELECT DISTINCT T0.`name`, T0.`score`, T1.`id`, T1.`name_1`, T1.`age_1`, T1.`score_1`, T1.`test_tab_2_id`, T2.`id`, T2.`name_2`, T2.`age_2`, T2.`score_2` FROM `test_tab` T0 USE INDEX(`name`,`score`) INNER JOIN `test_tab1` T1 ON T1.`id` = T0.`test_tab_1_id` INNER JOIN `test_tab2` T2 ON T2.`id` = T1.`test_tab_2_id` WHERE T0.`name` = ? OR ( T0.`age` > ? AND T0.`score` < ? 
) GROUP BY T0.`name`, T0.`age` ORDER BY T0.`score` DESC, T0.`age` ASC LIMIT 10 OFFSET 100", + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read batch with MySQL and aggregate", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + useIndex: 1, + indexes: []string{"name", "score"}, + aggregate: "sum(`T0`.`score`), count(`T1`.`name_1`)", + related: make([]string, 0), + relDepth: 2, + }, + wantRes: "SELECT sum(`T0`.`score`), count(`T1`.`name_1`) FROM `test_tab` T0 USE INDEX(`name`,`score`) INNER JOIN `test_tab1` T1 ON T1.`id` = T0.`test_tab_1_id` INNER JOIN `test_tab2` T2 ON T2.`id` = T1.`test_tab_2_id` WHERE T0.`name` = ? OR ( T0.`age` > ? AND T0.`score` < ? 
) GROUP BY T0.`name`, T0.`age` ORDER BY T0.`score` DESC, T0.`age` ASC LIMIT 10 OFFSET 100", + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read batch with MySQL and distinct and aggregate", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + useIndex: 1, + indexes: []string{"name", "score"}, + distinct: true, + aggregate: "sum(`T0`.`score`), count(`T1`.`name_1`)", + related: make([]string, 0), + relDepth: 2, + }, + wantRes: "SELECT DISTINCT sum(`T0`.`score`), count(`T1`.`name_1`) FROM `test_tab` T0 USE INDEX(`name`,`score`) INNER JOIN `test_tab1` T1 ON T1.`id` = T0.`test_tab_1_id` INNER JOIN `test_tab2` T2 ON T2.`id` = T1.`test_tab_2_id` WHERE T0.`name` = ? OR ( T0.`age` > ? AND T0.`score` < ? 
) GROUP BY T0.`name`, T0.`age` ORDER BY T0.`score` DESC, T0.`age` ASC LIMIT 10 OFFSET 100", + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read batch with MySQL and for update", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + useIndex: 1, + indexes: []string{"name", "score"}, + forUpdate: true, + related: make([]string, 0), + relDepth: 2, + }, + wantRes: "SELECT T0.`name`, T0.`score`, T1.`id`, T1.`name_1`, T1.`age_1`, T1.`score_1`, T1.`test_tab_2_id`, T2.`id`, T2.`name_2`, T2.`age_2`, T2.`score_2` FROM `test_tab` T0 USE INDEX(`name`,`score`) INNER JOIN `test_tab1` T1 ON T1.`id` = T0.`test_tab_1_id` INNER JOIN `test_tab2` T2 ON T2.`id` = T1.`test_tab_2_id` WHERE T0.`name` = ? OR ( T0.`age` > ? AND T0.`score` < ? 
) GROUP BY T0.`name`, T0.`age` ORDER BY T0.`score` DESC, T0.`age` ASC LIMIT 10 OFFSET 100 FOR UPDATE", + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read batch with PostgreSQL", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + related: make([]string, 0), + relDepth: 2, + }, + wantRes: `SELECT T0."name", T0."score", T1."id", T1."name_1", T1."age_1", T1."score_1", T1."test_tab_2_id", T2."id", T2."name_2", T2."age_2", T2."score_2" FROM "test_tab" T0 INNER JOIN "test_tab1" T1 ON T1."id" = T0."test_tab_1_id" INNER JOIN "test_tab2" T2 ON T2."id" = T1."test_tab_2_id" WHERE T0."name" = $1 OR ( T0."age" > $2 AND T0."score" < $3 ) GROUP BY T0."name", T0."age" ORDER BY T0."score" DESC, T0."age" ASC LIMIT 10 OFFSET 100`, + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read batch with PostgreSQL and distinct", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + distinct: true, + related: make([]string, 0), + relDepth: 2, + }, + wantRes: `SELECT DISTINCT T0."name", T0."score", T1."id", T1."name_1", T1."age_1", T1."score_1", T1."test_tab_2_id", T2."id", T2."name_2", T2."age_2", T2."score_2" FROM "test_tab" T0 INNER JOIN "test_tab1" T1 ON T1."id" = T0."test_tab_1_id" INNER JOIN "test_tab2" T2 ON T2."id" = T1."test_tab_2_id" WHERE T0."name" = $1 OR ( T0."age" > $2 AND T0."score" < $3 ) GROUP BY T0."name", 
T0."age" ORDER BY T0."score" DESC, T0."age" ASC LIMIT 10 OFFSET 100`, + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read batch with PostgreSQL and aggregate", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + aggregate: `sum("T0"."score"), count("T1"."name_1")`, + related: make([]string, 0), + relDepth: 2, + }, + wantRes: `SELECT sum("T0"."score"), count("T1"."name_1") FROM "test_tab" T0 INNER JOIN "test_tab1" T1 ON T1."id" = T0."test_tab_1_id" INNER JOIN "test_tab2" T2 ON T2."id" = T1."test_tab_2_id" WHERE T0."name" = $1 OR ( T0."age" > $2 AND T0."score" < $3 ) GROUP BY T0."name", T0."age" ORDER BY T0."score" DESC, T0."age" ASC LIMIT 10 OFFSET 100`, + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read batch with PostgreSQL and distinct and aggregate", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + distinct: true, + aggregate: `sum("T0"."score"), count("T1"."name_1")`, + related: make([]string, 0), + relDepth: 2, + }, + wantRes: `SELECT DISTINCT sum("T0"."score"), count("T1"."name_1") FROM "test_tab" T0 INNER JOIN "test_tab1" T1 ON T1."id" = T0."test_tab_1_id" INNER JOIN "test_tab2" T2 ON T2."id" = T1."test_tab_2_id" WHERE T0."name" = $1 OR ( T0."age" > $2 AND T0."score" < $3 ) GROUP BY T0."name", T0."age" ORDER BY T0."score" DESC, T0."age" ASC LIMIT 10 OFFSET 100`, + wantArgs: 
[]interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read batch with PostgreSQL and for update", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + forUpdate: true, + related: make([]string, 0), + relDepth: 2, + }, + wantRes: `SELECT T0."name", T0."score", T1."id", T1."name_1", T1."age_1", T1."score_1", T1."test_tab_2_id", T2."id", T2."name_2", T2."age_2", T2."score_2" FROM "test_tab" T0 INNER JOIN "test_tab1" T1 ON T1."id" = T0."test_tab_1_id" INNER JOIN "test_tab2" T2 ON T2."id" = T1."test_tab_2_id" WHERE T0."name" = $1 OR ( T0."age" > $2 AND T0."score" < $3 ) GROUP BY T0."name", T0."age" ORDER BY T0."score" DESC, T0."age" ASC LIMIT 10 OFFSET 100 FOR UPDATE`, + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tables := newDbTables(mi, tc.db.ins) + tables.parseRelated(tc.qs.related, tc.qs.relDepth) + + res, args := tc.db.readBatchSQL(tables, tCols, cond, tc.qs, mi, tz) + + assert.Equal(t, tc.wantRes, res) + assert.Equal(t, tc.wantArgs, args) + }) + } + +} + +type testTab struct { + ID int64 `orm:"auto;pk;column(id)"` + Name string `orm:"column(name)"` + Age int64 `orm:"column(age)"` + Score int64 `orm:"column(score)"` + TestTab1 *testTab1 `orm:"rel(fk);column(test_tab_1_id)"` +} + +type testTab1 struct { + ID int64 `orm:"auto;pk;column(id)"` + Name1 string `orm:"column(name_1)"` + Age1 int64 `orm:"column(age_1)"` + Score1 int64 `orm:"column(score_1)"` + TestTab2 *testTab2 `orm:"rel(fk);column(test_tab_2_id)"` +} + +type testTab2 struct { + ID int64 `orm:"auto;pk;column(id)"` + Name2 int64 `orm:"column(name_2)"` + Age2 int64 `orm:"column(age_2)"` + 
Score2 int64 `orm:"column(score_2)"` +} From 2d0da431cbd8ca87a8c0fd8c25a9e423d0224da0 Mon Sep 17 00:00:00 2001 From: Ming Deng Date: Mon, 28 Aug 2023 21:10:31 +0800 Subject: [PATCH 4/9] refactor: move the modelCache to internal/models package (#5306) --- CHANGELOG.md | 1 + client/orm/cmd.go | 14 +- client/orm/cmd_utils.go | 2 +- client/orm/db.go | 22 +- client/orm/db_alias.go | 12 +- client/orm/db_mysql.go | 4 +- client/orm/db_oracle.go | 6 +- client/orm/db_postgres.go | 4 +- client/orm/db_sqlite.go | 10 +- client/orm/db_test.go | 38 ++- client/orm/db_tidb.go | 4 +- client/orm/db_utils.go | 8 +- client/orm/ddl.go | 195 +++++++++++++ client/orm/{model_test.go => ddl_test.go} | 10 +- client/orm/filter_orm_decorator.go | 22 +- client/orm/{ => internal/models}/models.go | 305 ++++----------------- client/orm/internal/models/models_test.go | 49 ++++ client/orm/invocation.go | 2 +- client/orm/model_utils_test.go | 49 ---- client/orm/models_boot.go | 36 ++- client/orm/models_fields.go | 8 +- client/orm/models_test.go | 12 +- client/orm/orm.go | 30 +- client/orm/orm_log.go | 2 +- client/orm/orm_querym2m.go | 4 +- client/orm/orm_queryset.go | 22 +- client/orm/orm_raw.go | 22 +- client/orm/orm_test.go | 20 +- client/orm/qb_mysql.go | 4 +- client/orm/qb_postgres.go | 4 +- client/orm/types.go | 46 ++-- 31 files changed, 498 insertions(+), 469 deletions(-) create mode 100644 client/orm/ddl.go rename client/orm/{model_test.go => ddl_test.go} (96%) rename client/orm/{ => internal/models}/models.go (51%) create mode 100644 client/orm/internal/models/models_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 0d83cd95..e5c3433d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ # developing +- [orm: move the modelCache to internal/models package](https://github.com/beego/beego/pull/5306) # v2.1.1 - [httplib: fix unstable unit test which use the httplib.org](https://github.com/beego/beego/pull/5232) diff --git a/client/orm/cmd.go b/client/orm/cmd.go index 
cd6fd5cc..a9edcb8d 100644 --- a/client/orm/cmd.go +++ b/client/orm/cmd.go @@ -104,7 +104,7 @@ func (d *commandSyncDb) Run() error { var drops []string var err error if d.force { - drops, err = defaultModelCache.getDbDropSQL(d.al) + drops, err = getDbDropSQL(defaultModelCache, d.al) if err != nil { return err } @@ -113,7 +113,7 @@ func (d *commandSyncDb) Run() error { db := d.al.DB if d.force && len(drops) > 0 { - for i, mi := range defaultModelCache.allOrdered() { + for i, mi := range defaultModelCache.AllOrdered() { query := drops[i] if !d.noInfo { fmt.Printf("drop table `%s`\n", mi.Table) @@ -131,7 +131,7 @@ func (d *commandSyncDb) Run() error { } } - createQueries, indexes, err := defaultModelCache.getDbCreateSQL(d.al) + createQueries, indexes, err := getDbCreateSQL(defaultModelCache, d.al) if err != nil { return err } @@ -145,7 +145,7 @@ func (d *commandSyncDb) Run() error { } ctx := context.Background() - for i, mi := range defaultModelCache.allOrdered() { + for i, mi := range defaultModelCache.AllOrdered() { if !models.IsApplicableTableForDB(mi.AddrField, d.al.Name) { fmt.Printf("table `%s` is not applicable to database '%s'\n", mi.Table, d.al.Name) @@ -262,12 +262,12 @@ func (d *commandSQLAll) Parse(args []string) { // Run orm line command. func (d *commandSQLAll) Run() error { - createQueries, indexes, err := defaultModelCache.getDbCreateSQL(d.al) + createQueries, indexes, err := getDbCreateSQL(defaultModelCache, d.al) if err != nil { return err } var all []string - for i, mi := range defaultModelCache.allOrdered() { + for i, mi := range defaultModelCache.AllOrdered() { queries := []string{createQueries[i]} for _, idx := range indexes[mi.Table] { queries = append(queries, idx.SQL) @@ -288,7 +288,7 @@ func init() { // RunSyncdb run syncdb command line. 
// name: Table's alias name (default is "default") // force: Run the next sql command even if the current gave an error -// verbose: Print all information, useful for debugging +// verbose: Print All information, useful for debugging func RunSyncdb(name string, force bool, verbose bool) error { BootStrap() diff --git a/client/orm/cmd_utils.go b/client/orm/cmd_utils.go index b327dd6f..3a26c61d 100644 --- a/client/orm/cmd_utils.go +++ b/client/orm/cmd_utils.go @@ -27,7 +27,7 @@ type dbIndex struct { SQL string } -// get database column type string. +// Get database column type string. func getColumnTyp(al *alias, fi *models.FieldInfo) (col string) { T := al.DbBaser.DbTypes() fieldType := fi.FieldType diff --git a/client/orm/db.go b/client/orm/db.go index 87e4a96c..8d59fd01 100644 --- a/client/orm/db.go +++ b/client/orm/db.go @@ -74,7 +74,7 @@ type dbBase struct { // check dbBase implements dbBaser interface. var _ dbBaser = new(dbBase) -// get struct Columns values as interface slice. +// Get struct Columns values as interface slice. func (d *dbBase) collectValues(mi *models.ModelInfo, ind reflect.Value, cols []string, skipAuto bool, insert bool, names *[]string, tz *time.Location) (values []interface{}, autoFields []string, err error) { if names == nil { ns := make([]string, 0, len(cols)) @@ -117,7 +117,7 @@ func (d *dbBase) collectValues(mi *models.ModelInfo, ind reflect.Value, cols []s return } -// get one field value in struct column as interface. +// Get one field value in struct column as interface. func (d *dbBase) collectFieldValue(mi *models.ModelInfo, fi *models.FieldInfo, ind reflect.Value, insert bool, tz *time.Location) (interface{}, error) { var value interface{} if fi.Pk { @@ -685,7 +685,7 @@ func (d *dbBase) Update(ctx context.Context, q dbQuerier, mi *models.ModelInfo, var setNames []string - // if specify cols length is zero, then commit all Columns. + // if specify cols length is zero, then commit All Columns. 
if len(cols) == 0 { cols = mi.Fields.DBcols setNames = make([]string, 0, len(mi.Fields.DBcols)-1) @@ -1180,7 +1180,7 @@ func (d *dbBase) ReadBatch(ctx context.Context, q dbQuerier, qs *querySet, mi *m slice := ind if unregister { - mi, _ = defaultModelCache.get(name) + mi, _ = defaultModelCache.Get(name) tCols = mi.Fields.DBcols colsNum = len(tCols) } @@ -1281,7 +1281,7 @@ func (d *dbBase) ReadBatch(ctx context.Context, q dbQuerier, qs *querySet, mi *m ind.Set(slice) } else { // when a result is empty and container is nil - // to set an empty container + // to Set an empty container if ind.IsNil() { ind.Set(reflect.MakeSlice(ind.Type(), 0, 0)) } @@ -1457,7 +1457,7 @@ func (d *dbBase) GenerateOperatorLeftCol(*models.FieldInfo, string, *string) { // default not use } -// set values to struct column. +// Set values to struct column. func (d *dbBase) setColsValues(mi *models.ModelInfo, ind *reflect.Value, cols []string, values []interface{}, tz *time.Location) { for i, column := range cols { val := reflect.Indirect(reflect.ValueOf(values[i])).Interface() @@ -1643,7 +1643,7 @@ end: return value, nil } -// set one value to struct column field. +// Set one value to struct column field. func (d *dbBase) setFieldValue(fi *models.FieldInfo, value interface{}, field reflect.Value) (interface{}, error) { fieldType := fi.FieldType isNative := !fi.IsFielder @@ -1826,7 +1826,7 @@ setValue: fd := field.Addr().Interface().(models.Fielder) err := fd.SetRaw(value) if err != nil { - err = fmt.Errorf("converted value `%v` set to Fielder `%s` failed, err: %s", value, fi.FullName, err) + err = fmt.Errorf("converted value `%v` Set to Fielder `%s` failed, err: %s", value, fi.FullName, err) return nil, err } } @@ -2050,12 +2050,12 @@ func (d *dbBase) TimeToDB(t *time.Time, tz *time.Location) { *t = t.In(tz) } -// DbTypes get database types. +// DbTypes Get database types. func (d *dbBase) DbTypes() map[string]string { return nil } -// GetTables gt all tables. +// GetTables gt All tables. 
func (d *dbBase) GetTables(db dbQuerier) (map[string]bool, error) { tables := make(map[string]bool) query := d.ins.ShowTablesQuery() @@ -2080,7 +2080,7 @@ func (d *dbBase) GetTables(db dbQuerier) (map[string]bool, error) { return tables, rows.Err() } -// GetColumns get all cloumns in table. +// GetColumns Get All cloumns in table. func (d *dbBase) GetColumns(ctx context.Context, db dbQuerier, table string) (map[string][3]string, error) { columns := make(map[string][3]string) query := d.ins.ShowColumnsQuery(table) diff --git a/client/orm/db_alias.go b/client/orm/db_alias.go index ff0b962f..d7874166 100644 --- a/client/orm/db_alias.go +++ b/client/orm/db_alias.go @@ -42,13 +42,13 @@ const ( // database driver string. type driver string -// get type constant int of current driver.. +// Get type constant int of current driver.. func (d driver) Type() DriverType { a, _ := dataBaseCache.get(string(d)) return a.Driver } -// get name of current driver +// Get name of current driver func (d driver) Name() string { return string(d) } @@ -326,7 +326,7 @@ func detectTZ(al *alias) { } } - // get default engine from current database + // Get default engine from current database row = al.DB.QueryRow("SELECT ENGINE, TRANSACTIONS FROM information_schema.engines WHERE SUPPORT = 'DEFAULT'") var engine string var tx bool @@ -410,7 +410,7 @@ func newAliasWithDb(aliasName, driverName string, db *sql.DB, params ...DBOption err := db.Ping() if err != nil { - return nil, fmt.Errorf("register db Ping `%s`, %s", aliasName, err.Error()) + return nil, fmt.Errorf("Register db Ping `%s`, %s", aliasName, err.Error()) } detectTZ(al) @@ -465,7 +465,7 @@ func RegisterDataBase(aliasName, driverName, dataSource string, params ...DBOpti db, err = sql.Open(driverName, dataSource) if err != nil { - err = fmt.Errorf("register db `%s`, %s", aliasName, err.Error()) + err = fmt.Errorf("Register db `%s`, %s", aliasName, err.Error()) goto end } @@ -510,7 +510,7 @@ func SetDataBaseTZ(aliasName string, tz 
*time.Location) error { } // GetDB Get *sql.DB from registered database by db alias name. -// Use "default" as alias name if you not set. +// Use "default" as alias name if you not Set. func GetDB(aliasNames ...string) (*sql.DB, error) { var name string if len(aliasNames) > 0 { diff --git a/client/orm/db_mysql.go b/client/orm/db_mysql.go index 889d807f..e253f92a 100644 --- a/client/orm/db_mysql.go +++ b/client/orm/db_mysql.go @@ -76,12 +76,12 @@ type dbBaseMysql struct { var _ dbBaser = new(dbBaseMysql) -// OperatorSQL get mysql operator. +// OperatorSQL Get mysql operator. func (d *dbBaseMysql) OperatorSQL(operator string) string { return mysqlOperators[operator] } -// DbTypes get mysql table field types. +// DbTypes Get mysql table field types. func (d *dbBaseMysql) DbTypes() map[string]string { return mysqlTypes } diff --git a/client/orm/db_oracle.go b/client/orm/db_oracle.go index 5057f358..247959df 100644 --- a/client/orm/db_oracle.go +++ b/client/orm/db_oracle.go @@ -72,17 +72,17 @@ func newdbBaseOracle() dbBaser { return b } -// OperatorSQL get oracle operator. +// OperatorSQL Get oracle operator. func (d *dbBaseOracle) OperatorSQL(operator string) string { return oracleOperators[operator] } -// DbTypes get oracle table field types. +// DbTypes Get oracle table field types. func (d *dbBaseOracle) DbTypes() map[string]string { return oracleTypes } -// ShowTablesQuery show all the tables in database +// ShowTablesQuery show All the tables in database func (d *dbBaseOracle) ShowTablesQuery() string { return "SELECT TABLE_NAME FROM USER_TABLES" } diff --git a/client/orm/db_postgres.go b/client/orm/db_postgres.go index b52b2578..9a7383b8 100644 --- a/client/orm/db_postgres.go +++ b/client/orm/db_postgres.go @@ -74,7 +74,7 @@ type dbBasePostgres struct { var _ dbBaser = new(dbBasePostgres) -// get postgresql operator. +// Get postgresql operator. 
func (d *dbBasePostgres) OperatorSQL(operator string) string { return postgresOperators[operator] } @@ -173,7 +173,7 @@ func (d *dbBasePostgres) ShowColumnsQuery(table string) string { return fmt.Sprintf("SELECT column_name, data_type, is_nullable FROM information_schema.Columns where table_schema NOT IN ('pg_catalog', 'information_schema') and table_name = '%s'", table) } -// get column types of postgresql. +// Get column types of postgresql. func (d *dbBasePostgres) DbTypes() map[string]string { return postgresTypes } diff --git a/client/orm/db_sqlite.go b/client/orm/db_sqlite.go index 8041f7be..0e84d4df 100644 --- a/client/orm/db_sqlite.go +++ b/client/orm/db_sqlite.go @@ -85,7 +85,7 @@ func (d *dbBaseSqlite) Read(ctx context.Context, q dbQuerier, mi *models.ModelIn return d.dbBase.Read(ctx, q, mi, ind, tz, cols, false) } -// get sqlite operator. +// Get sqlite operator. func (d *dbBaseSqlite) OperatorSQL(operator string) string { return sqliteOperators[operator] } @@ -108,17 +108,17 @@ func (d *dbBaseSqlite) MaxLimit() uint64 { return 9223372036854775807 } -// get column types in sqlite. +// Get column types in sqlite. func (d *dbBaseSqlite) DbTypes() map[string]string { return sqliteTypes } -// get show tables sql in sqlite. +// Get show tables sql in sqlite. func (d *dbBaseSqlite) ShowTablesQuery() string { return "SELECT name FROM sqlite_master WHERE type = 'table'" } -// get Columns in sqlite. +// Get Columns in sqlite. func (d *dbBaseSqlite) GetColumns(ctx context.Context, db dbQuerier, table string) (map[string][3]string, error) { query := d.ins.ShowColumnsQuery(table) rows, err := db.QueryContext(ctx, query) @@ -139,7 +139,7 @@ func (d *dbBaseSqlite) GetColumns(ctx context.Context, db dbQuerier, table strin return columns, rows.Err() } -// get show Columns sql in sqlite. +// Get show Columns sql in sqlite. 
func (d *dbBaseSqlite) ShowColumnsQuery(table string) string { return fmt.Sprintf("pragma table_info('%s')", table) } diff --git a/client/orm/db_test.go b/client/orm/db_test.go index f7553b08..32e90d17 100644 --- a/client/orm/db_test.go +++ b/client/orm/db_test.go @@ -18,11 +18,10 @@ import ( "errors" "github.com/beego/beego/v2/client/orm/clauses/order_clause" "github.com/beego/beego/v2/client/orm/internal/buffers" + "github.com/stretchr/testify/assert" "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/beego/beego/v2/client/orm/internal/models" ) @@ -248,7 +247,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues []interface{} }{ { - name: "set add/mul operator by dbBase", + name: "Set add/mul operator by dbBase", db: &dbBase{ ins: &dbBase{}, }, @@ -270,7 +269,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set min/except operator by dbBase", + name: "Set min/except operator by dbBase", db: &dbBase{ ins: &dbBase{}, }, @@ -292,7 +291,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set bitRShift/bitLShift operator by dbBase", + name: "Set bitRShift/bitLShift operator by dbBase", db: &dbBase{ ins: &dbBase{}, }, @@ -314,7 +313,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set bitAnd/bitOr/bitXOR operator by dbBase", + name: "Set bitAnd/bitOr/bitXOR operator by dbBase", db: &dbBase{ ins: &dbBase{}, }, @@ -339,7 +338,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues: []interface{}{int64(28), int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set add/mul operator by dbBasePostgres", + name: "Set add/mul operator by dbBasePostgres", db: &dbBase{ ins: newdbBasePostgres(), }, @@ -361,7 +360,7 @@ func TestDbBase_buildSetSQL(t 
*testing.T) { wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set min/except operator by dbBasePostgres", + name: "Set min/except operator by dbBasePostgres", db: &dbBase{ ins: newdbBasePostgres(), }, @@ -383,7 +382,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set bitRShift/bitLShift operator by dbBasePostgres", + name: "Set bitRShift/bitLShift operator by dbBasePostgres", db: &dbBase{ ins: newdbBasePostgres(), }, @@ -405,7 +404,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set bitAnd/bitOr/bitXOR operator by dbBasePostgres", + name: "Set bitAnd/bitOr/bitXOR operator by dbBasePostgres", db: &dbBase{ ins: newdbBasePostgres(), }, @@ -430,7 +429,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues: []interface{}{int64(28), int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set add/mul operator by dbBaseSqlite", + name: "Set add/mul operator by dbBaseSqlite", db: &dbBase{ ins: newdbBaseSqlite(), }, @@ -452,7 +451,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set min/except operator by dbBaseSqlite", + name: "Set min/except operator by dbBaseSqlite", db: &dbBase{ ins: newdbBaseSqlite(), }, @@ -474,7 +473,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set bitRShift/bitLShift operator by dbBaseSqlite", + name: "Set bitRShift/bitLShift operator by dbBaseSqlite", db: &dbBase{ ins: newdbBaseSqlite(), }, @@ -496,7 +495,7 @@ func TestDbBase_buildSetSQL(t *testing.T) { wantValues: []interface{}{"test_name", int64(12), int64(2), "test_origin_name", 18}, }, { - name: "set bitAnd/bitOr/bitXOR operator by 
dbBaseSqlite", + name: "Set bitAnd/bitOr/bitXOR operator by dbBaseSqlite", db: &dbBase{ ins: newdbBaseSqlite(), }, @@ -893,18 +892,15 @@ func TestDbBase_readBatchSQL(t *testing.T) { tCols := []string{"name", "score"} - mc := &modelCache{ - cache: make(map[string]*models.ModelInfo), - cacheByFullName: make(map[string]*models.ModelInfo), - } + mc := models.NewModelCacheHandler() - err := mc.register("", false, new(testTab), new(testTab1), new(testTab2)) + err := mc.Register("", false, new(testTab), new(testTab1), new(testTab2)) assert.Nil(t, err) - mc.bootstrap() + mc.Bootstrap() - mi, ok := mc.getByMd(new(testTab)) + mi, ok := mc.GetByMd(new(testTab)) assert.True(t, ok) diff --git a/client/orm/db_tidb.go b/client/orm/db_tidb.go index 8d91b091..863cf05a 100644 --- a/client/orm/db_tidb.go +++ b/client/orm/db_tidb.go @@ -26,12 +26,12 @@ type dbBaseTidb struct { var _ dbBaser = new(dbBaseTidb) -// get mysql operator. +// Get mysql operator. func (d *dbBaseTidb) OperatorSQL(operator string) string { return mysqlOperators[operator] } -// get mysql table field types. +// Get mysql table field types. func (d *dbBaseTidb) DbTypes() map[string]string { return mysqlTypes } diff --git a/client/orm/db_utils.go b/client/orm/db_utils.go index 45c95f85..b10ccd04 100644 --- a/client/orm/db_utils.go +++ b/client/orm/db_utils.go @@ -24,7 +24,7 @@ import ( "github.com/beego/beego/v2/client/orm/internal/models" ) -// get table alias. +// Get table alias. func getDbAlias(name string) *alias { if al, ok := dataBaseCache.get(name); ok { return al @@ -32,7 +32,7 @@ func getDbAlias(name string) *alias { panic(fmt.Errorf("unknown DataBase alias name %s", name)) } -// get pk column info. +// Get pk column info. func getExistPk(mi *models.ModelInfo, ind reflect.Value) (column string, value interface{}, exist bool) { fi := mi.Fields.Pk @@ -57,7 +57,7 @@ func getExistPk(mi *models.ModelInfo, ind reflect.Value) (column string, value i return } -// get Fields description as flatted string. 
+// Get Fields description as flatted string. func getFlatParams(fi *models.FieldInfo, args []interface{}, tz *time.Location) (params []interface{}) { outFor: for _, arg := range args { @@ -160,7 +160,7 @@ outFor: typ := val.Type() name := models.GetFullName(typ) var value interface{} - if mmi, ok := defaultModelCache.getByFullName(name); ok { + if mmi, ok := defaultModelCache.GetByFullName(name); ok { if _, vu, exist := getExistPk(mmi, val); exist { value = vu } diff --git a/client/orm/ddl.go b/client/orm/ddl.go new file mode 100644 index 00000000..f5d3c3a6 --- /dev/null +++ b/client/orm/ddl.go @@ -0,0 +1,195 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package orm + +import ( + "errors" + "fmt" + "strings" + + imodels "github.com/beego/beego/v2/client/orm/internal/models" +) + +// getDbDropSQL Get database scheme drop sql queries +func getDbDropSQL(mc *imodels.ModelCache, al *alias) (queries []string, err error) { + if mc.Empty() { + err = errors.New("no Model found, need Register your model") + return + } + + Q := al.DbBaser.TableQuote() + + for _, mi := range mc.AllOrdered() { + queries = append(queries, fmt.Sprintf(`DROP TABLE IF EXISTS %s%s%s`, Q, mi.Table, Q)) + } + return queries, nil +} + +// getDbCreateSQL Get database scheme creation sql queries +func getDbCreateSQL(mc *imodels.ModelCache, al *alias) (queries []string, tableIndexes map[string][]dbIndex, err error) { + if mc.Empty() { + err = errors.New("no Model found, need Register your model") + return + } + + Q := al.DbBaser.TableQuote() + T := al.DbBaser.DbTypes() + sep := fmt.Sprintf("%s, %s", Q, Q) + + tableIndexes = make(map[string][]dbIndex) + + for _, mi := range mc.AllOrdered() { + sql := fmt.Sprintf("-- %s\n", strings.Repeat("-", 50)) + sql += fmt.Sprintf("-- Table Structure for `%s`\n", mi.FullName) + sql += fmt.Sprintf("-- %s\n", strings.Repeat("-", 50)) + + sql += fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s%s%s (\n", Q, mi.Table, Q) + + columns := make([]string, 0, len(mi.Fields.FieldsDB)) + + sqlIndexes := [][]string{} + var commentIndexes []int // store comment indexes for postgres + + for i, fi := range mi.Fields.FieldsDB { + column := fmt.Sprintf(" %s%s%s ", Q, fi.Column, Q) + col := getColumnTyp(al, fi) + + if fi.Auto { + switch al.Driver { + case DRSqlite, DRPostgres: + column += T["auto"] + default: + column += col + " " + T["auto"] + } + } else if fi.Pk { + column += col + " " + T["pk"] + } else { + column += col + + if !fi.Null { + column += " " + "NOT NULL" + } + + // if fi.initial.String() != "" { + // column += " DEFAULT " + fi.initial.String() + // } + + // Append attribute DEFAULT + column += getColumnDefault(fi) + + if 
fi.Unique { + column += " " + "UNIQUE" + } + + if fi.Index { + sqlIndexes = append(sqlIndexes, []string{fi.Column}) + } + } + + if strings.Contains(column, "%COL%") { + column = strings.Replace(column, "%COL%", fi.Column, -1) + } + + if fi.Description != "" && al.Driver != DRSqlite { + if al.Driver == DRPostgres { + commentIndexes = append(commentIndexes, i) + } else { + column += " " + fmt.Sprintf("COMMENT '%s'", fi.Description) + } + } + + columns = append(columns, column) + } + + if mi.Model != nil { + allnames := imodels.GetTableUnique(mi.AddrField) + if !mi.Manual && len(mi.Uniques) > 0 { + allnames = append(allnames, mi.Uniques) + } + for _, names := range allnames { + cols := make([]string, 0, len(names)) + for _, name := range names { + if fi, ok := mi.Fields.GetByAny(name); ok && fi.DBcol { + cols = append(cols, fi.Column) + } else { + panic(fmt.Errorf("cannot found column `%s` when parse UNIQUE in `%s.TableUnique`", name, mi.FullName)) + } + } + column := fmt.Sprintf(" UNIQUE (%s%s%s)", Q, strings.Join(cols, sep), Q) + columns = append(columns, column) + } + } + + sql += strings.Join(columns, ",\n") + sql += "\n)" + + if al.Driver == DRMySQL { + var engine string + if mi.Model != nil { + engine = imodels.GetTableEngine(mi.AddrField) + } + if engine == "" { + engine = al.Engine + } + sql += " ENGINE=" + engine + } + + sql += ";" + if al.Driver == DRPostgres && len(commentIndexes) > 0 { + // append comments for postgres only + for _, index := range commentIndexes { + sql += fmt.Sprintf("\nCOMMENT ON COLUMN %s%s%s.%s%s%s is '%s';", + Q, + mi.Table, + Q, + Q, + mi.Fields.FieldsDB[index].Column, + Q, + mi.Fields.FieldsDB[index].Description) + } + } + queries = append(queries, sql) + + if mi.Model != nil { + for _, names := range imodels.GetTableIndex(mi.AddrField) { + cols := make([]string, 0, len(names)) + for _, name := range names { + if fi, ok := mi.Fields.GetByAny(name); ok && fi.DBcol { + cols = append(cols, fi.Column) + } else { + 
panic(fmt.Errorf("cannot found column `%s` when parse INDEX in `%s.TableIndex`", name, mi.FullName)) + } + } + sqlIndexes = append(sqlIndexes, cols) + } + } + + for _, names := range sqlIndexes { + name := mi.Table + "_" + strings.Join(names, "_") + cols := strings.Join(names, sep) + sql := fmt.Sprintf("CREATE INDEX %s%s%s ON %s%s%s (%s%s%s);", Q, name, Q, Q, mi.Table, Q, Q, cols, Q) + + index := dbIndex{} + index.Table = mi.Table + index.Name = name + index.SQL = sql + + tableIndexes[mi.Table] = append(tableIndexes[mi.Table], index) + } + + } + + return +} diff --git a/client/orm/model_test.go b/client/orm/ddl_test.go similarity index 96% rename from client/orm/model_test.go rename to client/orm/ddl_test.go index 8aee8d89..265ca2ee 100644 --- a/client/orm/model_test.go +++ b/client/orm/ddl_test.go @@ -17,6 +17,8 @@ package orm import ( "testing" + "github.com/beego/beego/v2/client/orm/internal/models" + "github.com/stretchr/testify/assert" ) @@ -49,7 +51,7 @@ func TestGetDbCreateSQLWithComment(t *testing.T) { wantErr error } al := getDbAlias("default") - testModelCache := NewModelCacheHandler() + testModelCache := models.NewModelCacheHandler() var testCases []TestCase switch al.Driver { case DRMySQL: @@ -67,10 +69,10 @@ func TestGetDbCreateSQLWithComment(t *testing.T) { } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - testModelCache.clean() - err := testModelCache.register("", true, tc.model) + testModelCache.Clean() + err := testModelCache.Register("", true, tc.model) assert.NoError(t, err) - queries, _, err := testModelCache.getDbCreateSQL(al) + queries, _, err := getDbCreateSQL(testModelCache, al) assert.Equal(t, tc.wantSQL, queries[0]) assert.Equal(t, tc.wantErr, err) }) diff --git a/client/orm/filter_orm_decorator.go b/client/orm/filter_orm_decorator.go index c8a28967..58d6e2c0 100644 --- a/client/orm/filter_orm_decorator.go +++ b/client/orm/filter_orm_decorator.go @@ -82,7 +82,7 @@ func (f *filterOrmDecorator) Read(md interface{}, cols 
...string) error { } func (f *filterOrmDecorator) ReadWithCtx(ctx context.Context, md interface{}, cols ...string) error { - mi, _ := defaultModelCache.getByMd(md) + mi, _ := defaultModelCache.GetByMd(md) inv := &Invocation{ Method: "ReadWithCtx", Args: []interface{}{md, cols}, @@ -104,7 +104,7 @@ func (f *filterOrmDecorator) ReadForUpdate(md interface{}, cols ...string) error } func (f *filterOrmDecorator) ReadForUpdateWithCtx(ctx context.Context, md interface{}, cols ...string) error { - mi, _ := defaultModelCache.getByMd(md) + mi, _ := defaultModelCache.GetByMd(md) inv := &Invocation{ Method: "ReadForUpdateWithCtx", Args: []interface{}{md, cols}, @@ -126,7 +126,7 @@ func (f *filterOrmDecorator) ReadOrCreate(md interface{}, col1 string, cols ...s } func (f *filterOrmDecorator) ReadOrCreateWithCtx(ctx context.Context, md interface{}, col1 string, cols ...string) (bool, int64, error) { - mi, _ := defaultModelCache.getByMd(md) + mi, _ := defaultModelCache.GetByMd(md) inv := &Invocation{ Method: "ReadOrCreateWithCtx", Args: []interface{}{md, col1, cols}, @@ -148,7 +148,7 @@ func (f *filterOrmDecorator) LoadRelated(md interface{}, name string, args ...ut } func (f *filterOrmDecorator) LoadRelatedWithCtx(ctx context.Context, md interface{}, name string, args ...utils.KV) (int64, error) { - mi, _ := defaultModelCache.getByMd(md) + mi, _ := defaultModelCache.GetByMd(md) inv := &Invocation{ Method: "LoadRelatedWithCtx", Args: []interface{}{md, name, args}, @@ -166,7 +166,7 @@ func (f *filterOrmDecorator) LoadRelatedWithCtx(ctx context.Context, md interfac } func (f *filterOrmDecorator) QueryM2M(md interface{}, name string) QueryM2Mer { - mi, _ := defaultModelCache.getByMd(md) + mi, _ := defaultModelCache.GetByMd(md) inv := &Invocation{ Method: "QueryM2M", Args: []interface{}{md, name}, @@ -206,7 +206,7 @@ func (f *filterOrmDecorator) QueryTable(ptrStructOrTableName interface{}) QueryS md = ptrStructOrTableName } - if m, ok := defaultModelCache.getByFullName(name); ok { + 
if m, ok := defaultModelCache.GetByFullName(name); ok { mi = m } @@ -260,7 +260,7 @@ func (f *filterOrmDecorator) Insert(md interface{}) (int64, error) { } func (f *filterOrmDecorator) InsertWithCtx(ctx context.Context, md interface{}) (int64, error) { - mi, _ := defaultModelCache.getByMd(md) + mi, _ := defaultModelCache.GetByMd(md) inv := &Invocation{ Method: "InsertWithCtx", Args: []interface{}{md}, @@ -282,7 +282,7 @@ func (f *filterOrmDecorator) InsertOrUpdate(md interface{}, colConflitAndArgs .. } func (f *filterOrmDecorator) InsertOrUpdateWithCtx(ctx context.Context, md interface{}, colConflitAndArgs ...string) (int64, error) { - mi, _ := defaultModelCache.getByMd(md) + mi, _ := defaultModelCache.GetByMd(md) inv := &Invocation{ Method: "InsertOrUpdateWithCtx", Args: []interface{}{md, colConflitAndArgs}, @@ -315,7 +315,7 @@ func (f *filterOrmDecorator) InsertMultiWithCtx(ctx context.Context, bulk int, m if (sind.Kind() == reflect.Array || sind.Kind() == reflect.Slice) && sind.Len() > 0 { ind := reflect.Indirect(sind.Index(0)) md = ind.Interface() - mi, _ = defaultModelCache.getByMd(md) + mi, _ = defaultModelCache.GetByMd(md) } inv := &Invocation{ @@ -339,7 +339,7 @@ func (f *filterOrmDecorator) Update(md interface{}, cols ...string) (int64, erro } func (f *filterOrmDecorator) UpdateWithCtx(ctx context.Context, md interface{}, cols ...string) (int64, error) { - mi, _ := defaultModelCache.getByMd(md) + mi, _ := defaultModelCache.GetByMd(md) inv := &Invocation{ Method: "UpdateWithCtx", Args: []interface{}{md, cols}, @@ -361,7 +361,7 @@ func (f *filterOrmDecorator) Delete(md interface{}, cols ...string) (int64, erro } func (f *filterOrmDecorator) DeleteWithCtx(ctx context.Context, md interface{}, cols ...string) (int64, error) { - mi, _ := defaultModelCache.getByMd(md) + mi, _ := defaultModelCache.GetByMd(md) inv := &Invocation{ Method: "DeleteWithCtx", Args: []interface{}{md, cols}, diff --git a/client/orm/models.go b/client/orm/internal/models/models.go 
similarity index 51% rename from client/orm/models.go rename to client/orm/internal/models/models.go index 542ced59..e105a494 100644 --- a/client/orm/models.go +++ b/client/orm/internal/models/models.go @@ -1,4 +1,4 @@ -// Copyright 2014 beego Author. All Rights Reserved. +// Copyright 2023 beego. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,78 +12,76 @@ // See the License for the specific language governing permissions and // limitations under the License. -package orm +package models import ( - "errors" "fmt" "reflect" "runtime/debug" "strings" "sync" - - imodels "github.com/beego/beego/v2/client/orm/internal/models" ) -var defaultModelCache = NewModelCacheHandler() - -// model info collection -type modelCache struct { +// ModelCache info collection +type ModelCache struct { sync.RWMutex // only used outsite for bootStrap orders []string - cache map[string]*imodels.ModelInfo - cacheByFullName map[string]*imodels.ModelInfo + cache map[string]*ModelInfo + cacheByFullName map[string]*ModelInfo done bool } -// NewModelCacheHandler generator of modelCache -func NewModelCacheHandler() *modelCache { - return &modelCache{ - cache: make(map[string]*imodels.ModelInfo), - cacheByFullName: make(map[string]*imodels.ModelInfo), +// NewModelCacheHandler generator of ModelCache +func NewModelCacheHandler() *ModelCache { + return &ModelCache{ + cache: make(map[string]*ModelInfo), + cacheByFullName: make(map[string]*ModelInfo), } } -// get all model info -func (mc *modelCache) all() map[string]*imodels.ModelInfo { - m := make(map[string]*imodels.ModelInfo, len(mc.cache)) +// All return all model info +func (mc *ModelCache) All() map[string]*ModelInfo { + m := make(map[string]*ModelInfo, len(mc.cache)) for k, v := range mc.cache { m[k] = v } return m } -// get ordered model info -func (mc *modelCache) allOrdered() []*imodels.ModelInfo { - m := 
make([]*imodels.ModelInfo, 0, len(mc.orders)) +func (mc *ModelCache) Empty() bool { + return len(mc.cache) == 0 +} + +func (mc *ModelCache) AllOrdered() []*ModelInfo { + m := make([]*ModelInfo, 0, len(mc.orders)) for _, table := range mc.orders { m = append(m, mc.cache[table]) } return m } -// get model info by table name -func (mc *modelCache) get(table string) (mi *imodels.ModelInfo, ok bool) { +// Get model info by table name +func (mc *ModelCache) Get(table string) (mi *ModelInfo, ok bool) { mi, ok = mc.cache[table] return } -// get model info by full name -func (mc *modelCache) getByFullName(name string) (mi *imodels.ModelInfo, ok bool) { +// GetByFullName model info by full name +func (mc *ModelCache) GetByFullName(name string) (mi *ModelInfo, ok bool) { mi, ok = mc.cacheByFullName[name] return } -func (mc *modelCache) getByMd(md interface{}) (*imodels.ModelInfo, bool) { +func (mc *ModelCache) GetByMd(md interface{}) (*ModelInfo, bool) { val := reflect.ValueOf(md) ind := reflect.Indirect(val) typ := ind.Type() - name := imodels.GetFullName(typ) - return mc.getByFullName(name) + name := GetFullName(typ) + return mc.GetByFullName(name) } -// set model info to collection -func (mc *modelCache) set(table string, mi *imodels.ModelInfo) *imodels.ModelInfo { +// Set model info to collection +func (mc *ModelCache) Set(table string, mi *ModelInfo) *ModelInfo { mii := mc.cache[table] mc.cache[table] = mi mc.cacheByFullName[mi.FullName] = mi @@ -93,19 +91,19 @@ func (mc *modelCache) set(table string, mi *imodels.ModelInfo) *imodels.ModelInf return mii } -// clean all model info. -func (mc *modelCache) clean() { +// Clean All model info. 
+func (mc *ModelCache) Clean() { mc.Lock() defer mc.Unlock() mc.orders = make([]string, 0) - mc.cache = make(map[string]*imodels.ModelInfo) - mc.cacheByFullName = make(map[string]*imodels.ModelInfo) + mc.cache = make(map[string]*ModelInfo) + mc.cacheByFullName = make(map[string]*ModelInfo) mc.done = false } -// bootstrap bootstrap for models -func (mc *modelCache) bootstrap() { +// Bootstrap Bootstrap for models +func (mc *ModelCache) Bootstrap() { mc.Lock() defer mc.Unlock() if mc.done { @@ -113,16 +111,11 @@ func (mc *modelCache) bootstrap() { } var ( err error - models map[string]*imodels.ModelInfo + models map[string]*ModelInfo ) - if dataBaseCache.getDefault() == nil { - err = fmt.Errorf("must have one register DataBase alias named `default`") - goto end - } - - // set rel and reverse model - // RelManyToMany set the relTable - models = mc.all() + // Set rel and reverse model + // RelManyToMany Set the relTable + models = mc.All() for _, mi := range models { for _, fi := range mi.Fields.Columns { if fi.Rel || fi.Reverse { @@ -130,11 +123,11 @@ func (mc *modelCache) bootstrap() { if fi.FieldType == RelReverseMany || fi.FieldType == RelManyToMany { elm = elm.Elem() } - // check the rel or reverse model already register - name := imodels.GetFullName(elm) - mii, ok := mc.getByFullName(name) + // check the rel or reverse model already Register + name := GetFullName(elm) + mii, ok := mc.GetByFullName(name) if !ok || mii.Pkg != elm.PkgPath() { - err = fmt.Errorf("can not find rel in field `%s`, `%s` may be miss register", fi.FullName, elm.String()) + err = fmt.Errorf("can not find rel in field `%s`, `%s` may be miss Register", fi.FullName, elm.String()) goto end } fi.RelModelInfo = mii @@ -144,7 +137,7 @@ func (mc *modelCache) bootstrap() { if fi.RelThrough != "" { if i := strings.LastIndex(fi.RelThrough, "."); i != -1 && len(fi.RelThrough) > (i+1) { pn := fi.RelThrough[:i] - rmi, ok := mc.getByFullName(fi.RelThrough) + rmi, ok := mc.GetByFullName(fi.RelThrough) if 
!ok || pn != rmi.Pkg { err = fmt.Errorf("field `%s` wrong rel_through value `%s` cannot find table", fi.FullName, fi.RelThrough) goto end @@ -156,11 +149,11 @@ func (mc *modelCache) bootstrap() { goto end } } else { - i := imodels.NewM2MModelInfo(mi, mii) + i := NewM2MModelInfo(mi, mii) if fi.RelTable != "" { i.Table = fi.RelTable } - if v := mc.set(i.Table, i); v != nil { + if v := mc.Set(i.Table, i); v != nil { err = fmt.Errorf("the rel table name `%s` already registered, cannot be use, please change one", fi.RelTable) goto end } @@ -176,7 +169,7 @@ func (mc *modelCache) bootstrap() { // check the rel filed while the relModelInfo also has filed point to current model // if not exist, add a new field to the relModelInfo - models = mc.all() + models = mc.All() for _, mi := range models { for _, fi := range mi.Fields.FieldsRel { switch fi.FieldType { @@ -190,7 +183,7 @@ func (mc *modelCache) bootstrap() { } if !inModel { rmi := fi.RelModelInfo - ffi := new(imodels.FieldInfo) + ffi := new(FieldInfo) ffi.Name = mi.Name ffi.Column = ffi.Name ffi.FullName = rmi.FullName + "." 
+ ffi.Name @@ -221,7 +214,7 @@ func (mc *modelCache) bootstrap() { } } - models = mc.all() + models = mc.All() for _, mi := range models { for _, fi := range mi.Fields.FieldsRel { switch fi.FieldType { @@ -247,7 +240,7 @@ func (mc *modelCache) bootstrap() { } } - models = mc.all() + models = mc.All() for _, mi := range models { for _, fi := range mi.Fields.FieldsReverse { switch fi.FieldType { @@ -320,14 +313,14 @@ end: mc.done = true } -// register register models to model cache -func (mc *modelCache) register(prefixOrSuffixStr string, prefixOrSuffix bool, models ...interface{}) (err error) { +// Register Register models to model cache +func (mc *ModelCache) Register(prefixOrSuffixStr string, prefixOrSuffix bool, models ...interface{}) (err error) { for _, model := range models { val := reflect.ValueOf(model) typ := reflect.Indirect(val).Type() if val.Kind() != reflect.Ptr { - err = fmt.Errorf(" cannot use non-ptr model struct `%s`", imodels.GetFullName(typ)) + err = fmt.Errorf(" cannot use non-ptr model struct `%s`", GetFullName(typ)) return } // For this case: @@ -340,7 +333,7 @@ func (mc *modelCache) register(prefixOrSuffixStr string, prefixOrSuffix bool, mo if val.Elem().Kind() == reflect.Slice { val = reflect.New(val.Elem().Type().Elem()) } - table := imodels.GetTableName(val) + table := GetTableName(val) if prefixOrSuffixStr != "" { if prefixOrSuffix { @@ -351,17 +344,17 @@ func (mc *modelCache) register(prefixOrSuffixStr string, prefixOrSuffix bool, mo } // models's fullname is pkgpath + struct name - name := imodels.GetFullName(typ) - if _, ok := mc.getByFullName(name); ok { - err = fmt.Errorf(" model `%s` repeat register, must be unique\n", name) + name := GetFullName(typ) + if _, ok := mc.GetByFullName(name); ok { + err = fmt.Errorf(" model `%s` repeat Register, must be unique\n", name) return } - if _, ok := mc.get(table); ok { + if _, ok := mc.Get(table); ok { return nil } - mi := imodels.NewModelInfo(val) + mi := NewModelInfo(val) if mi.Fields.Pk == 
nil { outFor: for _, fi := range mi.Fields.FieldsDB { @@ -382,185 +375,7 @@ func (mc *modelCache) register(prefixOrSuffixStr string, prefixOrSuffix bool, mo mi.Model = model mi.Manual = true - mc.set(table, mi) + mc.Set(table, mi) } return } - -// getDbDropSQL get database scheme drop sql queries -func (mc *modelCache) getDbDropSQL(al *alias) (queries []string, err error) { - if len(mc.cache) == 0 { - err = errors.New("no Model found, need register your model") - return - } - - Q := al.DbBaser.TableQuote() - - for _, mi := range mc.allOrdered() { - queries = append(queries, fmt.Sprintf(`DROP TABLE IF EXISTS %s%s%s`, Q, mi.Table, Q)) - } - return queries, nil -} - -// getDbCreateSQL get database scheme creation sql queries -func (mc *modelCache) getDbCreateSQL(al *alias) (queries []string, tableIndexes map[string][]dbIndex, err error) { - if len(mc.cache) == 0 { - err = errors.New("no Model found, need register your model") - return - } - - Q := al.DbBaser.TableQuote() - T := al.DbBaser.DbTypes() - sep := fmt.Sprintf("%s, %s", Q, Q) - - tableIndexes = make(map[string][]dbIndex) - - for _, mi := range mc.allOrdered() { - sql := fmt.Sprintf("-- %s\n", strings.Repeat("-", 50)) - sql += fmt.Sprintf("-- Table Structure for `%s`\n", mi.FullName) - sql += fmt.Sprintf("-- %s\n", strings.Repeat("-", 50)) - - sql += fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s%s%s (\n", Q, mi.Table, Q) - - columns := make([]string, 0, len(mi.Fields.FieldsDB)) - - sqlIndexes := [][]string{} - var commentIndexes []int // store comment indexes for postgres - - for i, fi := range mi.Fields.FieldsDB { - column := fmt.Sprintf(" %s%s%s ", Q, fi.Column, Q) - col := getColumnTyp(al, fi) - - if fi.Auto { - switch al.Driver { - case DRSqlite, DRPostgres: - column += T["auto"] - default: - column += col + " " + T["auto"] - } - } else if fi.Pk { - column += col + " " + T["pk"] - } else { - column += col - - if !fi.Null { - column += " " + "NOT NULL" - } - - // if fi.initial.String() != "" { - // column += " 
DEFAULT " + fi.initial.String() - // } - - // Append attribute DEFAULT - column += getColumnDefault(fi) - - if fi.Unique { - column += " " + "UNIQUE" - } - - if fi.Index { - sqlIndexes = append(sqlIndexes, []string{fi.Column}) - } - } - - if strings.Contains(column, "%COL%") { - column = strings.Replace(column, "%COL%", fi.Column, -1) - } - - if fi.Description != "" && al.Driver != DRSqlite { - if al.Driver == DRPostgres { - commentIndexes = append(commentIndexes, i) - } else { - column += " " + fmt.Sprintf("COMMENT '%s'", fi.Description) - } - } - - columns = append(columns, column) - } - - if mi.Model != nil { - allnames := imodels.GetTableUnique(mi.AddrField) - if !mi.Manual && len(mi.Uniques) > 0 { - allnames = append(allnames, mi.Uniques) - } - for _, names := range allnames { - cols := make([]string, 0, len(names)) - for _, name := range names { - if fi, ok := mi.Fields.GetByAny(name); ok && fi.DBcol { - cols = append(cols, fi.Column) - } else { - panic(fmt.Errorf("cannot found column `%s` when parse UNIQUE in `%s.TableUnique`", name, mi.FullName)) - } - } - column := fmt.Sprintf(" UNIQUE (%s%s%s)", Q, strings.Join(cols, sep), Q) - columns = append(columns, column) - } - } - - sql += strings.Join(columns, ",\n") - sql += "\n)" - - if al.Driver == DRMySQL { - var engine string - if mi.Model != nil { - engine = imodels.GetTableEngine(mi.AddrField) - } - if engine == "" { - engine = al.Engine - } - sql += " ENGINE=" + engine - } - - sql += ";" - if al.Driver == DRPostgres && len(commentIndexes) > 0 { - // append comments for postgres only - for _, index := range commentIndexes { - sql += fmt.Sprintf("\nCOMMENT ON COLUMN %s%s%s.%s%s%s is '%s';", - Q, - mi.Table, - Q, - Q, - mi.Fields.FieldsDB[index].Column, - Q, - mi.Fields.FieldsDB[index].Description) - } - } - queries = append(queries, sql) - - if mi.Model != nil { - for _, names := range imodels.GetTableIndex(mi.AddrField) { - cols := make([]string, 0, len(names)) - for _, name := range names { - if fi, ok := 
mi.Fields.GetByAny(name); ok && fi.DBcol { - cols = append(cols, fi.Column) - } else { - panic(fmt.Errorf("cannot found column `%s` when parse INDEX in `%s.TableIndex`", name, mi.FullName)) - } - } - sqlIndexes = append(sqlIndexes, cols) - } - } - - for _, names := range sqlIndexes { - name := mi.Table + "_" + strings.Join(names, "_") - cols := strings.Join(names, sep) - sql := fmt.Sprintf("CREATE INDEX %s%s%s ON %s%s%s (%s%s%s);", Q, name, Q, Q, mi.Table, Q, Q, cols, Q) - - index := dbIndex{} - index.Table = mi.Table - index.Name = name - index.SQL = sql - - tableIndexes[mi.Table] = append(tableIndexes[mi.Table], index) - } - - } - - return -} - -// ResetModelCache Clean model cache. Then you can re-RegisterModel. -// Common use this api for test case. -func ResetModelCache() { - defaultModelCache.clean() -} diff --git a/client/orm/internal/models/models_test.go b/client/orm/internal/models/models_test.go new file mode 100644 index 00000000..9a9ccfe2 --- /dev/null +++ b/client/orm/internal/models/models_test.go @@ -0,0 +1,49 @@ +package models + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type Interface struct { + Id int + Name string + + Index1 string + Index2 string + + Unique1 string + Unique2 string +} + +func (i *Interface) TableIndex() [][]string { + return [][]string{{"index1"}, {"index2"}} +} + +func (i *Interface) TableUnique() [][]string { + return [][]string{{"unique1"}, {"unique2"}} +} + +func (i *Interface) TableName() string { + return "INTERFACE_" +} + +func (i *Interface) TableEngine() string { + return "innodb" +} + +func TestDbBase_GetTables(t *testing.T) { + c := NewModelCacheHandler() + c.Register("", true, &Interface{}) + mi, ok := c.Get("INTERFACE_") + assert.True(t, ok) + assert.NotNil(t, mi) + + engine := GetTableEngine(mi.AddrField) + assert.Equal(t, "innodb", engine) + uniques := GetTableUnique(mi.AddrField) + assert.Equal(t, [][]string{{"unique1"}, {"unique2"}}, uniques) + indexes := GetTableIndex(mi.AddrField) + 
assert.Equal(t, [][]string{{"index1"}, {"index2"}}, indexes) +} diff --git a/client/orm/invocation.go b/client/orm/invocation.go index 48fdbf6e..21cbdc42 100644 --- a/client/orm/invocation.go +++ b/client/orm/invocation.go @@ -26,7 +26,7 @@ type Invocation struct { Method string // Md may be nil in some cases. It depends on method Md interface{} - // the args are all arguments except context.Context + // the args are All arguments except context.Context Args []interface{} mi *models.ModelInfo diff --git a/client/orm/model_utils_test.go b/client/orm/model_utils_test.go index d3d57cdf..3e8b2042 100644 --- a/client/orm/model_utils_test.go +++ b/client/orm/model_utils_test.go @@ -13,52 +13,3 @@ // limitations under the License. package orm - -import ( - "testing" - - "github.com/beego/beego/v2/client/orm/internal/models" - - "github.com/stretchr/testify/assert" -) - -type Interface struct { - Id int - Name string - - Index1 string - Index2 string - - Unique1 string - Unique2 string -} - -func (i *Interface) TableIndex() [][]string { - return [][]string{{"index1"}, {"index2"}} -} - -func (i *Interface) TableUnique() [][]string { - return [][]string{{"unique1"}, {"unique2"}} -} - -func (i *Interface) TableName() string { - return "INTERFACE_" -} - -func (i *Interface) TableEngine() string { - return "innodb" -} - -func TestDbBase_GetTables(t *testing.T) { - RegisterModel(&Interface{}) - mi, ok := defaultModelCache.get("INTERFACE_") - assert.True(t, ok) - assert.NotNil(t, mi) - - engine := models.GetTableEngine(mi.AddrField) - assert.Equal(t, "innodb", engine) - uniques := models.GetTableUnique(mi.AddrField) - assert.Equal(t, [][]string{{"unique1"}, {"unique2"}}, uniques) - indexes := models.GetTableIndex(mi.AddrField) - assert.Equal(t, [][]string{{"index1"}, {"index2"}}, indexes) -} diff --git a/client/orm/models_boot.go b/client/orm/models_boot.go index 6916f3ba..7ecd999e 100644 --- a/client/orm/models_boot.go +++ b/client/orm/models_boot.go @@ -14,27 +14,47 @@ package 
orm -// RegisterModel register models +import ( + "fmt" + "runtime/debug" + + imodels "github.com/beego/beego/v2/client/orm/internal/models" +) + +var defaultModelCache = imodels.NewModelCacheHandler() + +// RegisterModel Register models func RegisterModel(models ...interface{}) { RegisterModelWithPrefix("", models...) } -// RegisterModelWithPrefix register models with a prefix +// RegisterModelWithPrefix Register models with a prefix func RegisterModelWithPrefix(prefix string, models ...interface{}) { - if err := defaultModelCache.register(prefix, true, models...); err != nil { + if err := defaultModelCache.Register(prefix, true, models...); err != nil { panic(err) } } -// RegisterModelWithSuffix register models with a suffix +// RegisterModelWithSuffix Register models with a suffix func RegisterModelWithSuffix(suffix string, models ...interface{}) { - if err := defaultModelCache.register(suffix, false, models...); err != nil { + if err := defaultModelCache.Register(suffix, false, models...); err != nil { panic(err) } } -// BootStrap bootstrap models. -// make all model parsed and can not add more models +// BootStrap Bootstrap models. +// make All model parsed and can not add more models func BootStrap() { - defaultModelCache.bootstrap() + if dataBaseCache.getDefault() == nil { + fmt.Println("must have one Register DataBase alias named `default`") + debug.PrintStack() + return + } + defaultModelCache.Bootstrap() +} + +// ResetModelCache Clean model cache. Then you can re-RegisterModel. +// Common use this api for test case. +func ResetModelCache() { + defaultModelCache.Clean() } diff --git a/client/orm/models_fields.go b/client/orm/models_fields.go index 4f07ea18..1fda9f1e 100644 --- a/client/orm/models_fields.go +++ b/client/orm/models_fields.go @@ -74,11 +74,11 @@ var _ Fielder = new(CharField) // Has a few extra, optional attr tag: // // auto_now: -// Automatically set the field to now every time the object is saved. Useful for “last-modified” timestamps. 
+// Automatically Set the field to now every time the object is saved. Useful for “last-modified” timestamps. // Note that the current date is always used; it’s not just a default value that you can override. // // auto_now_add: -// Automatically set the field to now when the object is first created. Useful for creation of timestamps. +// Automatically Set the field to now when the object is first created. Useful for creation of timestamps. // Note that the current date is always used; it’s not just a default value that you can override. // // eg: `orm:"auto_now"` or `orm:"auto_now_add"` @@ -91,11 +91,11 @@ var _ Fielder = new(TimeField) // Has a few extra, optional attr tag: // // auto_now: -// Automatically set the field to now every time the object is saved. Useful for “last-modified” timestamps. +// Automatically Set the field to now every time the object is saved. Useful for “last-modified” timestamps. // Note that the current date is always used; it’s not just a default value that you can override. // // auto_now_add: -// Automatically set the field to now when the object is first created. Useful for creation of timestamps. +// Automatically Set the field to now when the object is first created. Useful for creation of timestamps. // Note that the current date is always used; it’s not just a default value that you can override. // // eg: `orm:"auto_now"` or `orm:"auto_now_add"` diff --git a/client/orm/models_test.go b/client/orm/models_test.go index 52bafd9e..1818fcdb 100644 --- a/client/orm/models_test.go +++ b/client/orm/models_test.go @@ -507,11 +507,11 @@ var helpinfo = `need driver and source! 
usage: - go get -u github.com/beego/beego/v2/client/orm - go get -u github.com/go-sql-driver/mysql - go get -u github.com/mattn/go-sqlite3 - go get -u github.com/lib/pq - go get -u github.com/pingcap/tidb + go Get -u github.com/beego/beego/v2/client/orm + go Get -u github.com/go-sql-driver/mysql + go Get -u github.com/mattn/go-sqlite3 + go Get -u github.com/lib/pq + go Get -u github.com/pingcap/tidb #### MySQL mysql -u root -e 'create database orm_test;' @@ -550,7 +550,7 @@ func init() { err := RegisterDataBase("default", DBARGS.Driver, DBARGS.Source, MaxIdleConnections(20)) if err != nil { - panic(fmt.Sprintf("can not register database: %v", err)) + panic(fmt.Sprintf("can not Register database: %v", err)) } alias := getDbAlias("default") diff --git a/client/orm/orm.go b/client/orm/orm.go index b7048409..2f91eab6 100644 --- a/client/orm/orm.go +++ b/client/orm/orm.go @@ -106,7 +106,7 @@ var ( _ DriverGetter = new(ormBase) ) -// get model info and model reflect value +// Get model info and model reflect value func (*ormBase) getMi(md interface{}) (mi *models.ModelInfo) { val := reflect.ValueOf(md) ind := reflect.Indirect(val) @@ -115,7 +115,7 @@ func (*ormBase) getMi(md interface{}) (mi *models.ModelInfo) { return } -// get need ptr model info and model reflect value +// Get need ptr model info and model reflect value func (*ormBase) getPtrMiInd(md interface{}) (mi *models.ModelInfo, ind reflect.Value) { val := reflect.ValueOf(md) ind = reflect.Indirect(val) @@ -129,13 +129,13 @@ func (*ormBase) getPtrMiInd(md interface{}) (mi *models.ModelInfo, ind reflect.V func getTypeMi(mdTyp reflect.Type) *models.ModelInfo { name := models.GetFullName(mdTyp) - if mi, ok := defaultModelCache.getByFullName(name); ok { + if mi, ok := defaultModelCache.GetByFullName(name); ok { return mi } panic(fmt.Errorf(" table: `%s` not found, make sure it was registered with `RegisterModel()`", name)) } -// get field info from model info by given field name +// Get field info from model info 
by given field name func (*ormBase) getFieldInfo(mi *models.ModelInfo, name string) *models.FieldInfo { fi, ok := mi.Fields.GetByAny(name) if !ok { @@ -208,7 +208,7 @@ func (o *ormBase) InsertWithCtx(ctx context.Context, md interface{}) (int64, err return id, nil } -// set auto pk field +// Set auto pk field func (*ormBase) setPk(mi *models.ModelInfo, ind reflect.Value, id int64) { if mi.Fields.Pk != nil && mi.Fields.Pk.Auto { if mi.Fields.Pk.FieldType&IsPositiveIntegerField > 0 { @@ -276,7 +276,7 @@ func (o *ormBase) InsertOrUpdateWithCtx(ctx context.Context, md interface{}, col } // update model to database. -// cols set the Columns those want to update. +// cols Set the Columns those want to update. func (o *ormBase) Update(md interface{}, cols ...string) (int64, error) { return o.UpdateWithCtx(context.Background(), md, cols...) } @@ -396,7 +396,7 @@ func (o *ormBase) LoadRelatedWithCtx(_ context.Context, md interface{}, name str return nums, err } -// get QuerySeter for related models to md model +// Get QuerySeter for related models to md model func (o *ormBase) queryRelated(md interface{}, name string) (*models.ModelInfo, *models.FieldInfo, reflect.Value, *querySet) { mi, ind := o.getPtrMiInd(md) fi := o.getFieldInfo(mi, name) @@ -428,7 +428,7 @@ func (o *ormBase) queryRelated(md interface{}, name string) (*models.ModelInfo, return mi, fi, ind, qs } -// get reverse relation QuerySeter +// Get reverse relation QuerySeter func (o *ormBase) getReverseQs(md interface{}, mi *models.ModelInfo, fi *models.FieldInfo) *querySet { switch fi.FieldType { case RelReverseOne, RelReverseMany: @@ -449,7 +449,7 @@ func (o *ormBase) getReverseQs(md interface{}, mi *models.ModelInfo, fi *models. 
return q } -// get relation QuerySeter +// Get relation QuerySeter func (o *ormBase) getRelQs(md interface{}, mi *models.ModelInfo, fi *models.FieldInfo) *querySet { switch fi.FieldType { case RelOneToOne, RelForeignKey, RelManyToMany: @@ -476,12 +476,12 @@ func (o *ormBase) QueryTable(ptrStructOrTableName interface{}) (qs QuerySeter) { var name string if table, ok := ptrStructOrTableName.(string); ok { name = models.NameStrategyMap[models.DefaultNameStrategy](table) - if mi, ok := defaultModelCache.get(name); ok { + if mi, ok := defaultModelCache.Get(name); ok { qs = newQuerySet(o, mi) } } else { name = models.GetFullName(iutils.IndirectType(reflect.TypeOf(ptrStructOrTableName))) - if mi, ok := defaultModelCache.getByFullName(name); ok { + if mi, ok := defaultModelCache.GetByFullName(name); ok { qs = newQuerySet(o, mi) } } @@ -491,13 +491,13 @@ func (o *ormBase) QueryTable(ptrStructOrTableName interface{}) (qs QuerySeter) { return qs } -// NOTE: this method is deprecated, context parameter will not take effect. +// Deprecated: QueryTableWithCtx is deprecated, context parameter will not take effect. func (o *ormBase) QueryTableWithCtx(_ context.Context, ptrStructOrTableName interface{}) (qs QuerySeter) { logs.Warn("QueryTableWithCtx is DEPRECATED. Use methods with `WithCtx` suffix on QuerySeter as replacement please.") return o.QueryTable(ptrStructOrTableName) } -// return a raw query seter for raw sql string. +// Raw return a raw query seter for raw sql string. func (o *ormBase) Raw(query string, args ...interface{}) RawSeter { return o.RawWithCtx(context.Background(), query, args...) 
} @@ -506,12 +506,12 @@ func (o *ormBase) RawWithCtx(_ context.Context, query string, args ...interface{ return newRawSet(o, query, args) } -// return current using database Driver +// Driver return current using database Driver func (o *ormBase) Driver() Driver { return driver(o.alias.Name) } -// return sql.DBStats for current database +// DBStats return sql.DBStats for current database func (o *ormBase) DBStats() *sql.DBStats { if o.alias != nil && o.alias.DB != nil { stats := o.alias.DB.DB.Stats() diff --git a/client/orm/orm_log.go b/client/orm/orm_log.go index 50ebc3a6..b1476b7b 100644 --- a/client/orm/orm_log.go +++ b/client/orm/orm_log.go @@ -28,7 +28,7 @@ import ( type Log = logs.Log -// NewLog set io.Writer to create a Logger. +// NewLog Set io.Writer to create a Logger. func NewLog(out io.Writer) *logs.Log { d := new(logs.Log) d.Logger = log.New(out, "[ORM]", log.LstdFlags) diff --git a/client/orm/orm_querym2m.go b/client/orm/orm_querym2m.go index 6dc66b3d..daae6a43 100644 --- a/client/orm/orm_querym2m.go +++ b/client/orm/orm_querym2m.go @@ -132,7 +132,7 @@ func (o *queryM2M) ExistWithCtx(ctx context.Context, md interface{}) bool { Filter(fi.ReverseFieldInfoTwo.Name, md).ExistWithCtx(ctx) } -// clean all models in related of origin model +// Clean All models in related of origin model func (o *queryM2M) Clear() (int64, error) { return o.ClearWithCtx(context.Background()) } @@ -142,7 +142,7 @@ func (o *queryM2M) ClearWithCtx(ctx context.Context) (int64, error) { return o.qs.Filter(fi.ReverseFieldInfo.Name, o.md).DeleteWithCtx(ctx) } -// count all related models of origin model +// count All related models of origin model func (o *queryM2M) Count() (int64, error) { return o.CountWithCtx(context.Background()) } diff --git a/client/orm/orm_queryset.go b/client/orm/orm_queryset.go index 8464741b..69fe01bd 100644 --- a/client/orm/orm_queryset.go +++ b/client/orm/orm_queryset.go @@ -115,7 +115,7 @@ func (o querySet) Exclude(expr string, args ...interface{}) 
QuerySeter { return &o } -// set offset number +// Set offset number func (o *querySet) setOffset(num interface{}) { o.offset = utils.ToInt64(num) } @@ -194,7 +194,7 @@ func (o querySet) IgnoreIndex(indexes ...string) QuerySeter { return &o } -// set relation model to query together. +// Set relation model to query together. // it will query relation models and assign to parent model. func (o querySet) RelatedSel(params ...interface{}) QuerySeter { if len(params) == 0 { @@ -214,13 +214,13 @@ func (o querySet) RelatedSel(params ...interface{}) QuerySeter { return &o } -// set condition to QuerySeter. +// Set condition to QuerySeter. func (o querySet) SetCond(cond *Condition) QuerySeter { o.cond = cond return &o } -// get condition from QuerySeter +// Get condition from QuerySeter func (o querySet) GetCond() *Condition { return o.cond } @@ -276,7 +276,7 @@ func (o *querySet) PrepareInsertWithCtx(ctx context.Context) (Inserter, error) { return newInsertSet(ctx, o.orm, o.mi) } -// query all data and map to containers. +// query All data and map to containers. // cols means the Columns when querying. func (o *querySet) All(container interface{}, cols ...string) (int64, error) { return o.AllWithCtx(context.Background(), container, cols...) @@ -308,7 +308,7 @@ func (o *querySet) OneWithCtx(ctx context.Context, container interface{}, cols . return nil } -// query all data and map to []map[string]interface. +// query All data and map to []map[string]interface. // expres means condition expression. // it converts data to []map[column]value. func (o *querySet) Values(results *[]Params, exprs ...string) (int64, error) { @@ -319,7 +319,7 @@ func (o *querySet) ValuesWithCtx(ctx context.Context, results *[]Params, exprs . 
return o.orm.alias.DbBaser.ReadValues(ctx, o.orm.db, o, o.mi, o.cond, exprs, results, o.orm.alias.TZ) } -// query all data and map to [][]interface +// query All data and map to [][]interface // it converts data to [][column_index]value func (o *querySet) ValuesList(results *[]ParamsList, exprs ...string) (int64, error) { return o.ValuesListWithCtx(context.Background(), results, exprs...) @@ -329,8 +329,8 @@ func (o *querySet) ValuesListWithCtx(ctx context.Context, results *[]ParamsList, return o.orm.alias.DbBaser.ReadValues(ctx, o.orm.db, o, o.mi, o.cond, exprs, results, o.orm.alias.TZ) } -// query all data and map to []interface. -// it's designed for one row record set, auto change to []value, not [][column]value. +// query All data and map to []interface. +// it's designed for one row record Set, auto change to []value, not [][column]value. func (o *querySet) ValuesFlat(result *ParamsList, expr string) (int64, error) { return o.ValuesFlatWithCtx(context.Background(), result, expr) } @@ -339,7 +339,7 @@ func (o *querySet) ValuesFlatWithCtx(ctx context.Context, result *ParamsList, ex return o.orm.alias.DbBaser.ReadValues(ctx, o.orm.db, o, o.mi, o.cond, []string{expr}, result, o.orm.alias.TZ) } -// query all rows into map[string]interface with specify key and value column name. +// query All rows into map[string]interface with specify key and value column name. // keyCol = "name", valueCol = "value" // table data // name | value @@ -354,7 +354,7 @@ func (o *querySet) RowsToMap(result *Params, keyCol, valueCol string) (int64, er panic(ErrNotImplement) } -// query all rows into struct with specify key and value column name. +// query All rows into struct with specify key and value column name. 
// keyCol = "name", valueCol = "value" // table data // name | value diff --git a/client/orm/orm_raw.go b/client/orm/orm_raw.go index 2f811c65..b49daf73 100644 --- a/client/orm/orm_raw.go +++ b/client/orm/orm_raw.go @@ -75,7 +75,7 @@ type rawSet struct { var _ RawSeter = new(rawSet) -// set args for every query +// Set args for every query func (o rawSet) SetArgs(args ...interface{}) RawSeter { o.args = args return &o @@ -90,7 +90,7 @@ func (o *rawSet) Exec() (sql.Result, error) { return o.orm.db.Exec(query, args...) } -// set field value to row container +// Set field value to row container func (o *rawSet) setFieldValue(ind reflect.Value, value interface{}) { switch ind.Kind() { case reflect.Bool: @@ -215,7 +215,7 @@ func (o *rawSet) setFieldValue(ind reflect.Value, value interface{}) { } } -// set field value in loop for slice container +// Set field value in loop for slice container func (o *rawSet) loopSetRefs(refs []interface{}, sInds []reflect.Value, nIndsPtr *[]reflect.Value, eTyps []reflect.Type, init bool) { nInds := *nIndsPtr @@ -299,7 +299,7 @@ func (o *rawSet) QueryRow(containers ...interface{}) error { ind := reflect.Indirect(val) if val.Kind() != reflect.Ptr { - panic(fmt.Errorf(" all args must be use ptr")) + panic(fmt.Errorf(" All args must be use ptr")) } etyp := ind.Type() @@ -318,7 +318,7 @@ func (o *rawSet) QueryRow(containers ...interface{}) error { structMode = true fn := models.GetFullName(typ) - if mi, ok := defaultModelCache.getByFullName(fn); ok { + if mi, ok := defaultModelCache.GetByFullName(fn); ok { sMi = mi } } else { @@ -386,7 +386,7 @@ func (o *rawSet) QueryRow(containers ...interface{}) error { fd := field.Addr().Interface().(models.Fielder) err := fd.SetRaw(value) if err != nil { - return errors.Errorf("set raw error:%s", err) + return errors.Errorf("Set raw error:%s", err) } } else { o.setFieldValue(field, value) @@ -460,7 +460,7 @@ func (o *rawSet) QueryRows(containers ...interface{}) (int64, error) { val := 
reflect.ValueOf(container) sInd := reflect.Indirect(val) if val.Kind() != reflect.Ptr || sInd.Kind() != reflect.Slice { - panic(fmt.Errorf(" all args must be use ptr slice")) + panic(fmt.Errorf(" All args must be use ptr slice")) } etyp := sInd.Type().Elem() @@ -479,7 +479,7 @@ func (o *rawSet) QueryRows(containers ...interface{}) (int64, error) { structMode = true fn := models.GetFullName(typ) - if mi, ok := defaultModelCache.getByFullName(fn); ok { + if mi, ok := defaultModelCache.GetByFullName(fn); ok { sMi = mi } } else { @@ -552,7 +552,7 @@ func (o *rawSet) QueryRows(containers ...interface{}) (int64, error) { fd := field.Addr().Interface().(models.Fielder) err := fd.SetRaw(value) if err != nil { - return 0, errors.Errorf("set raw error:%s", err) + return 0, errors.Errorf("Set raw error:%s", err) } } else { o.setFieldValue(field, value) @@ -880,7 +880,7 @@ func (o *rawSet) ValuesFlat(container *ParamsList, cols ...string) (int64, error return o.readValues(container, cols) } -// query all rows into map[string]interface with specify key and value column name. +// query All rows into map[string]interface with specify key and value column name. // keyCol = "name", valueCol = "value" // table data // name | value @@ -895,7 +895,7 @@ func (o *rawSet) RowsToMap(result *Params, keyCol, valueCol string) (int64, erro return o.queryRowsTo(result, keyCol, valueCol) } -// query all rows into struct with specify key and value column name. +// query All rows into struct with specify key and value column name. 
// keyCol = "name", valueCol = "value" // table data // name | value diff --git a/client/orm/orm_test.go b/client/orm/orm_test.go index 526e53e6..3dced8ff 100644 --- a/client/orm/orm_test.go +++ b/client/orm/orm_test.go @@ -50,7 +50,7 @@ var ( type argAny []interface{} -// get interface by index from interface slice +// Get interface by index from interface slice func (a argAny) Get(i int, args ...interface{}) (r interface{}) { if i >= 0 && i < len(a) { r = a[i] @@ -88,7 +88,7 @@ func ValuesCompare(is bool, a interface{}, args ...interface{}) (ok bool, err er } ok = is && ok || !is && !ok if !ok { - err = fmt.Errorf("expected: `%v`, get `%v`", b, a) + err = fmt.Errorf("expected: `%v`, Get `%v`", b, a) } wrongArg: @@ -217,7 +217,7 @@ func TestSyncDb(t *testing.T) { err := RunSyncdb("default", true, Debug) throwFail(t, err) - defaultModelCache.clean() + defaultModelCache.Clean() } func TestRegisterModels(_ *testing.T) { @@ -253,10 +253,10 @@ func TestModelSyntax(t *testing.T) { user := &User{} ind := reflect.ValueOf(user).Elem() fn := models.GetFullName(ind.Type()) - _, ok := defaultModelCache.getByFullName(fn) + _, ok := defaultModelCache.GetByFullName(fn) throwFail(t, AssertIs(ok, true)) - mi, ok := defaultModelCache.get("user") + mi, ok := defaultModelCache.Get("user") throwFail(t, AssertIs(ok, true)) if ok { throwFail(t, AssertIs(mi.Fields.GetByName("ShouldSkip") == nil, true)) @@ -283,7 +283,7 @@ var DataValues = map[string]interface{}{ "Uint8": uint8(1<<8 - 1), "Uint16": uint16(1<<16 - 1), "Uint32": uint32(1<<32 - 1), - "Uint64": uint64(1<<63 - 1), // uint64 values with high bit set are not supported + "Uint64": uint64(1<<63 - 1), // uint64 values with high bit Set are not supported "Float32": float32(100.1234), "Float64": float64(100.1234), "Decimal": float64(100.1234), @@ -774,7 +774,7 @@ func TestInsertTestData(t *testing.T) { posts := []*Post{ {User: users[0], Tags: []*Tag{tags[0]}, Title: "Introduction", Content: `Go is a new language. 
Although it borrows ideas from existing languages, it has unusual properties that make effective Go programs different in character from programs written in its relatives. A straightforward translation of a C++ or Java program into Go is unlikely to produce a satisfactory result—Java programs are written in Java, not Go. On the other hand, thinking about the problem from a Go perspective could produce a successful but quite different program. In other words, to write Go well, it's important to understand its properties and idioms. It's also important to know the established conventions for programming in Go, such as naming, formatting, program construction, and so on, so that programs you write will be easy for other Go programmers to understand. -This document gives tips for writing clear, idiomatic Go code. It augments the language specification, the Tour of Go, and How to Write Go Code, all of which you should read first.`}, +This document gives tips for writing clear, idiomatic Go code. It augments the language specification, the Tour of Go, and How to Write Go Code, All of which you should read first.`}, {User: users[1], Tags: []*Tag{tags[0], tags[1]}, Title: "Examples", Content: `The Go package sources are intended to serve not only as the core library but also as examples of how to use the language. Moreover, many of the packages contain working, self-contained executable examples you can run directly from the golang.org web site, such as this one (click on the word "Example" to open it up). If you have a question about how to approach a problem or how something might be implemented, the documentation, code and examples in the library can provide answers, ideas and background.`}, {User: users[1], Tags: []*Tag{tags[0], tags[2]}, Title: "Formatting", Content: `Formatting issues are the most contentious but the least consequential. 
People can adapt to different formatting styles but it's better if they don't have to, and less time is devoted to the topic if everyone adheres to the same style. The problem is how to approach this Utopia without a long prescriptive style guide. With Go we take an unusual approach and let the machine take care of most formatting issues. The gofmt program (also available as go fmt, which operates at the package level rather than source file level) reads a Go program and emits the source in a standard style of indentation and vertical alignment, retaining and if necessary reformatting comments. If you want to know how to handle some new layout situation, run gofmt; if the answer doesn't seem right, rearrange your program (or file a bug about gofmt), don't work around it.`}, @@ -2347,7 +2347,7 @@ func TestTransactionIsolationLevel(t *testing.T) { throwFail(t, err) throwFail(t, AssertIs(num, 0)) - // o2 commit and query tag table, get the result + // o2 commit and query tag table, Get the result to2.Commit() num, err = o2.QueryTable("tag").Filter("name", "test-transaction").Count() throwFail(t, err) @@ -2631,9 +2631,9 @@ func TestIgnoreCaseTag(t *testing.T) { Name02 string `orm:"COLUMN(Name)"` Name03 string `orm:"Column(name)"` } - defaultModelCache.clean() + defaultModelCache.Clean() RegisterModel(&testTagModel{}) - info, ok := defaultModelCache.get("test_tag_model") + info, ok := defaultModelCache.Get("test_tag_model") throwFail(t, AssertIs(ok, true)) throwFail(t, AssertNot(info, nil)) if t == nil { diff --git a/client/orm/qb_mysql.go b/client/orm/qb_mysql.go index df65e11d..486299a9 100644 --- a/client/orm/qb_mysql.go +++ b/client/orm/qb_mysql.go @@ -142,7 +142,7 @@ func (qb *MySQLQueryBuilder) Update(tables ...string) QueryBuilder { return qb } -// Set join the set kv +// Set join the Set kv func (qb *MySQLQueryBuilder) Set(kv ...string) QueryBuilder { qb.tokens = append(qb.tokens, "SET", strings.Join(kv, CommaSpace)) return qb @@ -179,7 +179,7 @@ func (qb 
*MySQLQueryBuilder) Subquery(sub string, alias string) string { return fmt.Sprintf("(%s) AS %s", sub, alias) } -// String join all tokens +// String join All tokens func (qb *MySQLQueryBuilder) String() string { s := strings.Join(qb.tokens, " ") qb.tokens = qb.tokens[:0] diff --git a/client/orm/qb_postgres.go b/client/orm/qb_postgres.go index 3e5ec1c6..713fb014 100644 --- a/client/orm/qb_postgres.go +++ b/client/orm/qb_postgres.go @@ -172,7 +172,7 @@ func (qb *PostgresQueryBuilder) Update(tables ...string) QueryBuilder { return qb } -// Set join the set kv +// Set join the Set kv func (qb *PostgresQueryBuilder) Set(kv ...string) QueryBuilder { qb.tokens = append(qb.tokens, "SET", strings.Join(kv, CommaSpace)) return qb @@ -211,7 +211,7 @@ func (qb *PostgresQueryBuilder) Subquery(sub string, alias string) string { return fmt.Sprintf("(%s) AS %s", sub, alias) } -// String join all tokens +// String join All tokens func (qb *PostgresQueryBuilder) String() string { s := strings.Join(qb.tokens, " ") qb.tokens = qb.tokens[:0] diff --git a/client/orm/types.go b/client/orm/types.go index 649d29fc..07186a03 100644 --- a/client/orm/types.go +++ b/client/orm/types.go @@ -148,7 +148,7 @@ type DML interface { // for example: // user := new(User) // id, err = Ormer.Insert(user) - // user must be a pointer and Insert will set user's pk field + // user must be a pointer and Insert will Set user's pk field Insert(md interface{}) (int64, error) InsertWithCtx(ctx context.Context, md interface{}) (int64, error) // InsertOrUpdate mysql:InsertOrUpdate(model) or InsertOrUpdate(model,"colu=colu+value") @@ -161,8 +161,8 @@ type DML interface { InsertMulti(bulk int, mds interface{}) (int64, error) InsertMultiWithCtx(ctx context.Context, bulk int, mds interface{}) (int64, error) // Update updates model to database. - // cols set the Columns those want to update. 
- // find model by Id(pk) field and update Columns specified by Fields, if cols is null then update all Columns + // cols Set the Columns those want to update. + // find model by Id(pk) field and update Columns specified by Fields, if cols is null then update All Columns // for example: // user := User{Id: 2} // user.Langs = append(user.Langs, "zh-CN", "en-US") @@ -291,14 +291,14 @@ type QuerySeter interface { // Exclude add NOT condition to querySeter. // have the same usage as Filter Exclude(string, ...interface{}) QuerySeter - // SetCond set condition to QuerySeter. + // SetCond Set condition to QuerySeter. // sql's where condition // cond := orm.NewCondition() // cond1 := cond.And("profile__isnull", false).AndNot("status__in", 1).Or("profile__age__gt", 2000) // //sql-> WHERE T0.`profile_id` IS NOT NULL AND NOT T0.`Status` IN (?) OR T1.`age` > 2000 // num, err := qs.SetCond(cond1).Count() SetCond(*Condition) QuerySeter - // GetCond get condition from QuerySeter. + // GetCond Get condition from QuerySeter. // sql's where condition // cond := orm.NewCondition() // cond = cond.And("profile__isnull", false).AndNot("status__in", 1) @@ -310,8 +310,8 @@ type QuerySeter interface { GetCond() *Condition // Limit add LIMIT value. // args[0] means offset, e.g. LIMIT num,offset. - // if Limit <= 0 then Limit will be set to default limit ,eg 1000 - // if QuerySeter doesn't call Limit, the sql's Limit will be set to default limit, eg 1000 + // if Limit <= 0 then Limit will be Set to default limit ,eg 1000 + // if QuerySeter doesn't call Limit, the sql's Limit will be Set to default limit, eg 1000 // for example: // qs.Limit(10, 2) // // sql-> limit 10 offset 2 @@ -365,10 +365,10 @@ type QuerySeter interface { // qs.IgnoreIndex(`idx_name1`,`idx_name2`) // ForceIndex, UseIndex , IgnoreIndex are mutually exclusive IgnoreIndex(indexes ...string) QuerySeter - // RelatedSel set relation model to query together. + // RelatedSel Set relation model to query together. 
// it will query relation models and assign to parent model. // for example: - // // will load all related Fields use left join . + // // will load All related Fields use left join . // qs.RelatedSel().One(&user) // // will load related field only profile // qs.RelatedSel("profile").One(&user) @@ -380,7 +380,7 @@ type QuerySeter interface { // Distinct(). // All(&permissions) Distinct() QuerySeter - // ForUpdate set FOR UPDATE to query. + // ForUpdate Set FOR UPDATE to query. // for example: // o.QueryTable("user").Filter("uid", uid).ForUpdate().All(&users) ForUpdate() QuerySeter @@ -418,7 +418,7 @@ type QuerySeter interface { // err = i.Close() //don't forget call Close PrepareInsert() (Inserter, error) PrepareInsertWithCtx(context.Context) (Inserter, error) - // All query all data and map to containers. + // All query All data and map to containers. // cols means the Columns when querying. // for example: // var users []*User @@ -432,7 +432,7 @@ type QuerySeter interface { // qs.One(&user) //user.UserName == "slene" One(container interface{}, cols ...string) error OneWithCtx(ctx context.Context, container interface{}, cols ...string) error - // Values query all data and map to []map[string]interface. + // Values query All data and map to []map[string]interface. // expres means condition expression. // it converts data to []map[column]value. 
// for example: @@ -440,21 +440,21 @@ type QuerySeter interface { // qs.Values(&maps) //maps[0]["UserName"]=="slene" Values(results *[]Params, exprs ...string) (int64, error) ValuesWithCtx(ctx context.Context, results *[]Params, exprs ...string) (int64, error) - // ValuesList query all data and map to [][]interface + // ValuesList query All data and map to [][]interface // it converts data to [][column_index]value // for example: // var list []ParamsList // qs.ValuesList(&list) // list[0][1] == "slene" ValuesList(results *[]ParamsList, exprs ...string) (int64, error) ValuesListWithCtx(ctx context.Context, results *[]ParamsList, exprs ...string) (int64, error) - // ValuesFlat query all data and map to []interface. - // it's designed for one column record set, auto change to []value, not [][column]value. + // ValuesFlat query All data and map to []interface. + // it's designed for one column record Set, auto change to []value, not [][column]value. // for example: // var list ParamsList // qs.ValuesFlat(&list, "UserName") // list[0] == "slene" ValuesFlat(result *ParamsList, expr string) (int64, error) ValuesFlatWithCtx(ctx context.Context, result *ParamsList, expr string) (int64, error) - // RowsToMap query all rows into map[string]interface with specify key and value column name. + // RowsToMap query All rows into map[string]interface with specify key and value column name. // keyCol = "name", valueCol = "value" // table data // name | value @@ -465,7 +465,7 @@ type QuerySeter interface { // "found": 200, // } RowsToMap(result *Params, keyCol, valueCol string) (int64, error) - // RowsToStruct query all rows into struct with specify key and value column name. + // RowsToStruct query All rows into struct with specify key and value column name. 
// keyCol = "name", valueCol = "value" // table data // name | value @@ -488,7 +488,7 @@ type QuerySeter interface { } // QueryM2Mer model to model query struct -// all operations are on the m2m table only, will not affect the origin model table +// All operations are on the m2m table only, will not affect the origin model table type QueryM2Mer interface { // Add adds models to origin models when creating queryM2M. // example: @@ -513,10 +513,10 @@ type QueryM2Mer interface { // Exist checks model is existed in relationship of origin model Exist(interface{}) bool ExistWithCtx(context.Context, interface{}) bool - // Clear cleans all models in related of origin model + // Clear cleans All models in related of origin model Clear() (int64, error) ClearWithCtx(context.Context) (int64, error) - // Count counts all related models of origin model + // Count counts All related models of origin model Count() (int64, error) CountWithCtx(context.Context) (int64, error) } @@ -534,7 +534,7 @@ type RawPreparer interface { // sql := fmt.Sprintf("SELECT %sid%s,%sname%s FROM %suser%s WHERE id = ?",Q,Q,Q,Q,Q,Q) // rs := Ormer.Raw(sql, 1) type RawSeter interface { - // Exec execute sql and get result + // Exec execute sql and Get result Exec() (sql.Result, error) // QueryRow query data and map to container // for example: @@ -559,7 +559,7 @@ type RawSeter interface { // ValuesFlat query data to []interface // see QuerySeter's ValuesFlat ValuesFlat(container *ParamsList, cols ...string) (int64, error) - // RowsToMap query all rows into map[string]interface with specify key and value column name. + // RowsToMap query All rows into map[string]interface with specify key and value column name. // keyCol = "name", valueCol = "value" // table data // name | value @@ -570,7 +570,7 @@ type RawSeter interface { // "found": 200, // } RowsToMap(result *Params, keyCol, valueCol string) (int64, error) - // RowsToStruct query all rows into struct with specify key and value column name. 
+ // RowsToStruct query All rows into struct with specify key and value column name. // keyCol = "name", valueCol = "value" // table data // name | value From 4eea71f1d7a9ed5068ff70298b9762450e8634d6 Mon Sep 17 00:00:00 2001 From: Uzziah <120019273+uzziahlin@users.noreply.github.com> Date: Tue, 29 Aug 2023 20:56:51 +0800 Subject: [PATCH 5/9] fix: refactor readBatchSQL and readValuesSQL method to reuse readSQL (#5303) * fix: refactor readBatchSQL and readValuesSQL method to reuse readSQL and add test of the readValuesSQL method * fix: add the change record into the CHANGELOG.md * fix: fix the bug for preprocess cols * fix: resolve the conflict with develop --------- Co-authored-by: Ken --- CHANGELOG.md | 1 + client/orm/db.go | 45 ++++++------ client/orm/db_test.go | 155 ++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 175 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e5c3433d..9857d579 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ - [fix: refactor UpdateBatch method](https://github.com/beego/beego/pull/5295) - [fix: refactor InsertOrUpdate method](https://github.com/beego/beego/pull/5296) - [fix: refactor ReadBatch method](https://github.com/beego/beego/pull/5298) +- [fix: refactor ReadValues method](https://github.com/beego/beego/pull/5303) ## ORM refactoring - [introducing internal/models pkg](https://github.com/beego/beego/pull/5238) diff --git a/client/orm/db.go b/client/orm/db.go index 8d59fd01..4c31dbb9 100644 --- a/client/orm/db.go +++ b/client/orm/db.go @@ -1292,6 +1292,24 @@ func (d *dbBase) ReadBatch(ctx context.Context, q dbQuerier, qs *querySet, mi *m } func (d *dbBase) readBatchSQL(tables *dbTables, tCols []string, cond *Condition, qs *querySet, mi *models.ModelInfo, tz *time.Location) (string, []interface{}) { + cols := d.preProcCols(tCols) // pre process columns + return d.readSQL(tables, cols, cond, qs, mi, tz) +} + +func (d *dbBase) preProcCols(cols []string) []string { + res := 
make([]string, len(cols)) + + quote := d.ins.TableQuote() + for i, col := range cols { + res[i] = fmt.Sprintf("T0.%s%s%s", quote, col, quote) + } + + return res +} + +// readSQL generate a select sql string and return args +// ReadBatch and ReadValues methods will reuse this method. +func (d *dbBase) readSQL(tables *dbTables, tCols []string, cond *Condition, qs *querySet, mi *models.ModelInfo, tz *time.Location) (string, []interface{}) { quote := d.ins.TableQuote() @@ -1316,10 +1334,7 @@ func (d *dbBase) readBatchSQL(tables *dbTables, tCols []string, cond *Condition, if i > 0 { _, _ = buf.WriteString(", ") } - _, _ = buf.WriteString("T0.") - _, _ = buf.WriteString(quote) _, _ = buf.WriteString(tCol) - _, _ = buf.WriteString(quote) } for _, tbl := range tables.tables { @@ -1897,25 +1912,7 @@ func (d *dbBase) ReadValues(ctx context.Context, q dbQuerier, qs *querySet, mi * } } - where, args := tables.getCondSQL(cond, false, tz) - groupBy := tables.getGroupSQL(qs.groups) - orderBy := tables.getOrderSQL(qs.orders) - limit := tables.getLimitSQL(mi, qs.offset, qs.limit) - join := tables.getJoinSQL() - specifyIndexes := tables.getIndexSql(mi.Table, qs.useIndex, qs.indexes) - - sels := strings.Join(cols, ", ") - - sqlSelect := "SELECT" - if qs.distinct { - sqlSelect += " DISTINCT" - } - query := fmt.Sprintf("%s %s FROM %s%s%s T0 %s%s%s%s%s%s", - sqlSelect, sels, - Q, mi.Table, Q, - specifyIndexes, join, where, groupBy, orderBy, limit) - - d.ins.ReplaceMarks(&query) + query, args := d.readValuesSQL(tables, cols, qs, mi, cond, tz) rs, err := q.QueryContext(ctx, query, args...) 
if err != nil { @@ -2011,6 +2008,10 @@ func (d *dbBase) ReadValues(ctx context.Context, q dbQuerier, qs *querySet, mi * return cnt, nil } +func (d *dbBase) readValuesSQL(tables *dbTables, cols []string, qs *querySet, mi *models.ModelInfo, cond *Condition, tz *time.Location) (string, []interface{}) { + return d.readSQL(tables, cols, cond, qs, mi, tz) +} + // SupportUpdateJoin flag of update joined record. func (d *dbBase) SupportUpdateJoin() bool { return true diff --git a/client/orm/db_test.go b/client/orm/db_test.go index 32e90d17..2b551608 100644 --- a/client/orm/db_test.go +++ b/client/orm/db_test.go @@ -890,8 +890,6 @@ func TestDbBase_InsertOrUpdateSQL(t *testing.T) { func TestDbBase_readBatchSQL(t *testing.T) { - tCols := []string{"name", "score"} - mc := models.NewModelCacheHandler() err := mc.Register("", false, new(testTab), new(testTab1), new(testTab2)) @@ -913,7 +911,8 @@ func TestDbBase_readBatchSQL(t *testing.T) { name string db *dbBase - qs *querySet + tCols []string + qs *querySet wantRes string wantArgs []interface{} @@ -923,6 +922,7 @@ func TestDbBase_readBatchSQL(t *testing.T) { db: &dbBase{ ins: newdbBaseMysql(), }, + tCols: []string{"name", "score"}, qs: &querySet{ mi: mi, cond: cond, @@ -948,6 +948,7 @@ func TestDbBase_readBatchSQL(t *testing.T) { db: &dbBase{ ins: newdbBaseMysql(), }, + tCols: []string{"name", "score"}, qs: &querySet{ mi: mi, cond: cond, @@ -974,6 +975,7 @@ func TestDbBase_readBatchSQL(t *testing.T) { db: &dbBase{ ins: newdbBaseMysql(), }, + tCols: []string{"name", "score"}, qs: &querySet{ mi: mi, cond: cond, @@ -1000,6 +1002,7 @@ func TestDbBase_readBatchSQL(t *testing.T) { db: &dbBase{ ins: newdbBaseMysql(), }, + tCols: []string{"name", "score"}, qs: &querySet{ mi: mi, cond: cond, @@ -1027,6 +1030,7 @@ func TestDbBase_readBatchSQL(t *testing.T) { db: &dbBase{ ins: newdbBaseMysql(), }, + tCols: []string{"name", "score"}, qs: &querySet{ mi: mi, cond: cond, @@ -1053,6 +1057,7 @@ func TestDbBase_readBatchSQL(t *testing.T) { db: 
&dbBase{ ins: newdbBasePostgres(), }, + tCols: []string{"name", "score"}, qs: &querySet{ mi: mi, cond: cond, @@ -1076,6 +1081,7 @@ func TestDbBase_readBatchSQL(t *testing.T) { db: &dbBase{ ins: newdbBasePostgres(), }, + tCols: []string{"name", "score"}, qs: &querySet{ mi: mi, cond: cond, @@ -1100,6 +1106,7 @@ func TestDbBase_readBatchSQL(t *testing.T) { db: &dbBase{ ins: newdbBasePostgres(), }, + tCols: []string{"name", "score"}, qs: &querySet{ mi: mi, cond: cond, @@ -1124,6 +1131,7 @@ func TestDbBase_readBatchSQL(t *testing.T) { db: &dbBase{ ins: newdbBasePostgres(), }, + tCols: []string{"name", "score"}, qs: &querySet{ mi: mi, cond: cond, @@ -1149,6 +1157,7 @@ func TestDbBase_readBatchSQL(t *testing.T) { db: &dbBase{ ins: newdbBasePostgres(), }, + tCols: []string{"name", "score"}, qs: &querySet{ mi: mi, cond: cond, @@ -1175,7 +1184,145 @@ func TestDbBase_readBatchSQL(t *testing.T) { tables := newDbTables(mi, tc.db.ins) tables.parseRelated(tc.qs.related, tc.qs.relDepth) - res, args := tc.db.readBatchSQL(tables, tCols, cond, tc.qs, mi, tz) + res, args := tc.db.readBatchSQL(tables, tc.tCols, cond, tc.qs, mi, tz) + + assert.Equal(t, tc.wantRes, res) + assert.Equal(t, tc.wantArgs, args) + }) + } + +} + +func TestDbBase_readValuesSQL(t *testing.T) { + + mc := models.NewModelCacheHandler() + + err := mc.Register("", false, new(testTab), new(testTab1), new(testTab2)) + + assert.Nil(t, err) + + mc.Bootstrap() + + mi, ok := mc.GetByMd(new(testTab)) + + assert.True(t, ok) + + cond := NewCondition().And("name", "test_name"). 
+ OrCond(NewCondition().And("age__gt", 18).And("score__lt", 60)) + + tz := time.Local + + testCases := []struct { + name string + db *dbBase + + cols []string + qs *querySet + + wantRes string + wantArgs []interface{} + }{ + { + name: "read values with MySQL", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + cols: []string{"T0.`name` name", "T0.`age` age", "T0.`score` score"}, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + useIndex: 1, + indexes: []string{"name", "score"}, + }, + wantRes: "SELECT T0.`name` name, T0.`age` age, T0.`score` score FROM `test_tab` T0 USE INDEX(`name`,`score`) WHERE T0.`name` = ? OR ( T0.`age` > ? AND T0.`score` < ? ) GROUP BY T0.`name`, T0.`age` ORDER BY T0.`score` DESC, T0.`age` ASC LIMIT 10 OFFSET 100", + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read values with MySQL and distinct", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + cols: []string{"T0.`name` name", "T0.`age` age", "T0.`score` score"}, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + useIndex: 1, + indexes: []string{"name", "score"}, + distinct: true, + }, + wantRes: "SELECT DISTINCT T0.`name` name, T0.`age` age, T0.`score` score FROM `test_tab` T0 USE INDEX(`name`,`score`) WHERE T0.`name` = ? OR ( T0.`age` > ? AND T0.`score` < ? 
) GROUP BY T0.`name`, T0.`age` ORDER BY T0.`score` DESC, T0.`age` ASC LIMIT 10 OFFSET 100", + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read values with PostgreSQL", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + cols: []string{`T0."name" name`, `T0."age" age`, `T0."score" score`}, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + }, + wantRes: `SELECT T0."name" name, T0."age" age, T0."score" score FROM "test_tab" T0 WHERE T0."name" = $1 OR ( T0."age" > $2 AND T0."score" < $3 ) GROUP BY T0."name", T0."age" ORDER BY T0."score" DESC, T0."age" ASC LIMIT 10 OFFSET 100`, + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "read values with PostgreSQL and distinct", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + cols: []string{`T0."name" name`, `T0."age" age`, `T0."score" score`}, + qs: &querySet{ + mi: mi, + cond: cond, + limit: 10, + offset: 100, + groups: []string{"name", "age"}, + orders: []*order_clause.Order{ + order_clause.Clause(order_clause.Column("score"), + order_clause.SortDescending()), + order_clause.Clause(order_clause.Column("age"), + order_clause.SortAscending()), + }, + distinct: true, + }, + wantRes: `SELECT DISTINCT T0."name" name, T0."age" age, T0."score" score FROM "test_tab" T0 WHERE T0."name" = $1 OR ( T0."age" > $2 AND T0."score" < $3 ) GROUP BY T0."name", T0."age" ORDER BY T0."score" DESC, T0."age" ASC LIMIT 10 OFFSET 100`, + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tables := newDbTables(mi, tc.db.ins) + + res, args := tc.db.readValuesSQL(tables, tc.cols, tc.qs, mi, cond, tz) assert.Equal(t, tc.wantRes, res) 
assert.Equal(t, tc.wantArgs, args) From b2a37fe60e1146146afb9c70dc3362d0b4033eed Mon Sep 17 00:00:00 2001 From: Uzziah <120019273+uzziahlin@users.noreply.github.com> Date: Tue, 12 Sep 2023 00:18:04 +0800 Subject: [PATCH 6/9] fix: refactor Count method (#5300) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: refactor Count method and add test * fix: add the change record into the CHANGELOG.md * fix: refactor the readSQL method and let countSQL reuse readSQL method * fix: fix the bug in the construction process of the order by clause * fix: modify the TestCountOrderBy、add the TestCount and TestOrderBy * fix: move the change record in CHANGELOG.md to developing --------- Co-authored-by: Ken --- CHANGELOG.md | 2 +- client/orm/db.go | 87 ++++++++++++++++++++-------------- client/orm/db_tables.go | 4 +- client/orm/db_test.go | 102 ++++++++++++++++++++++++++++++++++++++++ client/orm/orm_test.go | 60 ++++++++++++++++++++++- 5 files changed, 217 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9857d579..6f0333c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,5 @@ # developing -- [orm: move the modelCache to internal/models package](https://github.com/beego/beego/pull/5306) +- [fix: refactor Count method](https://github.com/beego/beego/pull/5300) # v2.1.1 - [httplib: fix unstable unit test which use the httplib.org](https://github.com/beego/beego/pull/5232) diff --git a/client/orm/db.go b/client/orm/db.go index 4c31dbb9..b06016e5 100644 --- a/client/orm/db.go +++ b/client/orm/db.go @@ -1293,7 +1293,17 @@ func (d *dbBase) ReadBatch(ctx context.Context, q dbQuerier, qs *querySet, mi *m func (d *dbBase) readBatchSQL(tables *dbTables, tCols []string, cond *Condition, qs *querySet, mi *models.ModelInfo, tz *time.Location) (string, []interface{}) { cols := d.preProcCols(tCols) // pre process columns - return d.readSQL(tables, cols, cond, qs, mi, tz) + + buf := buffers.Get() + defer 
buffers.Put(buf) + + args := d.readSQL(buf, tables, cols, cond, qs, mi, tz) + + query := buf.String() + + d.ins.ReplaceMarks(&query) + + return query, args } func (d *dbBase) preProcCols(cols []string) []string { @@ -1309,7 +1319,7 @@ func (d *dbBase) preProcCols(cols []string) []string { // readSQL generate a select sql string and return args // ReadBatch and ReadValues methods will reuse this method. -func (d *dbBase) readSQL(tables *dbTables, tCols []string, cond *Condition, qs *querySet, mi *models.ModelInfo, tz *time.Location) (string, []interface{}) { +func (d *dbBase) readSQL(buf buffers.Buffer, tables *dbTables, tCols []string, cond *Condition, qs *querySet, mi *models.ModelInfo, tz *time.Location) []interface{} { quote := d.ins.TableQuote() @@ -1320,9 +1330,6 @@ func (d *dbBase) readSQL(tables *dbTables, tCols []string, cond *Condition, qs * join := tables.getJoinSQL() specifyIndexes := tables.getIndexSql(mi.Table, qs.useIndex, qs.indexes) - buf := buffers.Get() - defer buffers.Put(buf) - _, _ = buf.WriteString("SELECT ") if qs.distinct { @@ -1372,6 +1379,37 @@ func (d *dbBase) readSQL(tables *dbTables, tCols []string, cond *Condition, qs * _, _ = buf.WriteString(" FOR UPDATE") } + return args +} + +// Count excute count sql and return count result int64. +func (d *dbBase) Count(ctx context.Context, q dbQuerier, qs *querySet, mi *models.ModelInfo, cond *Condition, tz *time.Location) (cnt int64, err error) { + + query, args := d.countSQL(qs, mi, cond, tz) + + row := q.QueryRowContext(ctx, query, args...) 
+ err = row.Scan(&cnt) + return +} + +func (d *dbBase) countSQL(qs *querySet, mi *models.ModelInfo, cond *Condition, tz *time.Location) (string, []interface{}) { + tables := newDbTables(mi, d.ins) + tables.parseRelated(qs.related, qs.relDepth) + + buf := buffers.Get() + defer buffers.Put(buf) + + if len(qs.groups) > 0 { + _, _ = buf.WriteString("SELECT COUNT(*) FROM (") + } + + qs.aggregate = "COUNT(*)" + args := d.readSQL(buf, tables, nil, cond, qs, mi, tz) + + if len(qs.groups) > 0 { + _, _ = buf.WriteString(") AS T") + } + query := buf.String() d.ins.ReplaceMarks(&query) @@ -1379,34 +1417,6 @@ func (d *dbBase) readSQL(tables *dbTables, tCols []string, cond *Condition, qs * return query, args } -// Count excute count sql and return count result int64. -func (d *dbBase) Count(ctx context.Context, q dbQuerier, qs *querySet, mi *models.ModelInfo, cond *Condition, tz *time.Location) (cnt int64, err error) { - tables := newDbTables(mi, d.ins) - tables.parseRelated(qs.related, qs.relDepth) - - where, args := tables.getCondSQL(cond, false, tz) - groupBy := tables.getGroupSQL(qs.groups) - tables.getOrderSQL(qs.orders) - join := tables.getJoinSQL() - specifyIndexes := tables.getIndexSql(mi.Table, qs.useIndex, qs.indexes) - - Q := d.ins.TableQuote() - - query := fmt.Sprintf("SELECT COUNT(*) FROM %s%s%s T0 %s%s%s%s", - Q, mi.Table, Q, - specifyIndexes, join, where, groupBy) - - if groupBy != "" { - query = fmt.Sprintf("SELECT COUNT(*) FROM (%s) AS T", query) - } - - d.ins.ReplaceMarks(&query) - - row := q.QueryRowContext(ctx, query, args...) - err = row.Scan(&cnt) - return -} - // GenerateOperatorSQL generate sql with replacing operator string placeholders and replaced values. 
func (d *dbBase) GenerateOperatorSQL(mi *models.ModelInfo, fi *models.FieldInfo, operator string, args []interface{}, tz *time.Location) (string, []interface{}) { var sql string @@ -2009,7 +2019,16 @@ func (d *dbBase) ReadValues(ctx context.Context, q dbQuerier, qs *querySet, mi * } func (d *dbBase) readValuesSQL(tables *dbTables, cols []string, qs *querySet, mi *models.ModelInfo, cond *Condition, tz *time.Location) (string, []interface{}) { - return d.readSQL(tables, cols, cond, qs, mi, tz) + buf := buffers.Get() + defer buffers.Put(buf) + + args := d.readSQL(buf, tables, cols, cond, qs, mi, tz) + + query := buf.String() + + d.ins.ReplaceMarks(&query) + + return query, args } // SupportUpdateJoin flag of update joined record. diff --git a/client/orm/db_tables.go b/client/orm/db_tables.go index 9d30afb3..c22f0a11 100644 --- a/client/orm/db_tables.go +++ b/client/orm/db_tables.go @@ -439,9 +439,9 @@ func (t *dbTables) getOrderSQL(orders []*order_clause.Order) (orderSQL string) { if order.IsRaw() { if len(clause) == 2 { - orderSqls = append(orderSqls, fmt.Sprintf("%s.%s%s%s %s", clause[0], Q, clause[1], Q, order.SortString())) + orderSqls = append(orderSqls, fmt.Sprintf("%s.%s %s", clause[0], clause[1], order.SortString())) } else if len(clause) == 1 { - orderSqls = append(orderSqls, fmt.Sprintf("%s%s%s %s", Q, clause[0], Q, order.SortString())) + orderSqls = append(orderSqls, fmt.Sprintf("%s %s", clause[0], order.SortString())) } else { panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(clause, ExprSep))) } diff --git a/client/orm/db_test.go b/client/orm/db_test.go index 2b551608..2bb32090 100644 --- a/client/orm/db_test.go +++ b/client/orm/db_test.go @@ -1331,6 +1331,108 @@ func TestDbBase_readValuesSQL(t *testing.T) { } +func TestDbBase_countSQL(t *testing.T) { + + mc := models.NewModelCacheHandler() + + err := mc.Register("", false, new(testTab), new(testTab1), new(testTab2)) + + assert.Nil(t, err) + + mc.Bootstrap() + + mi, ok := 
mc.GetByMd(new(testTab)) + + assert.True(t, ok) + + cond := NewCondition().And("name", "test_name"). + OrCond(NewCondition().And("age__gt", 18).And("score__lt", 60)) + + tz := time.Local + + testCases := []struct { + name string + db *dbBase + + qs *querySet + + wantRes string + wantArgs []interface{} + }{ + { + name: "count with MySQL has no group by", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + useIndex: 1, + indexes: []string{"name", "score"}, + related: make([]string, 0), + relDepth: 2, + }, + wantRes: "SELECT COUNT(*) FROM `test_tab` T0 USE INDEX(`name`,`score`) INNER JOIN `test_tab1` T1 ON T1.`id` = T0.`test_tab_1_id` INNER JOIN `test_tab2` T2 ON T2.`id` = T1.`test_tab_2_id` WHERE T0.`name` = ? OR ( T0.`age` > ? AND T0.`score` < ? ) ", + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "count with MySQL has group by", + db: &dbBase{ + ins: newdbBaseMysql(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + useIndex: 1, + indexes: []string{"name", "score"}, + related: make([]string, 0), + relDepth: 2, + groups: []string{"name", "age"}, + }, + wantRes: "SELECT COUNT(*) FROM (SELECT COUNT(*) FROM `test_tab` T0 USE INDEX(`name`,`score`) INNER JOIN `test_tab1` T1 ON T1.`id` = T0.`test_tab_1_id` INNER JOIN `test_tab2` T2 ON T2.`id` = T1.`test_tab_2_id` WHERE T0.`name` = ? OR ( T0.`age` > ? AND T0.`score` < ? 
) GROUP BY T0.`name`, T0.`age` ) AS T", + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "count with PostgreSQL has no group by", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + related: make([]string, 0), + relDepth: 2, + }, + wantRes: `SELECT COUNT(*) FROM "test_tab" T0 INNER JOIN "test_tab1" T1 ON T1."id" = T0."test_tab_1_id" INNER JOIN "test_tab2" T2 ON T2."id" = T1."test_tab_2_id" WHERE T0."name" = $1 OR ( T0."age" > $2 AND T0."score" < $3 ) `, + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + { + name: "count with PostgreSQL has group by", + db: &dbBase{ + ins: newdbBasePostgres(), + }, + qs: &querySet{ + mi: mi, + cond: cond, + related: make([]string, 0), + relDepth: 2, + groups: []string{"name", "age"}, + }, + wantRes: `SELECT COUNT(*) FROM (SELECT COUNT(*) FROM "test_tab" T0 INNER JOIN "test_tab1" T1 ON T1."id" = T0."test_tab_1_id" INNER JOIN "test_tab2" T2 ON T2."id" = T1."test_tab_2_id" WHERE T0."name" = $1 OR ( T0."age" > $2 AND T0."score" < $3 ) GROUP BY T0."name", T0."age" ) AS T`, + wantArgs: []interface{}{"test_name", int64(18), int64(60)}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, args := tc.db.countSQL(tc.qs, mi, cond, tz) + + assert.Equal(t, tc.wantRes, res) + assert.Equal(t, tc.wantArgs, args) + }) + } +} + type testTab struct { ID int64 `orm:"auto;pk;column(id)"` Name string `orm:"column(name)"` diff --git a/client/orm/orm_test.go b/client/orm/orm_test.go index 3dced8ff..a371f193 100644 --- a/client/orm/orm_test.go +++ b/client/orm/orm_test.go @@ -1140,7 +1140,10 @@ func TestOffset(t *testing.T) { throwFail(t, AssertIs(num, 2)) } -func TestOrderBy(t *testing.T) { +func TestCountOrderBy(t *testing.T) { + if IsPostgres { + return + } qs := dORM.QueryTable("user") num, err := qs.OrderBy("-status").Filter("user_name", "nobody").Count() throwFail(t, err) @@ -1175,6 +1178,61 @@ func TestOrderBy(t *testing.T) { } } 
+func TestOrderBy(t *testing.T) { + var users []*User + qs := dORM.QueryTable("user") + num, err := qs.OrderBy("-status").Filter("user_name", "nobody").All(&users) + throwFail(t, err) + throwFail(t, AssertIs(num, 1)) + + num, err = qs.OrderBy("status").Filter("user_name", "slene").All(&users) + throwFail(t, err) + throwFail(t, AssertIs(num, 1)) + + num, err = qs.OrderBy("-profile__age").Filter("user_name", "astaxie").All(&users) + throwFail(t, err) + throwFail(t, AssertIs(num, 1)) + + num, err = qs.OrderClauses( + order_clause.Clause( + order_clause.Column(`profile__age`), + order_clause.SortDescending(), + ), + ).Filter("user_name", "astaxie").All(&users) + throwFail(t, err) + throwFail(t, AssertIs(num, 1)) + + if IsMysql { + num, err = qs.OrderClauses( + order_clause.Clause( + order_clause.Column(`rand()`), + order_clause.Raw(), + ), + ).Filter("user_name", "astaxie").All(&users) + throwFail(t, err) + throwFail(t, AssertIs(num, 1)) + } +} + +func TestCount(t *testing.T) { + qs := dORM.QueryTable("user") + num, err := qs.Filter("user_name", "nobody").Count() + throwFail(t, err) + throwFail(t, AssertIs(num, 1)) + + num, err = qs.Filter("user_name", "slene").Count() + throwFail(t, err) + throwFail(t, AssertIs(num, 1)) + + num, err = qs.Filter("user_name", "astaxie").Count() + throwFail(t, err) + throwFail(t, AssertIs(num, 1)) + + num, err = qs.Filter("user_name", "astaxie").Count() + throwFail(t, err) + throwFail(t, AssertIs(num, 1)) +} + func TestAll(t *testing.T) { var users []*User qs := dORM.QueryTable("user") From c55099756cc35004df695ebb772a7f21e760b3bf Mon Sep 17 00:00:00 2001 From: smx_Morgan <86641888+smx-Morgan@users.noreply.github.com> Date: Thu, 14 Sep 2023 21:59:30 +0800 Subject: [PATCH 7/9] support db_type in ddl (#5404) * support db_type in ddl * postgres test fixed * CHANGELOG.md * mysql bug --- CHANGELOG.md | 2 +- client/orm/ddl.go | 5 +++-- client/orm/ddl_test.go | 10 ++++++++++ client/orm/internal/models/models_info_f.go | 2 ++ 
client/orm/internal/models/models_utils.go | 1 + 5 files changed, 17 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f0333c4..0bdb6f64 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,6 @@ # developing - [fix: refactor Count method](https://github.com/beego/beego/pull/5300) - +- [support db_type in ddl ](https://github.com/beego/beego/pull/5404) # v2.1.1 - [httplib: fix unstable unit test which use the httplib.org](https://github.com/beego/beego/pull/5232) - [rft: remove adapter package](https://github.com/beego/beego/pull/5239) diff --git a/client/orm/ddl.go b/client/orm/ddl.go index f5d3c3a6..ce1a6061 100644 --- a/client/orm/ddl.go +++ b/client/orm/ddl.go @@ -65,8 +65,9 @@ func getDbCreateSQL(mc *imodels.ModelCache, al *alias) (queries []string, tableI for i, fi := range mi.Fields.FieldsDB { column := fmt.Sprintf(" %s%s%s ", Q, fi.Column, Q) col := getColumnTyp(al, fi) - - if fi.Auto { + if fi.DBType != "" { + column += fi.DBType + } else if fi.Auto { switch al.Driver { case DRSqlite, DRPostgres: column += T["auto"] diff --git a/client/orm/ddl_test.go b/client/orm/ddl_test.go index 265ca2ee..237664c7 100644 --- a/client/orm/ddl_test.go +++ b/client/orm/ddl_test.go @@ -42,6 +42,12 @@ type ModelWithEmptyComments struct { Email string `orm:"size(100);description()"` Password string `orm:"size(100);description()"` } +type ModelWithDBTypes struct { + ID int `orm:"column(id);description();db_type(bigserial NOT NULL PRIMARY KEY)"` + UserName string `orm:"size(30);unique;description()"` + Email string `orm:"size(100);description()"` + Password string `orm:"size(100);description()"` +} func TestGetDbCreateSQLWithComment(t *testing.T) { type TestCase struct { @@ -58,14 +64,18 @@ func TestGetDbCreateSQLWithComment(t *testing.T) { testCases = append(testCases, TestCase{name: "model with comments for MySQL", model: &ModelWithComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for 
`github.com/beego/beego/v2/client/orm.ModelWithComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_with_comments` (\n `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY COMMENT 'user id',\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE COMMENT 'user name',\n `email` varchar(100) NOT NULL DEFAULT '' COMMENT 'email',\n `password` varchar(100) NOT NULL DEFAULT '' COMMENT 'password'\n) ENGINE=INNODB;", wantErr: nil}) testCases = append(testCases, TestCase{name: "model without comments for MySQL", model: &ModelWithoutComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithoutComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_without_comments` (\n `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n) ENGINE=INNODB;", wantErr: nil}) testCases = append(testCases, TestCase{name: "model with empty comments for MySQL", model: &ModelWithEmptyComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithEmptyComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_with_empty_comments` (\n `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n) ENGINE=INNODB;", wantErr: nil}) + testCases = append(testCases, TestCase{name: "model with dpType for MySQL", model: &ModelWithDBTypes{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithDBTypes`\n-- --------------------------------------------------\nCREATE 
TABLE IF NOT EXISTS `model_with_d_b_types` (\n `id` bigserial NOT NULL PRIMARY KEY,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n) ENGINE=INNODB;", wantErr: nil}) case DRPostgres: testCases = append(testCases, TestCase{name: "model with comments for Postgres", model: &ModelWithComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS \"model_with_comments\" (\n \"id\" serial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);\nCOMMENT ON COLUMN \"model_with_comments\".\"id\" is 'user id';\nCOMMENT ON COLUMN \"model_with_comments\".\"user_name\" is 'user name';\nCOMMENT ON COLUMN \"model_with_comments\".\"email\" is 'email';\nCOMMENT ON COLUMN \"model_with_comments\".\"password\" is 'password';", wantErr: nil}) testCases = append(testCases, TestCase{name: "model without comments for Postgres", model: &ModelWithoutComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithoutComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS \"model_without_comments\" (\n \"id\" serial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) testCases = append(testCases, TestCase{name: "model with empty comments for Postgres", model: &ModelWithEmptyComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithEmptyComments`\n-- 
--------------------------------------------------\nCREATE TABLE IF NOT EXISTS \"model_with_empty_comments\" (\n \"id\" serial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) + testCases = append(testCases, TestCase{name: "model with dpType for Postgres", model: &ModelWithDBTypes{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithDBTypes`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS \"model_with_d_b_types\" (\n \"id\" bigserial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) case DRSqlite: testCases = append(testCases, TestCase{name: "model with comments for Sqlite", model: &ModelWithComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_with_comments` (\n `id` integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) testCases = append(testCases, TestCase{name: "model without comments for Sqlite", model: &ModelWithoutComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithoutComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_without_comments` (\n `id` integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT 
NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) testCases = append(testCases, TestCase{name: "model with empty comments for Sqlite", model: &ModelWithEmptyComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithEmptyComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_with_empty_comments` (\n `id` integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) + testCases = append(testCases, TestCase{name: "model with dpType for Sqlite", model: &ModelWithDBTypes{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithDBTypes`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_with_d_b_types` (\n `id` bigserial NOT NULL PRIMARY KEY,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) + } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { diff --git a/client/orm/internal/models/models_info_f.go b/client/orm/internal/models/models_info_f.go index 3e5e4d6b..474935b4 100644 --- a/client/orm/internal/models/models_info_f.go +++ b/client/orm/internal/models/models_info_f.go @@ -140,6 +140,7 @@ type FieldInfo struct { OnDelete string Description string TimePrecision *int + DBType string } // NewFieldInfo new field info @@ -308,6 +309,7 @@ checkType: fi.Null = attrs["null"] fi.Index = attrs["index"] fi.Auto = attrs["auto"] + fi.DBType = tags["db_type"] fi.Pk = attrs["pk"] fi.Unique = attrs["unique"] diff --git a/client/orm/internal/models/models_utils.go b/client/orm/internal/models/models_utils.go index 
b5204606..9e950abb 100644 --- a/client/orm/internal/models/models_utils.go +++ b/client/orm/internal/models/models_utils.go @@ -48,6 +48,7 @@ var supportTag = map[string]int{ "type": 2, "description": 2, "precision": 2, + "db_type": 2, } type fn func(string) string From e465249ef6f79351084b3f8eecd0e2d9106b21c0 Mon Sep 17 00:00:00 2001 From: Ming Deng Date: Mon, 18 Sep 2023 11:02:43 +0800 Subject: [PATCH 8/9] orm: PostgreSQL change auto to bigserial (#5415) --- CHANGELOG.md | 1 + client/orm/db_postgres.go | 2 +- client/orm/ddl_test.go | 7 +++---- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0bdb6f64..cf4b594a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ # developing - [fix: refactor Count method](https://github.com/beego/beego/pull/5300) - [support db_type in ddl ](https://github.com/beego/beego/pull/5404) +- [orm: PostgreSQL change auto to bigserial](https://github.com/beego/beego/pull/5415) # v2.1.1 - [httplib: fix unstable unit test which use the httplib.org](https://github.com/beego/beego/pull/5232) - [rft: remove adapter package](https://github.com/beego/beego/pull/5239) diff --git a/client/orm/db_postgres.go b/client/orm/db_postgres.go index 9a7383b8..e7fa6aea 100644 --- a/client/orm/db_postgres.go +++ b/client/orm/db_postgres.go @@ -44,7 +44,7 @@ var postgresOperators = map[string]string{ // postgresql column field types. 
var postgresTypes = map[string]string{ - "auto": "serial NOT NULL PRIMARY KEY", + "auto": "bigserial NOT NULL PRIMARY KEY", "pk": "NOT NULL PRIMARY KEY", "bool": "bool", "string": "varchar(%d)", diff --git a/client/orm/ddl_test.go b/client/orm/ddl_test.go index 237664c7..ba4a4bfb 100644 --- a/client/orm/ddl_test.go +++ b/client/orm/ddl_test.go @@ -66,10 +66,9 @@ func TestGetDbCreateSQLWithComment(t *testing.T) { testCases = append(testCases, TestCase{name: "model with empty comments for MySQL", model: &ModelWithEmptyComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithEmptyComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_with_empty_comments` (\n `id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n) ENGINE=INNODB;", wantErr: nil}) testCases = append(testCases, TestCase{name: "model with dpType for MySQL", model: &ModelWithDBTypes{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithDBTypes`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_with_d_b_types` (\n `id` bigserial NOT NULL PRIMARY KEY,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n) ENGINE=INNODB;", wantErr: nil}) case DRPostgres: - testCases = append(testCases, TestCase{name: "model with comments for Postgres", model: &ModelWithComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS \"model_with_comments\" (\n \"id\" 
serial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);\nCOMMENT ON COLUMN \"model_with_comments\".\"id\" is 'user id';\nCOMMENT ON COLUMN \"model_with_comments\".\"user_name\" is 'user name';\nCOMMENT ON COLUMN \"model_with_comments\".\"email\" is 'email';\nCOMMENT ON COLUMN \"model_with_comments\".\"password\" is 'password';", wantErr: nil}) - testCases = append(testCases, TestCase{name: "model without comments for Postgres", model: &ModelWithoutComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithoutComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS \"model_without_comments\" (\n \"id\" serial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) - testCases = append(testCases, TestCase{name: "model with empty comments for Postgres", model: &ModelWithEmptyComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithEmptyComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS \"model_with_empty_comments\" (\n \"id\" serial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) - testCases = append(testCases, TestCase{name: "model with dpType for Postgres", model: &ModelWithDBTypes{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithDBTypes`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS 
\"model_with_d_b_types\" (\n \"id\" bigserial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) + testCases = append(testCases, TestCase{name: "model with comments for Postgres", model: &ModelWithComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS \"model_with_comments\" (\n \"id\" bigserial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);\nCOMMENT ON COLUMN \"model_with_comments\".\"id\" is 'user id';\nCOMMENT ON COLUMN \"model_with_comments\".\"user_name\" is 'user name';\nCOMMENT ON COLUMN \"model_with_comments\".\"email\" is 'email';\nCOMMENT ON COLUMN \"model_with_comments\".\"password\" is 'password';", wantErr: nil}) + testCases = append(testCases, TestCase{name: "model without comments for Postgres", model: &ModelWithoutComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithoutComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS \"model_without_comments\" (\n \"id\" bigserial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) + testCases = append(testCases, TestCase{name: "model with empty comments for Postgres", model: &ModelWithEmptyComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithEmptyComments`\n-- 
--------------------------------------------------\nCREATE TABLE IF NOT EXISTS \"model_with_empty_comments\" (\n \"id\" bigserial NOT NULL PRIMARY KEY,\n \"user_name\" varchar(30) NOT NULL DEFAULT '' UNIQUE,\n \"email\" varchar(100) NOT NULL DEFAULT '' ,\n \"password\" varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) case DRSqlite: testCases = append(testCases, TestCase{name: "model with comments for Sqlite", model: &ModelWithComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_with_comments` (\n `id` integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) testCases = append(testCases, TestCase{name: "model without comments for Sqlite", model: &ModelWithoutComments{}, wantSQL: "-- --------------------------------------------------\n-- Table Structure for `github.com/beego/beego/v2/client/orm.ModelWithoutComments`\n-- --------------------------------------------------\nCREATE TABLE IF NOT EXISTS `model_without_comments` (\n `id` integer NOT NULL PRIMARY KEY AUTOINCREMENT,\n `user_name` varchar(30) NOT NULL DEFAULT '' UNIQUE,\n `email` varchar(100) NOT NULL DEFAULT '' ,\n `password` varchar(100) NOT NULL DEFAULT '' \n);", wantErr: nil}) From 486fbbf2d528a3093919ca87a8dff751a964544e Mon Sep 17 00:00:00 2001 From: Arthur Kalikiti Date: Mon, 18 Sep 2023 09:28:20 +0200 Subject: [PATCH 9/9] doc: Updated CONTRIBUTING.md to fix some grammatical errors (#5416) Co-authored-by: Arthur Kalikiti --- CHANGELOG.md | 1 + CONTRIBUTING.md | 37 ++++++++++++++++--------------------- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf4b594a..45eb4530 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -1,4 +1,5 @@ # developing +- [refactor: CONTRIBUTING.md file grammatical improvements](https://github.com/beego/beego/issues/5411) - [fix: refactor Count method](https://github.com/beego/beego/pull/5300) - [support db_type in ddl ](https://github.com/beego/beego/pull/5404) - [orm: PostgreSQL change auto to bigserial](https://github.com/beego/beego/pull/5415) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5114c356..39749850 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,15 +1,15 @@ # Contributing to beego -beego is an open source project. +Beego is an open-source project. -It is the work of hundreds of contributors. We appreciate your help! +It is the work of hundreds of contributors. And you could be among them, so we appreciate your help! -Here are instructions to get you started. They are probably not perfect, please let us know if anything feels wrong or +Here are instructions to get you started. They are probably not perfect, so please let us know if anything feels wrong or incomplete. ## Prepare environment -Firstly, install some tools. Execute those commands **outside** the project. Or those command will modify go.mod file. +Firstly, you need to install some tools. Execute the commands below **outside** the project. Otherwise, this action will modify the go.mod file. ```shell script go get -u golang.org/x/tools/cmd/goimports @@ -17,7 +17,7 @@ go get -u golang.org/x/tools/cmd/goimports go get -u github.com/gordonklaus/ineffassign ``` -Put those lines into your pre-commit githook script: +Put the lines below in your pre-commit git hook script: ```shell script goimports -w -format-only ./ @@ -29,17 +29,17 @@ staticcheck -show-ignored -checks "-ST1017,-U1000,-ST1005,-S1034,-S1012,-SA4006, ## Prepare middleware -Beego uses many middlewares, including MySQL, Redis, SSDB and so on. +Beego uses many middlewares, including MySQL, Redis, SSDB amongst others. -We provide docker compose file to start all middlewares.
+We provide a docker-compose file to start all middlewares. -You can run: +You can run the following command to start all middlewares: ```shell script docker-compose -f scripts/test_docker_compose.yaml up -d ``` -Unit tests read addresses from environment, here is an example: +Unit tests read addresses from environmental variables, you can set them up as shown in the example below: ```shell script export ORM_DRIVER=mysql @@ -53,23 +53,18 @@ export SSDB_ADDR="192.168.0.105:8888" ### Pull requests -First, beego follow the gitflow. So please send you pull request to **develop** branch. We will close the pull -request to master branch. +Beego follows the gitflow. And as such, please submit your pull request to the **develop** branch. We will close the pull request by merging it into the master branch. -By the way, please don't forget update the `CHANGELOG.md` before you send pull request. -You can just add your pull request following 'developing' section in `CHANGELOG.md`. +**NOTE:** Don't forget to update the `CHANGELOG.md` file by adding the changes made under the **developing** section. We'll release them in the next Beego version. -We are always happy to receive pull requests, and do our best to review them as fast as possible. Not sure if that typo -is worth a pull request? Do it! We will appreciate it. +We are always happy to receive pull requests, and do our best to review them as fast as possible. Not sure if that typo is worth a pull request? Just do it! We will appreciate it. Don't forget to rebase your commits! -If your pull request is not accepted on the first try, don't be discouraged! Sometimes we can make a mistake, please do -more explaining for us. We will appreciate it. +If your pull request is rejected, don't be discouraged. Sometimes we make mistakes. You can provide us with more context by explaining your issue as clearly as possible. -We're trying very hard to keep beego simple and fast. We don't want it to do everything for everybody.
This means that -we might decide against incorporating a new feature. But we will give you some advice on how to do it in other way. +In our pursuit of maintaining Beego's simplicity and speed, we might not accept some feature requests. We don't want it to do everything for everybody. For this reason, we might decide against incorporating a new feature. However, we will provide guidance on achieving the same thing using a different approach. ### Create issues @@ -86,6 +81,6 @@ Also when filing an issue, make sure to answer these five questions: ### but check existing issues and docs first! -Please take a moment to check that an issue doesn't already exist documenting your bug report or improvement proposal. -If it does, it never hurts to add a quick "+1" or "I have this problem too". This will help prioritize the most common +Take a moment to check that an issue documenting your bug report or improvement proposal doesn't already exist. +If it does, it doesn't hurt to add a quick "+1" or "I have this problem too". This will help prioritize the most common problems and requests.