+// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package model
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/friendsofgo/errors"
+ "github.com/volatiletech/sqlboiler/v4/boil"
+ "github.com/volatiletech/sqlboiler/v4/queries"
+ "github.com/volatiletech/sqlboiler/v4/queries/qm"
+ "github.com/volatiletech/sqlboiler/v4/queries/qmhelper"
+ "github.com/volatiletech/strmangle"
+)
+
+// Config is an object representing the database table.
+// NOTE(review): SQLBoiler-generated (see the "DO NOT EDIT" file header); hand
+// edits here are lost on regeneration — change the schema/generator instead.
+type Config struct {
+ ID int64 `boil:"id" json:"id" toml:"id" yaml:"id"`
+ MetricID int64 `boil:"metric_id" json:"metric_id" toml:"metric_id" yaml:"metric_id"`
+ Opt string `boil:"opt" json:"opt" toml:"opt" yaml:"opt"`
+ Val string `boil:"val" json:"val" toml:"val" yaml:"val"`
+
+ // R holds eagerly-loaded relationships (see configR below); L exposes the
+ // Load* methods. Both are excluded from serialization via the "-" tags.
+ R *configR `boil:"-" json:"-" toml:"-" yaml:"-"`
+ L configL `boil:"-" json:"-" toml:"-" yaml:"-"`
+}
+
+// ConfigColumns maps struct fields to their bare column names.
+var ConfigColumns = struct {
+ ID string
+ MetricID string
+ Opt string
+ Val string
+}{
+ ID: "id",
+ MetricID: "metric_id",
+ Opt: "opt",
+ Val: "val",
+}
+
+// ConfigTableColumns maps struct fields to table-qualified column names,
+// useful in joins where bare names would be ambiguous.
+var ConfigTableColumns = struct {
+ ID string
+ MetricID string
+ Opt string
+ Val string
+}{
+ ID: "config.id",
+ MetricID: "config.metric_id",
+ Opt: "config.opt",
+ Val: "config.val",
+}
+
+// Generated where
+
+// whereHelperint64 builds WHERE-clause query mods for an int64 column.
+// These helpers only construct qm.QueryMod values; they never execute SQL.
+type whereHelperint64 struct{ field string }
+
+func (w whereHelperint64) EQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
+func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
+func (w whereHelperint64) LT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
+func (w whereHelperint64) LTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
+func (w whereHelperint64) GT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
+func (w whereHelperint64) GTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
+// IN widens the typed slice to []interface{} because qm.WhereIn is variadic
+// over interface{}; the "?" is expanded by the query builder.
+func (w whereHelperint64) IN(slice []int64) qm.QueryMod {
+ values := make([]interface{}, 0, len(slice))
+ for _, value := range slice {
+ values = append(values, value)
+ }
+ return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
+}
+func (w whereHelperint64) NIN(slice []int64) qm.QueryMod {
+ values := make([]interface{}, 0, len(slice))
+ for _, value := range slice {
+ values = append(values, value)
+ }
+ return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...)
+}
+
+// whereHelperstring is the string-column analogue of whereHelperint64.
+type whereHelperstring struct{ field string }
+
+func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
+func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
+func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
+func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
+func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
+func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
+func (w whereHelperstring) IN(slice []string) qm.QueryMod {
+ values := make([]interface{}, 0, len(slice))
+ for _, value := range slice {
+ values = append(values, value)
+ }
+ return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
+}
+func (w whereHelperstring) NIN(slice []string) qm.QueryMod {
+ values := make([]interface{}, 0, len(slice))
+ for _, value := range slice {
+ values = append(values, value)
+ }
+ return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...)
+}
+
+// ConfigWhere provides typed where-clause helpers for each column, with
+// fully quoted, table-qualified field names.
+var ConfigWhere = struct {
+ ID whereHelperint64
+ MetricID whereHelperint64
+ Opt whereHelperstring
+ Val whereHelperstring
+}{
+ ID: whereHelperint64{field: "\"config\".\"id\""},
+ MetricID: whereHelperint64{field: "\"config\".\"metric_id\""},
+ Opt: whereHelperstring{field: "\"config\".\"opt\""},
+ Val: whereHelperstring{field: "\"config\".\"val\""},
+}
+
+// ConfigRels is where relationship names are stored.
+var ConfigRels = struct {
+ Metric string
+}{
+ Metric: "Metric",
+}
+
+// configR is where relationships are stored.
+type configR struct {
+ Metric *Metric `boil:"Metric" json:"Metric" toml:"Metric" yaml:"Metric"`
+}
+
+// NewStruct creates a new relationship struct
+func (*configR) NewStruct() *configR {
+ return &configR{}
+}
+
+// GetMetric returns the loaded Metric relation, or nil when nothing has been
+// eager-loaded. The nil-receiver check makes it safe on an unloaded o.R.
+func (r *configR) GetMetric() *Metric {
+ if r == nil {
+ return nil
+ }
+ return r.Metric
+}
+
+// configL is where Load methods for each relationship are stored.
+type configL struct{}
+
+// Column classifications used by insert/update/upsert column inference.
+// NOTE(review): "id" is both default-valued and generated, so it is stripped
+// from write lists (see SetComplement with configGeneratedColumns below).
+var (
+ configAllColumns = []string{"id", "metric_id", "opt", "val"}
+ configColumnsWithoutDefault = []string{"metric_id", "opt", "val"}
+ configColumnsWithDefault = []string{"id"}
+ configPrimaryKeyColumns = []string{"id"}
+ configGeneratedColumns = []string{"id"}
+)
+
+type (
+ // ConfigSlice is an alias for a slice of pointers to Config.
+ // This should almost always be used instead of []Config.
+ ConfigSlice []*Config
+ // ConfigHook is the signature for custom Config hook methods
+ ConfigHook func(context.Context, boil.ContextExecutor, *Config) error
+
+ // configQuery wraps queries.Query so finishers (One/All/Count/...) can be
+ // attached without exposing the raw query type.
+ configQuery struct {
+ *queries.Query
+ }
+)
+
+// Cache for insert, update and upsert
+// Query strings and struct-field mappings are memoized per column set; the
+// RWMutexes guard the maps for concurrent use.
+var (
+ configType = reflect.TypeOf(&Config{})
+ configMapping = queries.MakeStructMapping(configType)
+ configPrimaryKeyMapping, _ = queries.BindMapping(configType, configMapping, configPrimaryKeyColumns)
+ configInsertCacheMut sync.RWMutex
+ configInsertCache = make(map[string]insertCache)
+ configUpdateCacheMut sync.RWMutex
+ configUpdateCache = make(map[string]updateCache)
+ configUpsertCacheMut sync.RWMutex
+ configUpsertCache = make(map[string]insertCache)
+)
+
+var (
+ // Force time package dependency for automated UpdatedAt/CreatedAt.
+ _ = time.Second
+ // Force qmhelper dependency for where clause generation (which doesn't
+ // always happen)
+ _ = qmhelper.Where
+)
+
+// Registered hook slices, one per hook point.
+// NOTE(review): appends in AddConfigHook are not mutex-protected, so hooks
+// must be registered during program init, before concurrent DB use.
+var configAfterSelectHooks []ConfigHook
+
+var configBeforeInsertHooks []ConfigHook
+var configAfterInsertHooks []ConfigHook
+
+var configBeforeUpdateHooks []ConfigHook
+var configAfterUpdateHooks []ConfigHook
+
+var configBeforeDeleteHooks []ConfigHook
+var configAfterDeleteHooks []ConfigHook
+
+var configBeforeUpsertHooks []ConfigHook
+var configAfterUpsertHooks []ConfigHook
+
+// doAfterSelectHooks executes all "after Select" hooks.
+// All do*Hooks runners below share the same shape: skip entirely when hooks
+// are disabled on the context, otherwise run registered hooks in order and
+// stop at the first error.
+func (o *Config) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range configAfterSelectHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeInsertHooks executes all "before insert" hooks.
+func (o *Config) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range configBeforeInsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterInsertHooks executes all "after Insert" hooks.
+func (o *Config) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range configAfterInsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeUpdateHooks executes all "before Update" hooks.
+func (o *Config) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range configBeforeUpdateHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterUpdateHooks executes all "after Update" hooks.
+func (o *Config) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range configAfterUpdateHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeDeleteHooks executes all "before Delete" hooks.
+func (o *Config) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range configBeforeDeleteHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterDeleteHooks executes all "after Delete" hooks.
+func (o *Config) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range configAfterDeleteHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeUpsertHooks executes all "before Upsert" hooks.
+func (o *Config) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range configBeforeUpsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterUpsertHooks executes all "after Upsert" hooks.
+func (o *Config) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range configAfterUpsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// AddConfigHook registers your hook function for all future operations.
+// NOTE(review): registration appends to package-level slices without locking;
+// call only during single-threaded startup.
+func AddConfigHook(hookPoint boil.HookPoint, configHook ConfigHook) {
+ switch hookPoint {
+ case boil.AfterSelectHook:
+ configAfterSelectHooks = append(configAfterSelectHooks, configHook)
+ case boil.BeforeInsertHook:
+ configBeforeInsertHooks = append(configBeforeInsertHooks, configHook)
+ case boil.AfterInsertHook:
+ configAfterInsertHooks = append(configAfterInsertHooks, configHook)
+ case boil.BeforeUpdateHook:
+ configBeforeUpdateHooks = append(configBeforeUpdateHooks, configHook)
+ case boil.AfterUpdateHook:
+ configAfterUpdateHooks = append(configAfterUpdateHooks, configHook)
+ case boil.BeforeDeleteHook:
+ configBeforeDeleteHooks = append(configBeforeDeleteHooks, configHook)
+ case boil.AfterDeleteHook:
+ configAfterDeleteHooks = append(configAfterDeleteHooks, configHook)
+ case boil.BeforeUpsertHook:
+ configBeforeUpsertHooks = append(configBeforeUpsertHooks, configHook)
+ case boil.AfterUpsertHook:
+ configAfterUpsertHooks = append(configAfterUpsertHooks, configHook)
+ }
+}
+
+// One returns a single config record from the query.
+// Returns bare sql.ErrNoRows (unwrapped) when no row matches, so callers can
+// test with errors.Is(err, sql.ErrNoRows).
+func (q configQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Config, error) {
+ o := &Config{}
+
+ queries.SetLimit(q.Query, 1)
+
+ err := q.Bind(ctx, exec, o)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, sql.ErrNoRows
+ }
+ return nil, errors.Wrap(err, "model: failed to execute a one query for config")
+ }
+
+ if err := o.doAfterSelectHooks(ctx, exec); err != nil {
+ return o, err
+ }
+
+ return o, nil
+}
+
+// All returns all Config records from the query.
+func (q configQuery) All(ctx context.Context, exec boil.ContextExecutor) (ConfigSlice, error) {
+ var o []*Config
+
+ err := q.Bind(ctx, exec, &o)
+ if err != nil {
+ return nil, errors.Wrap(err, "model: failed to assign all query results to Config slice")
+ }
+
+ // Run after-select hooks per row, but only when any are registered.
+ if len(configAfterSelectHooks) != 0 {
+ for _, obj := range o {
+ if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
+ return o, err
+ }
+ }
+ }
+
+ return o, nil
+}
+
+// Count returns the count of all Config records in the query.
+func (q configQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ var count int64
+
+ // Clear any custom SELECT list so COUNT(*) can be applied.
+ queries.SetSelect(q.Query, nil)
+ queries.SetCount(q.Query)
+
+ err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: failed to count config rows")
+ }
+
+ return count, nil
+}
+
+// Exists checks if the row exists in the table.
+func (q configQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
+ var count int64
+
+ queries.SetSelect(q.Query, nil)
+ queries.SetCount(q.Query)
+ queries.SetLimit(q.Query, 1)
+
+ err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
+ if err != nil {
+ return false, errors.Wrap(err, "model: failed to check if config exists")
+ }
+
+ return count > 0, nil
+}
+
+// Metric pointed to by the foreign key.
+// Builds (but does not execute) a query for the parent Metric row; extra mods
+// are appended after the foreign-key filter.
+func (o *Config) Metric(mods ...qm.QueryMod) metricQuery {
+ queryMods := []qm.QueryMod{
+ qm.Where("\"id\" = ?", o.MetricID),
+ }
+
+ queryMods = append(queryMods, mods...)
+
+ return Metrics(queryMods...)
+}
+
+// LoadMetric allows an eager lookup of values, cached into the
+// loaded structs of the objects. This is for an N-1 relationship.
+// maybeConfig is either *Config (singular) or *[]*Config; results are wired
+// into both sides of the relation (obj.R.Metric and foreign.R.Configs).
+func (configL) LoadMetric(ctx context.Context, e boil.ContextExecutor, singular bool, maybeConfig interface{}, mods queries.Applicator) error {
+ var slice []*Config
+ var object *Config
+
+ if singular {
+ var ok bool
+ object, ok = maybeConfig.(*Config)
+ if !ok {
+ object = new(Config)
+ // Fallback for view/embedded types that embed Config.
+ ok = queries.SetFromEmbeddedStruct(&object, &maybeConfig)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeConfig))
+ }
+ }
+ } else {
+ s, ok := maybeConfig.(*[]*Config)
+ if ok {
+ slice = *s
+ } else {
+ ok = queries.SetFromEmbeddedStruct(&slice, maybeConfig)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeConfig))
+ }
+ }
+ }
+
+ args := make([]interface{}, 0, 1)
+ if singular {
+ if object.R == nil {
+ object.R = &configR{}
+ }
+ args = append(args, object.MetricID)
+
+ } else {
+ Outer:
+ // Collect distinct metric_id values; the linear dedup scan is O(n^2)
+ // but fine for typical eager-load batch sizes (generated code).
+ for _, obj := range slice {
+ if obj.R == nil {
+ obj.R = &configR{}
+ }
+
+ for _, a := range args {
+ if a == obj.MetricID {
+ continue Outer
+ }
+ }
+
+ args = append(args, obj.MetricID)
+
+ }
+ }
+
+ if len(args) == 0 {
+ return nil
+ }
+
+ query := NewQuery(
+ qm.From(`metric`),
+ qm.WhereIn(`metric.id in ?`, args...),
+ )
+ if mods != nil {
+ mods.Apply(query)
+ }
+
+ results, err := query.QueryContext(ctx, e)
+ if err != nil {
+ return errors.Wrap(err, "failed to eager load Metric")
+ }
+
+ var resultSlice []*Metric
+ if err = queries.Bind(results, &resultSlice); err != nil {
+ return errors.Wrap(err, "failed to bind eager loaded slice Metric")
+ }
+
+ if err = results.Close(); err != nil {
+ return errors.Wrap(err, "failed to close results of eager load for metric")
+ }
+ if err = results.Err(); err != nil {
+ return errors.Wrap(err, "error occurred during iteration of eager loaded relations for metric")
+ }
+
+ // NOTE(review): runs the *config* after-select hooks against the loaded
+ // Metric rows — this mirrors upstream SQLBoiler template behavior.
+ if len(configAfterSelectHooks) != 0 {
+ for _, obj := range resultSlice {
+ if err := obj.doAfterSelectHooks(ctx, e); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(resultSlice) == 0 {
+ return nil
+ }
+
+ if singular {
+ foreign := resultSlice[0]
+ object.R.Metric = foreign
+ if foreign.R == nil {
+ foreign.R = &metricR{}
+ }
+ foreign.R.Configs = append(foreign.R.Configs, object)
+ return nil
+ }
+
+ // Match each local row to its parent by foreign key and link both sides.
+ for _, local := range slice {
+ for _, foreign := range resultSlice {
+ if local.MetricID == foreign.ID {
+ local.R.Metric = foreign
+ if foreign.R == nil {
+ foreign.R = &metricR{}
+ }
+ foreign.R.Configs = append(foreign.R.Configs, local)
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+// SetMetric of the config to the related item.
+// Sets o.R.Metric to related.
+// Adds o to related.R.Configs.
+// When insert is true the related Metric is inserted first; the local row's
+// metric_id is then updated in the database and in memory, and the in-memory
+// relationship caches on both sides are kept consistent.
+func (o *Config) SetMetric(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Metric) error {
+ var err error
+ if insert {
+ if err = related.Insert(ctx, exec, boil.Infer()); err != nil {
+ return errors.Wrap(err, "failed to insert into foreign table")
+ }
+ }
+
+ updateQuery := fmt.Sprintf(
+ "UPDATE \"config\" SET %s WHERE %s",
+ strmangle.SetParamNames("\"", "\"", 0, []string{"metric_id"}),
+ strmangle.WhereClause("\"", "\"", 0, configPrimaryKeyColumns),
+ )
+ values := []interface{}{related.ID, o.ID}
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, updateQuery)
+ fmt.Fprintln(writer, values)
+ }
+ if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
+ return errors.Wrap(err, "failed to update local table")
+ }
+
+ o.MetricID = related.ID
+ if o.R == nil {
+ o.R = &configR{
+ Metric: related,
+ }
+ } else {
+ o.R.Metric = related
+ }
+
+ if related.R == nil {
+ related.R = &metricR{
+ Configs: ConfigSlice{o},
+ }
+ } else {
+ related.R.Configs = append(related.R.Configs, o)
+ }
+
+ return nil
+}
+
+// Configs retrieves all the records using an executor.
+// Returns a lazily-built query; add qm mods to filter/sort before finishing.
+func Configs(mods ...qm.QueryMod) configQuery {
+ mods = append(mods, qm.From("\"config\""))
+ q := NewQuery(mods...)
+ if len(queries.GetSelect(q)) == 0 {
+ queries.SetSelect(q, []string{"\"config\".*"})
+ }
+
+ return configQuery{q}
+}
+
+// FindConfig retrieves a single record by ID with an executor.
+// If selectCols is empty Find will return all columns.
+// Like One, returns bare sql.ErrNoRows when the ID does not exist.
+func FindConfig(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*Config, error) {
+ configObj := &Config{}
+
+ sel := "*"
+ if len(selectCols) > 0 {
+ sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
+ }
+ // "?" placeholder: this file is generated for the SQLite dialect.
+ query := fmt.Sprintf(
+ "select %s from \"config\" where \"id\"=?", sel,
+ )
+
+ q := queries.Raw(query, iD)
+
+ err := q.Bind(ctx, exec, configObj)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, sql.ErrNoRows
+ }
+ return nil, errors.Wrap(err, "model: unable to select from config")
+ }
+
+ if err = configObj.doAfterSelectHooks(ctx, exec); err != nil {
+ return configObj, err
+ }
+
+ return configObj, nil
+}
+
+// Insert a single record using an executor.
+// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
+// The built query and struct-field mapping are cached per column set under
+// configInsertCacheMut; generated "id" is excluded from the write list and
+// read back via RETURNING when supported.
+func (o *Config) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
+ if o == nil {
+ return errors.New("model: no config provided for insertion")
+ }
+
+ var err error
+
+ if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
+ return err
+ }
+
+ nzDefaults := queries.NonZeroDefaultSet(configColumnsWithDefault, o)
+
+ key := makeCacheKey(columns, nzDefaults)
+ configInsertCacheMut.RLock()
+ cache, cached := configInsertCache[key]
+ configInsertCacheMut.RUnlock()
+
+ if !cached {
+ wl, returnColumns := columns.InsertColumnSet(
+ configAllColumns,
+ configColumnsWithDefault,
+ configColumnsWithoutDefault,
+ nzDefaults,
+ )
+ // Never write DB-generated columns ("id").
+ wl = strmangle.SetComplement(wl, configGeneratedColumns)
+
+ cache.valueMapping, err = queries.BindMapping(configType, configMapping, wl)
+ if err != nil {
+ return err
+ }
+ cache.retMapping, err = queries.BindMapping(configType, configMapping, returnColumns)
+ if err != nil {
+ return err
+ }
+ if len(wl) != 0 {
+ cache.query = fmt.Sprintf("INSERT INTO \"config\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
+ } else {
+ cache.query = "INSERT INTO \"config\" %sDEFAULT VALUES%s"
+ }
+
+ var queryOutput, queryReturning string
+
+ if len(cache.retMapping) != 0 {
+ queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\""))
+ }
+
+ // Splice the (possibly empty) OUTPUT/RETURNING fragments into the
+ // %s placeholders reserved above.
+ cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
+ }
+
+ value := reflect.Indirect(reflect.ValueOf(o))
+ vals := queries.ValuesFromMapping(value, cache.valueMapping)
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, cache.query)
+ fmt.Fprintln(writer, vals)
+ }
+
+ if len(cache.retMapping) != 0 {
+ // Scan returned defaults (e.g. the new id) back into the struct.
+ err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
+ } else {
+ _, err = exec.ExecContext(ctx, cache.query, vals...)
+ }
+
+ if err != nil {
+ return errors.Wrap(err, "model: unable to insert into config")
+ }
+
+ if !cached {
+ configInsertCacheMut.Lock()
+ configInsertCache[key] = cache
+ configInsertCacheMut.Unlock()
+ }
+
+ return o.doAfterInsertHooks(ctx, exec)
+}
+
+// Update uses an executor to update the Config.
+// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
+// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
+// Returns the number of rows affected.
+func (o *Config) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
+ var err error
+ if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+ key := makeCacheKey(columns, nil)
+ configUpdateCacheMut.RLock()
+ cache, cached := configUpdateCache[key]
+ configUpdateCacheMut.RUnlock()
+
+ if !cached {
+ wl := columns.UpdateColumnSet(
+ configAllColumns,
+ configPrimaryKeyColumns,
+ )
+ wl = strmangle.SetComplement(wl, configGeneratedColumns)
+
+ if !columns.IsWhitelist() {
+ // NOTE(review): template boilerplate — this table has no
+ // "created_at" column, so this SetComplement is a no-op here.
+ wl = strmangle.SetComplement(wl, []string{"created_at"})
+ }
+ if len(wl) == 0 {
+ return 0, errors.New("model: unable to update config, could not build whitelist")
+ }
+
+ cache.query = fmt.Sprintf("UPDATE \"config\" SET %s WHERE %s",
+ strmangle.SetParamNames("\"", "\"", 0, wl),
+ strmangle.WhereClause("\"", "\"", 0, configPrimaryKeyColumns),
+ )
+ // Primary-key values are appended last to feed the WHERE clause.
+ cache.valueMapping, err = queries.BindMapping(configType, configMapping, append(wl, configPrimaryKeyColumns...))
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, cache.query)
+ fmt.Fprintln(writer, values)
+ }
+ var result sql.Result
+ result, err = exec.ExecContext(ctx, cache.query, values...)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to update config row")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: failed to get rows affected by update for config")
+ }
+
+ if !cached {
+ configUpdateCacheMut.Lock()
+ configUpdateCache[key] = cache
+ configUpdateCacheMut.Unlock()
+ }
+
+ return rowsAff, o.doAfterUpdateHooks(ctx, exec)
+}
+
+// UpdateAll updates all rows with the specified column values.
+// No per-row hooks run here; this issues one UPDATE over the query's rows.
+func (q configQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
+ queries.SetUpdate(q.Query, cols)
+
+ result, err := q.Query.ExecContext(ctx, exec)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to update all for config")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to retrieve rows affected for config")
+ }
+
+ return rowsAff, nil
+}
+
+// UpdateAll updates all rows with the specified column values, using an executor.
+// Matches rows by the primary keys of the slice elements; update hooks are
+// not invoked (consistent with the query-based UpdateAll above).
+func (o ConfigSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
+ ln := int64(len(o))
+ if ln == 0 {
+ return 0, nil
+ }
+
+ if len(cols) == 0 {
+ return 0, errors.New("model: update all requires at least one column argument")
+ }
+
+ colNames := make([]string, len(cols))
+ args := make([]interface{}, len(cols))
+
+ i := 0
+ for name, value := range cols {
+ colNames[i] = name
+ args[i] = value
+ i++
+ }
+
+ // Append all of the primary key values for each column
+ for _, obj := range o {
+ pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), configPrimaryKeyMapping)
+ args = append(args, pkeyArgs...)
+ }
+
+ sql := fmt.Sprintf("UPDATE \"config\" SET %s WHERE %s",
+ strmangle.SetParamNames("\"", "\"", 0, colNames),
+ strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, configPrimaryKeyColumns, len(o)))
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, args...)
+ }
+ result, err := exec.ExecContext(ctx, sql, args...)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to update all in config slice")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to retrieve rows affected all in update all config")
+ }
+ return rowsAff, nil
+}
+
+// Upsert attempts an insert using an executor, and does an update or ignore on conflict.
+// See boil.Columns documentation for how to properly use updateColumns and insertColumns.
+// Uses buildUpsertQuerySQLite (ON CONFLICT ... DO UPDATE / DO NOTHING);
+// conflictColumns defaults to the primary key when empty.
+func (o *Config) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error {
+ if o == nil {
+ return errors.New("model: no config provided for upsert")
+ }
+
+ if err := o.doBeforeUpsertHooks(ctx, exec); err != nil {
+ return err
+ }
+
+ nzDefaults := queries.NonZeroDefaultSet(configColumnsWithDefault, o)
+
+ // Build cache key in-line uglily - mysql vs psql problems
+ // The key encodes every input that changes the generated SQL, so cached
+ // statements are only reused for identical upsert shapes.
+ buf := strmangle.GetBuffer()
+ if updateOnConflict {
+ buf.WriteByte('t')
+ } else {
+ buf.WriteByte('f')
+ }
+ buf.WriteByte('.')
+ for _, c := range conflictColumns {
+ buf.WriteString(c)
+ }
+ buf.WriteByte('.')
+ buf.WriteString(strconv.Itoa(updateColumns.Kind))
+ for _, c := range updateColumns.Cols {
+ buf.WriteString(c)
+ }
+ buf.WriteByte('.')
+ buf.WriteString(strconv.Itoa(insertColumns.Kind))
+ for _, c := range insertColumns.Cols {
+ buf.WriteString(c)
+ }
+ buf.WriteByte('.')
+ for _, c := range nzDefaults {
+ buf.WriteString(c)
+ }
+ key := buf.String()
+ strmangle.PutBuffer(buf)
+
+ configUpsertCacheMut.RLock()
+ cache, cached := configUpsertCache[key]
+ configUpsertCacheMut.RUnlock()
+
+ var err error
+
+ if !cached {
+ insert, ret := insertColumns.InsertColumnSet(
+ configAllColumns,
+ configColumnsWithDefault,
+ configColumnsWithoutDefault,
+ nzDefaults,
+ )
+ update := updateColumns.UpdateColumnSet(
+ configAllColumns,
+ configPrimaryKeyColumns,
+ )
+
+ if updateOnConflict && len(update) == 0 {
+ return errors.New("model: unable to upsert config, could not build update column list")
+ }
+
+ conflict := conflictColumns
+ if len(conflict) == 0 {
+ conflict = make([]string, len(configPrimaryKeyColumns))
+ copy(conflict, configPrimaryKeyColumns)
+ }
+ cache.query = buildUpsertQuerySQLite(dialect, "\"config\"", updateOnConflict, ret, update, conflict, insert)
+
+ cache.valueMapping, err = queries.BindMapping(configType, configMapping, insert)
+ if err != nil {
+ return err
+ }
+ if len(ret) != 0 {
+ cache.retMapping, err = queries.BindMapping(configType, configMapping, ret)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ value := reflect.Indirect(reflect.ValueOf(o))
+ vals := queries.ValuesFromMapping(value, cache.valueMapping)
+ var returns []interface{}
+ if len(cache.retMapping) != 0 {
+ returns = queries.PtrsFromMapping(value, cache.retMapping)
+ }
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, cache.query)
+ fmt.Fprintln(writer, vals)
+ }
+ if len(cache.retMapping) != 0 {
+ err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...)
+ if errors.Is(err, sql.ErrNoRows) {
+ err = nil // Postgres doesn't return anything when there's no update
+ }
+ } else {
+ _, err = exec.ExecContext(ctx, cache.query, vals...)
+ }
+ if err != nil {
+ return errors.Wrap(err, "model: unable to upsert config")
+ }
+
+ if !cached {
+ configUpsertCacheMut.Lock()
+ configUpsertCache[key] = cache
+ configUpsertCacheMut.Unlock()
+ }
+
+ return o.doAfterUpsertHooks(ctx, exec)
+}
+
+// Delete deletes a single Config record with an executor.
+// Delete will match against the primary key column to find the record to delete.
+// Before/after delete hooks run; returns rows affected.
+func (o *Config) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ if o == nil {
+ return 0, errors.New("model: no Config provided for delete")
+ }
+
+ if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+
+ args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), configPrimaryKeyMapping)
+ sql := "DELETE FROM \"config\" WHERE \"id\"=?"
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, args...)
+ }
+ result, err := exec.ExecContext(ctx, sql, args...)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to delete from config")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: failed to get rows affected by delete for config")
+ }
+
+ if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+
+ return rowsAff, nil
+}
+
+// DeleteAll deletes all matching rows.
+// Query-based variant: executes one DELETE; no per-row hooks run.
+func (q configQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ if q.Query == nil {
+ return 0, errors.New("model: no configQuery provided for delete all")
+ }
+
+ queries.SetDelete(q.Query)
+
+ result, err := q.Query.ExecContext(ctx, exec)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to delete all from config")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: failed to get rows affected by deleteall for config")
+ }
+
+ return rowsAff, nil
+}
+
+// DeleteAll deletes all rows in the slice, using an executor.
+// Slice-based variant: runs before/after delete hooks per element and issues
+// one DELETE keyed on all primary keys.
+func (o ConfigSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ if len(o) == 0 {
+ return 0, nil
+ }
+
+ if len(configBeforeDeleteHooks) != 0 {
+ for _, obj := range o {
+ if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ var args []interface{}
+ for _, obj := range o {
+ pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), configPrimaryKeyMapping)
+ args = append(args, pkeyArgs...)
+ }
+
+ sql := "DELETE FROM \"config\" WHERE " +
+ strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, configPrimaryKeyColumns, len(o))
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, args)
+ }
+ result, err := exec.ExecContext(ctx, sql, args...)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to delete all from config slice")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: failed to get rows affected by deleteall for config")
+ }
+
+ if len(configAfterDeleteHooks) != 0 {
+ for _, obj := range o {
+ if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ return rowsAff, nil
+}
+
+// Reload refetches the object from the database
+// using the primary keys with an executor.
+// The receiver is overwritten wholesale, including R/L fields.
+func (o *Config) Reload(ctx context.Context, exec boil.ContextExecutor) error {
+ ret, err := FindConfig(ctx, exec, o.ID)
+ if err != nil {
+ return err
+ }
+
+ *o = *ret
+ return nil
+}
+
+// ReloadAll refetches every row with matching primary key column values
+// and overwrites the original object slice with the newly updated slice.
+// NOTE(review): rows no longer present in the DB simply drop out of the
+// slice; order follows the database result, not the original slice.
+func (o *ConfigSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
+ if o == nil || len(*o) == 0 {
+ return nil
+ }
+
+ slice := ConfigSlice{}
+ var args []interface{}
+ for _, obj := range *o {
+ pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), configPrimaryKeyMapping)
+ args = append(args, pkeyArgs...)
+ }
+
+ sql := "SELECT \"config\".* FROM \"config\" WHERE " +
+ strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, configPrimaryKeyColumns, len(*o))
+
+ q := queries.Raw(sql, args...)
+
+ err := q.Bind(ctx, exec, &slice)
+ if err != nil {
+ return errors.Wrap(err, "model: unable to reload all in ConfigSlice")
+ }
+
+ *o = slice
+
+ return nil
+}
+
+// ConfigExists checks if the Config row exists.
+// Uses SELECT EXISTS so the database short-circuits after the first match.
+func ConfigExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) {
+ var exists bool
+ sql := "select exists(select 1 from \"config\" where \"id\"=? limit 1)"
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, iD)
+ }
+ row := exec.QueryRowContext(ctx, sql, iD)
+
+ err := row.Scan(&exists)
+ if err != nil {
+ return false, errors.Wrap(err, "model: unable to check if config exists")
+ }
+
+ return exists, nil
+}
+// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package model
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/friendsofgo/errors"
+ "github.com/volatiletech/null/v8"
+ "github.com/volatiletech/sqlboiler/v4/boil"
+ "github.com/volatiletech/sqlboiler/v4/queries"
+ "github.com/volatiletech/sqlboiler/v4/queries/qm"
+ "github.com/volatiletech/sqlboiler/v4/queries/qmhelper"
+ "github.com/volatiletech/strmangle"
+)
+
// Log is an object representing the database table.
type Log struct {
	ID       int64 `boil:"id" json:"id" toml:"id" yaml:"id"`
	MetricID int64 `boil:"metric_id" json:"metric_id" toml:"metric_id" yaml:"metric_id"`
	Value    int64 `boil:"value" json:"value" toml:"value" yaml:"value"`
	// Timestamp is nullable (null.Time) and omitted from JSON/YAML when unset.
	Timestamp null.Time `boil:"timestamp" json:"timestamp,omitempty" toml:"timestamp" yaml:"timestamp,omitempty"`

	// R caches eagerly loaded relationships; L exposes their Load methods.
	// Both are excluded from every serialization format.
	R *logR `boil:"-" json:"-" toml:"-" yaml:"-"`
	L logL  `boil:"-" json:"-" toml:"-" yaml:"-"`
}
+
// LogColumns holds the bare column names of the log table.
var LogColumns = struct {
	ID        string
	MetricID  string
	Value     string
	Timestamp string
}{
	ID:        "id",
	MetricID:  "metric_id",
	Value:     "value",
	Timestamp: "timestamp",
}

// LogTableColumns holds the table-qualified column names ("log.<col>"),
// useful in joins where unqualified names would be ambiguous.
var LogTableColumns = struct {
	ID        string
	MetricID  string
	Value     string
	Timestamp string
}{
	ID:        "log.id",
	MetricID:  "log.metric_id",
	Value:     "log.value",
	Timestamp: "log.timestamp",
}
+
+// Generated where
+
// whereHelpernull_Time builds WHERE-clause query mods for a nullable
// time column identified by field.
type whereHelpernull_Time struct{ field string }

// EQ builds an equality filter via qmhelper.WhereNullEQ.
func (w whereHelpernull_Time) EQ(x null.Time) qm.QueryMod {
	return qmhelper.WhereNullEQ(w.field, false, x)
}

// NEQ builds an inequality filter via qmhelper.WhereNullEQ.
func (w whereHelpernull_Time) NEQ(x null.Time) qm.QueryMod {
	return qmhelper.WhereNullEQ(w.field, true, x)
}

// LT builds a "field < x" filter.
func (w whereHelpernull_Time) LT(x null.Time) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LT, x)
}

// LTE builds a "field <= x" filter.
func (w whereHelpernull_Time) LTE(x null.Time) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LTE, x)
}

// GT builds a "field > x" filter.
func (w whereHelpernull_Time) GT(x null.Time) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GT, x)
}

// GTE builds a "field >= x" filter.
func (w whereHelpernull_Time) GTE(x null.Time) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GTE, x)
}

// IsNull filters rows where the column is NULL.
func (w whereHelpernull_Time) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) }

// IsNotNull filters rows where the column is not NULL.
func (w whereHelpernull_Time) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }
+
// LogWhere provides type-safe WHERE-clause builders for every column of
// the log table, pre-bound to the quoted, table-qualified column names.
var LogWhere = struct {
	ID        whereHelperint64
	MetricID  whereHelperint64
	Value     whereHelperint64
	Timestamp whereHelpernull_Time
}{
	ID:        whereHelperint64{field: "\"log\".\"id\""},
	MetricID:  whereHelperint64{field: "\"log\".\"metric_id\""},
	Value:     whereHelperint64{field: "\"log\".\"value\""},
	Timestamp: whereHelpernull_Time{field: "\"log\".\"timestamp\""},
}

// LogRels is where relationship names are stored.
var LogRels = struct {
	Metric     string
	LogComment string
}{
	Metric:     "Metric",
	LogComment: "LogComment",
}
+
// logR is where relationships are stored.
type logR struct {
	Metric     *Metric     `boil:"Metric" json:"Metric" toml:"Metric" yaml:"Metric"`
	LogComment *LogComment `boil:"LogComment" json:"LogComment" toml:"LogComment" yaml:"LogComment"`
}

// NewStruct creates a new, empty relationship struct.
func (*logR) NewStruct() *logR {
	return &logR{}
}
+
+func (r *logR) GetMetric() *Metric {
+ if r == nil {
+ return nil
+ }
+ return r.Metric
+}
+
+func (r *logR) GetLogComment() *LogComment {
+ if r == nil {
+ return nil
+ }
+ return r.LogComment
+}
+
// logL is where Load methods for each relationship are stored.
type logL struct{}

// Column sets used for INSERT/UPDATE column inference.
var (
	logAllColumns            = []string{"id", "metric_id", "value", "timestamp"}
	logColumnsWithoutDefault = []string{"metric_id", "value"}
	logColumnsWithDefault    = []string{"id", "timestamp"}
	logPrimaryKeyColumns     = []string{"id"}
	logGeneratedColumns      = []string{"id"}
)

type (
	// LogSlice is an alias for a slice of pointers to Log.
	// This should almost always be used instead of []Log.
	LogSlice []*Log
	// LogHook is the signature for custom Log hook methods
	LogHook func(context.Context, boil.ContextExecutor, *Log) error

	// logQuery wraps queries.Query so Log-specific finishers
	// (One, All, Count, Exists, ...) can hang off it.
	logQuery struct {
		*queries.Query
	}
)

// Cache for insert, update and upsert
var (
	logType                 = reflect.TypeOf(&Log{})
	logMapping              = queries.MakeStructMapping(logType)
	logPrimaryKeyMapping, _ = queries.BindMapping(logType, logMapping, logPrimaryKeyColumns)
	logInsertCacheMut       sync.RWMutex
	logInsertCache          = make(map[string]insertCache)
	logUpdateCacheMut       sync.RWMutex
	logUpdateCache          = make(map[string]updateCache)
	logUpsertCacheMut       sync.RWMutex
	logUpsertCache          = make(map[string]insertCache)
)

var (
	// Force time package dependency for automated UpdatedAt/CreatedAt.
	_ = time.Second
	// Force qmhelper dependency for where clause generation (which doesn't
	// always happen)
	_ = qmhelper.Where
)

// Package-level registries of user hook functions, populated via AddLogHook.
var logAfterSelectHooks []LogHook

var logBeforeInsertHooks []LogHook
var logAfterInsertHooks []LogHook

var logBeforeUpdateHooks []LogHook
var logAfterUpdateHooks []LogHook

var logBeforeDeleteHooks []LogHook
var logAfterDeleteHooks []LogHook

var logBeforeUpsertHooks []LogHook
var logAfterUpsertHooks []LogHook
+
// The do*Hooks executors below all follow the same pattern: bail out
// immediately when hooks are disabled on the context (boil.SkipHooks),
// otherwise run each registered hook in registration order and stop at
// the first error.

// doAfterSelectHooks executes all "after Select" hooks.
func (o *Log) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logAfterSelectHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeInsertHooks executes all "before insert" hooks.
func (o *Log) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logBeforeInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterInsertHooks executes all "after Insert" hooks.
func (o *Log) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logAfterInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeUpdateHooks executes all "before Update" hooks.
func (o *Log) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logBeforeUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterUpdateHooks executes all "after Update" hooks.
func (o *Log) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logAfterUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeDeleteHooks executes all "before Delete" hooks.
func (o *Log) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logBeforeDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterDeleteHooks executes all "after Delete" hooks.
func (o *Log) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logAfterDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeUpsertHooks executes all "before Upsert" hooks.
func (o *Log) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logBeforeUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterUpsertHooks executes all "after Upsert" hooks.
func (o *Log) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logAfterUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
+
// AddLogHook registers your hook function for all future operations.
// NOTE(review): registration appends to package-level slices without any
// locking, so hooks should be registered during startup, before concurrent
// use — confirm against callers.
func AddLogHook(hookPoint boil.HookPoint, logHook LogHook) {
	switch hookPoint {
	case boil.AfterSelectHook:
		logAfterSelectHooks = append(logAfterSelectHooks, logHook)
	case boil.BeforeInsertHook:
		logBeforeInsertHooks = append(logBeforeInsertHooks, logHook)
	case boil.AfterInsertHook:
		logAfterInsertHooks = append(logAfterInsertHooks, logHook)
	case boil.BeforeUpdateHook:
		logBeforeUpdateHooks = append(logBeforeUpdateHooks, logHook)
	case boil.AfterUpdateHook:
		logAfterUpdateHooks = append(logAfterUpdateHooks, logHook)
	case boil.BeforeDeleteHook:
		logBeforeDeleteHooks = append(logBeforeDeleteHooks, logHook)
	case boil.AfterDeleteHook:
		logAfterDeleteHooks = append(logAfterDeleteHooks, logHook)
	case boil.BeforeUpsertHook:
		logBeforeUpsertHooks = append(logBeforeUpsertHooks, logHook)
	case boil.AfterUpsertHook:
		logAfterUpsertHooks = append(logAfterUpsertHooks, logHook)
	}
}
+
// One returns a single log record from the query.
// A missing row is surfaced as sql.ErrNoRows so callers can test for it
// with errors.Is.
func (q logQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Log, error) {
	o := &Log{}

	// Force LIMIT 1 regardless of what the caller's query mods set.
	queries.SetLimit(q.Query, 1)

	err := q.Bind(ctx, exec, o)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "model: failed to execute a one query for log")
	}

	if err := o.doAfterSelectHooks(ctx, exec); err != nil {
		return o, err
	}

	return o, nil
}
+
// All returns all Log records from the query.
func (q logQuery) All(ctx context.Context, exec boil.ContextExecutor) (LogSlice, error) {
	var o []*Log

	err := q.Bind(ctx, exec, &o)
	if err != nil {
		return nil, errors.Wrap(err, "model: failed to assign all query results to Log slice")
	}

	// Run after-select hooks for every bound row; the length check avoids
	// the per-row call when no hooks are registered.
	if len(logAfterSelectHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
				return o, err
			}
		}
	}

	return o, nil
}
+
+// Count returns the count of all Log records in the query.
+func (q logQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ var count int64
+
+ queries.SetSelect(q.Query, nil)
+ queries.SetCount(q.Query)
+
+ err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: failed to count log rows")
+ }
+
+ return count, nil
+}
+
+// Exists checks if the row exists in the table.
+func (q logQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
+ var count int64
+
+ queries.SetSelect(q.Query, nil)
+ queries.SetCount(q.Query)
+ queries.SetLimit(q.Query, 1)
+
+ err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
+ if err != nil {
+ return false, errors.Wrap(err, "model: failed to check if log exists")
+ }
+
+ return count > 0, nil
+}
+
+// Metric pointed to by the foreign key.
+func (o *Log) Metric(mods ...qm.QueryMod) metricQuery {
+ queryMods := []qm.QueryMod{
+ qm.Where("\"id\" = ?", o.MetricID),
+ }
+
+ queryMods = append(queryMods, mods...)
+
+ return Metrics(queryMods...)
+}
+
+// LogComment pointed to by the foreign key.
+func (o *Log) LogComment(mods ...qm.QueryMod) logCommentQuery {
+ queryMods := []qm.QueryMod{
+ qm.Where("\"log_id\" = ?", o.ID),
+ }
+
+ queryMods = append(queryMods, mods...)
+
+ return LogComments(queryMods...)
+}
+
+// LoadMetric allows an eager lookup of values, cached into the
+// loaded structs of the objects. This is for an N-1 relationship.
+func (logL) LoadMetric(ctx context.Context, e boil.ContextExecutor, singular bool, maybeLog interface{}, mods queries.Applicator) error {
+ var slice []*Log
+ var object *Log
+
+ if singular {
+ var ok bool
+ object, ok = maybeLog.(*Log)
+ if !ok {
+ object = new(Log)
+ ok = queries.SetFromEmbeddedStruct(&object, &maybeLog)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeLog))
+ }
+ }
+ } else {
+ s, ok := maybeLog.(*[]*Log)
+ if ok {
+ slice = *s
+ } else {
+ ok = queries.SetFromEmbeddedStruct(&slice, maybeLog)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeLog))
+ }
+ }
+ }
+
+ args := make([]interface{}, 0, 1)
+ if singular {
+ if object.R == nil {
+ object.R = &logR{}
+ }
+ args = append(args, object.MetricID)
+
+ } else {
+ Outer:
+ for _, obj := range slice {
+ if obj.R == nil {
+ obj.R = &logR{}
+ }
+
+ for _, a := range args {
+ if a == obj.MetricID {
+ continue Outer
+ }
+ }
+
+ args = append(args, obj.MetricID)
+
+ }
+ }
+
+ if len(args) == 0 {
+ return nil
+ }
+
+ query := NewQuery(
+ qm.From(`metric`),
+ qm.WhereIn(`metric.id in ?`, args...),
+ )
+ if mods != nil {
+ mods.Apply(query)
+ }
+
+ results, err := query.QueryContext(ctx, e)
+ if err != nil {
+ return errors.Wrap(err, "failed to eager load Metric")
+ }
+
+ var resultSlice []*Metric
+ if err = queries.Bind(results, &resultSlice); err != nil {
+ return errors.Wrap(err, "failed to bind eager loaded slice Metric")
+ }
+
+ if err = results.Close(); err != nil {
+ return errors.Wrap(err, "failed to close results of eager load for metric")
+ }
+ if err = results.Err(); err != nil {
+ return errors.Wrap(err, "error occurred during iteration of eager loaded relations for metric")
+ }
+
+ if len(logAfterSelectHooks) != 0 {
+ for _, obj := range resultSlice {
+ if err := obj.doAfterSelectHooks(ctx, e); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(resultSlice) == 0 {
+ return nil
+ }
+
+ if singular {
+ foreign := resultSlice[0]
+ object.R.Metric = foreign
+ if foreign.R == nil {
+ foreign.R = &metricR{}
+ }
+ foreign.R.Logs = append(foreign.R.Logs, object)
+ return nil
+ }
+
+ for _, local := range slice {
+ for _, foreign := range resultSlice {
+ if local.MetricID == foreign.ID {
+ local.R.Metric = foreign
+ if foreign.R == nil {
+ foreign.R = &metricR{}
+ }
+ foreign.R.Logs = append(foreign.R.Logs, local)
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+// LoadLogComment allows an eager lookup of values, cached into the
+// loaded structs of the objects. This is for a 1-1 relationship.
+func (logL) LoadLogComment(ctx context.Context, e boil.ContextExecutor, singular bool, maybeLog interface{}, mods queries.Applicator) error {
+ var slice []*Log
+ var object *Log
+
+ if singular {
+ var ok bool
+ object, ok = maybeLog.(*Log)
+ if !ok {
+ object = new(Log)
+ ok = queries.SetFromEmbeddedStruct(&object, &maybeLog)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeLog))
+ }
+ }
+ } else {
+ s, ok := maybeLog.(*[]*Log)
+ if ok {
+ slice = *s
+ } else {
+ ok = queries.SetFromEmbeddedStruct(&slice, maybeLog)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeLog))
+ }
+ }
+ }
+
+ args := make([]interface{}, 0, 1)
+ if singular {
+ if object.R == nil {
+ object.R = &logR{}
+ }
+ args = append(args, object.ID)
+ } else {
+ Outer:
+ for _, obj := range slice {
+ if obj.R == nil {
+ obj.R = &logR{}
+ }
+
+ for _, a := range args {
+ if a == obj.ID {
+ continue Outer
+ }
+ }
+
+ args = append(args, obj.ID)
+ }
+ }
+
+ if len(args) == 0 {
+ return nil
+ }
+
+ query := NewQuery(
+ qm.From(`log_comment`),
+ qm.WhereIn(`log_comment.log_id in ?`, args...),
+ )
+ if mods != nil {
+ mods.Apply(query)
+ }
+
+ results, err := query.QueryContext(ctx, e)
+ if err != nil {
+ return errors.Wrap(err, "failed to eager load LogComment")
+ }
+
+ var resultSlice []*LogComment
+ if err = queries.Bind(results, &resultSlice); err != nil {
+ return errors.Wrap(err, "failed to bind eager loaded slice LogComment")
+ }
+
+ if err = results.Close(); err != nil {
+ return errors.Wrap(err, "failed to close results of eager load for log_comment")
+ }
+ if err = results.Err(); err != nil {
+ return errors.Wrap(err, "error occurred during iteration of eager loaded relations for log_comment")
+ }
+
+ if len(logAfterSelectHooks) != 0 {
+ for _, obj := range resultSlice {
+ if err := obj.doAfterSelectHooks(ctx, e); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(resultSlice) == 0 {
+ return nil
+ }
+
+ if singular {
+ foreign := resultSlice[0]
+ object.R.LogComment = foreign
+ if foreign.R == nil {
+ foreign.R = &logCommentR{}
+ }
+ foreign.R.Log = object
+ }
+
+ for _, local := range slice {
+ for _, foreign := range resultSlice {
+ if local.ID == foreign.LogID {
+ local.R.LogComment = foreign
+ if foreign.R == nil {
+ foreign.R = &logCommentR{}
+ }
+ foreign.R.Log = local
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
// SetMetric of the log to the related item.
// Sets o.R.Metric to related.
// Adds o to related.R.Logs.
func (o *Log) SetMetric(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Metric) error {
	var err error
	if insert {
		if err = related.Insert(ctx, exec, boil.Infer()); err != nil {
			return errors.Wrap(err, "failed to insert into foreign table")
		}
	}

	// UPDATE "log" SET "metric_id" = ? WHERE "id" = ?
	updateQuery := fmt.Sprintf(
		"UPDATE \"log\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, []string{"metric_id"}),
		strmangle.WhereClause("\"", "\"", 0, logPrimaryKeyColumns),
	)
	values := []interface{}{related.ID, o.ID}

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, updateQuery)
		fmt.Fprintln(writer, values)
	}
	if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
		return errors.Wrap(err, "failed to update local table")
	}

	// Keep the in-memory struct and both relationship caches consistent
	// with what was just written to the database.
	o.MetricID = related.ID
	if o.R == nil {
		o.R = &logR{
			Metric: related,
		}
	} else {
		o.R.Metric = related
	}

	if related.R == nil {
		related.R = &metricR{
			Logs: LogSlice{o},
		}
	} else {
		related.R.Logs = append(related.R.Logs, o)
	}

	return nil
}
+
// SetLogComment of the log to the related item.
// Sets o.R.LogComment to related.
// Adds o to related.R.Log.
func (o *Log) SetLogComment(ctx context.Context, exec boil.ContextExecutor, insert bool, related *LogComment) error {
	var err error

	if insert {
		// New comment: stamp the foreign key before inserting.
		related.LogID = o.ID

		if err = related.Insert(ctx, exec, boil.Infer()); err != nil {
			return errors.Wrap(err, "failed to insert into foreign table")
		}
	} else {
		// Existing comment: repoint its log_id at this log.
		updateQuery := fmt.Sprintf(
			"UPDATE \"log_comment\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, []string{"log_id"}),
			strmangle.WhereClause("\"", "\"", 0, logCommentPrimaryKeyColumns),
		)
		values := []interface{}{o.ID, related.ID}

		if boil.IsDebug(ctx) {
			writer := boil.DebugWriterFrom(ctx)
			fmt.Fprintln(writer, updateQuery)
			fmt.Fprintln(writer, values)
		}
		if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
			return errors.Wrap(err, "failed to update foreign table")
		}

		related.LogID = o.ID
	}

	// Mirror the new link in both relationship caches.
	if o.R == nil {
		o.R = &logR{
			LogComment: related,
		}
	} else {
		o.R.LogComment = related
	}

	if related.R == nil {
		related.R = &logCommentR{
			Log: o,
		}
	} else {
		related.R.Log = o
	}
	return nil
}
+
// Logs retrieves all the records using an executor.
func Logs(mods ...qm.QueryMod) logQuery {
	mods = append(mods, qm.From("\"log\""))
	q := NewQuery(mods...)
	// Default to SELECT "log".* unless the caller supplied a select list.
	if len(queries.GetSelect(q)) == 0 {
		queries.SetSelect(q, []string{"\"log\".*"})
	}

	return logQuery{q}
}
+
// FindLog retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
func FindLog(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*Log, error) {
	logObj := &Log{}

	// Quote any caller-specified columns with the dialect's identifier quotes.
	sel := "*"
	if len(selectCols) > 0 {
		sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
	}
	query := fmt.Sprintf(
		"select %s from \"log\" where \"id\"=?", sel,
	)

	q := queries.Raw(query, iD)

	err := q.Bind(ctx, exec, logObj)
	if err != nil {
		// Pass sql.ErrNoRows through unwrapped so callers can test for it.
		if errors.Is(err, sql.ErrNoRows) {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "model: unable to select from log")
	}

	if err = logObj.doAfterSelectHooks(ctx, exec); err != nil {
		return logObj, err
	}

	return logObj, nil
}
+
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *Log) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("model: no log provided for insertion")
	}

	var err error

	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}

	// Columns with defaults that currently hold non-zero values must be
	// included in the insert rather than left to the database default.
	nzDefaults := queries.NonZeroDefaultSet(logColumnsWithDefault, o)

	// Built statements are cached per (columns, nzDefaults) combination.
	key := makeCacheKey(columns, nzDefaults)
	logInsertCacheMut.RLock()
	cache, cached := logInsertCache[key]
	logInsertCacheMut.RUnlock()

	if !cached {
		wl, returnColumns := columns.InsertColumnSet(
			logAllColumns,
			logColumnsWithDefault,
			logColumnsWithoutDefault,
			nzDefaults,
		)
		// Database-generated columns are never written explicitly.
		wl = strmangle.SetComplement(wl, logGeneratedColumns)

		cache.valueMapping, err = queries.BindMapping(logType, logMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(logType, logMapping, returnColumns)
		if err != nil {
			return err
		}
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"log\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			cache.query = "INSERT INTO \"log\" %sDEFAULT VALUES%s"
		}

		var queryOutput, queryReturning string

		if len(cache.retMapping) != 0 {
			queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\""))
		}

		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}

	// With a RETURNING clause, scan generated values back into the struct.
	if len(cache.retMapping) != 0 {
		err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	} else {
		_, err = exec.ExecContext(ctx, cache.query, vals...)
	}

	if err != nil {
		return errors.Wrap(err, "model: unable to insert into log")
	}

	if !cached {
		logInsertCacheMut.Lock()
		logInsertCache[key] = cache
		logInsertCacheMut.Unlock()
	}

	return o.doAfterInsertHooks(ctx, exec)
}
+
// Update uses an executor to update the Log.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *Log) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}
	// Built statements are cached per column-set.
	key := makeCacheKey(columns, nil)
	logUpdateCacheMut.RLock()
	cache, cached := logUpdateCache[key]
	logUpdateCacheMut.RUnlock()

	if !cached {
		wl := columns.UpdateColumnSet(
			logAllColumns,
			logPrimaryKeyColumns,
		)
		// Database-generated columns are never written explicitly.
		wl = strmangle.SetComplement(wl, logGeneratedColumns)

		if !columns.IsWhitelist() {
			wl = strmangle.SetComplement(wl, []string{"created_at"})
		}
		if len(wl) == 0 {
			return 0, errors.New("model: unable to update log, could not build whitelist")
		}

		cache.query = fmt.Sprintf("UPDATE \"log\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, logPrimaryKeyColumns),
		)
		// Bind values in SET-then-WHERE order: updated columns first,
		// primary key last.
		cache.valueMapping, err = queries.BindMapping(logType, logMapping, append(wl, logPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}

	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, values)
	}
	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to update log row")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: failed to get rows affected by update for log")
	}

	if !cached {
		logUpdateCacheMut.Lock()
		logUpdateCache[key] = cache
		logUpdateCacheMut.Unlock()
	}

	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
+
+// UpdateAll updates all rows with the specified column values.
+func (q logQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
+ queries.SetUpdate(q.Query, cols)
+
+ result, err := q.Query.ExecContext(ctx, exec)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to update all for log")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to retrieve rows affected for log")
+ }
+
+ return rowsAff, nil
+}
+
// UpdateAll updates all rows with the specified column values, using an executor.
func (o LogSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}

	if len(cols) == 0 {
		return 0, errors.New("model: update all requires at least one column argument")
	}

	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))

	// Bind args are ordered SET values first, then primary keys, matching
	// the placeholder order in the statement built below.
	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}

	// Append all of the primary key values for each column
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), logPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := fmt.Sprintf("UPDATE \"log\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, logPrimaryKeyColumns, len(o)))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to update all in log slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to retrieve rows affected all in update all log")
	}
	return rowsAff, nil
}
+
// Upsert attempts an insert using an executor, and does an update or ignore on conflict.
// See boil.Columns documentation for how to properly use updateColumns and insertColumns.
func (o *Log) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error {
	if o == nil {
		return errors.New("model: no log provided for upsert")
	}

	if err := o.doBeforeUpsertHooks(ctx, exec); err != nil {
		return err
	}

	nzDefaults := queries.NonZeroDefaultSet(logColumnsWithDefault, o)

	// Build cache key in-line uglily - mysql vs psql problems
	// The key encodes every input that changes the generated SQL.
	buf := strmangle.GetBuffer()
	if updateOnConflict {
		buf.WriteByte('t')
	} else {
		buf.WriteByte('f')
	}
	buf.WriteByte('.')
	for _, c := range conflictColumns {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	buf.WriteString(strconv.Itoa(updateColumns.Kind))
	for _, c := range updateColumns.Cols {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	buf.WriteString(strconv.Itoa(insertColumns.Kind))
	for _, c := range insertColumns.Cols {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	for _, c := range nzDefaults {
		buf.WriteString(c)
	}
	key := buf.String()
	strmangle.PutBuffer(buf)

	logUpsertCacheMut.RLock()
	cache, cached := logUpsertCache[key]
	logUpsertCacheMut.RUnlock()

	var err error

	if !cached {
		insert, ret := insertColumns.InsertColumnSet(
			logAllColumns,
			logColumnsWithDefault,
			logColumnsWithoutDefault,
			nzDefaults,
		)
		update := updateColumns.UpdateColumnSet(
			logAllColumns,
			logPrimaryKeyColumns,
		)

		if updateOnConflict && len(update) == 0 {
			return errors.New("model: unable to upsert log, could not build update column list")
		}

		// Default the conflict target to the primary key when none given.
		conflict := conflictColumns
		if len(conflict) == 0 {
			conflict = make([]string, len(logPrimaryKeyColumns))
			copy(conflict, logPrimaryKeyColumns)
		}
		cache.query = buildUpsertQuerySQLite(dialect, "\"log\"", updateOnConflict, ret, update, conflict, insert)

		cache.valueMapping, err = queries.BindMapping(logType, logMapping, insert)
		if err != nil {
			return err
		}
		if len(ret) != 0 {
			cache.retMapping, err = queries.BindMapping(logType, logMapping, ret)
			if err != nil {
				return err
			}
		}
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)
	var returns []interface{}
	if len(cache.retMapping) != 0 {
		returns = queries.PtrsFromMapping(value, cache.retMapping)
	}

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}
	if len(cache.retMapping) != 0 {
		err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...)
		if errors.Is(err, sql.ErrNoRows) {
			err = nil // Postgres doesn't return anything when there's no update
		}
	} else {
		_, err = exec.ExecContext(ctx, cache.query, vals...)
	}
	if err != nil {
		return errors.Wrap(err, "model: unable to upsert log")
	}

	if !cached {
		logUpsertCacheMut.Lock()
		logUpsertCache[key] = cache
		logUpsertCacheMut.Unlock()
	}

	return o.doAfterUpsertHooks(ctx, exec)
}
+
// Delete deletes a single Log record with an executor.
// Delete will match against the primary key column to find the record to delete.
func (o *Log) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("model: no Log provided for delete")
	}

	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), logPrimaryKeyMapping)
	sql := "DELETE FROM \"log\" WHERE \"id\"=?"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to delete from log")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: failed to get rows affected by delete for log")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	return rowsAff, nil
}
+
+// DeleteAll deletes all matching rows.
+func (q logQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ if q.Query == nil {
+ return 0, errors.New("model: no logQuery provided for delete all")
+ }
+
+ queries.SetDelete(q.Query)
+
+ result, err := q.Query.ExecContext(ctx, exec)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to delete all from log")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: failed to get rows affected by deleteall for log")
+ }
+
+ return rowsAff, nil
+}
+
// DeleteAll deletes all rows in the slice, using an executor.
func (o LogSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	// Run before-delete hooks for every row prior to issuing the statement.
	if len(logBeforeDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	// Gather each row's primary key as bind arguments for the repeated
	// WHERE clause below.
	var args []interface{}
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), logPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "DELETE FROM \"log\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, logPrimaryKeyColumns, len(o))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to delete all from log slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: failed to get rows affected by deleteall for log")
	}

	if len(logAfterDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	return rowsAff, nil
}
+
// Reload refetches the object from the database using the primary key with
// an executor, then overwrites the receiver in place. Loaded relationships
// (o.R) are replaced by whatever FindLog returns.
func (o *Log) Reload(ctx context.Context, exec boil.ContextExecutor) error {
	ret, err := FindLog(ctx, exec, o.ID)
	if err != nil {
		return err
	}

	*o = *ret
	return nil
}
+
// ReloadAll refetches every row with matching primary key column values in a
// single SELECT and overwrites the original object slice with the newly
// fetched slice. Note the result order is whatever the database returns, and
// rows deleted in the meantime are silently dropped from the slice.
func (o *LogSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	slice := LogSlice{}
	var args []interface{}
	for _, obj := range *o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), logPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "SELECT \"log\".* FROM \"log\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, logPrimaryKeyColumns, len(*o))

	q := queries.Raw(sql, args...)

	err := q.Bind(ctx, exec, &slice)
	if err != nil {
		return errors.Wrap(err, "model: unable to reload all in LogSlice")
	}

	*o = slice

	return nil
}
+
// LogExists checks if the Log row with the given primary key exists, using a
// SELECT EXISTS query so no row data is transferred.
func LogExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) {
	var exists bool
	sql := "select exists(select 1 from \"log\" where \"id\"=? limit 1)"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, iD)
	}
	row := exec.QueryRowContext(ctx, sql, iD)

	err := row.Scan(&exists)
	if err != nil {
		return false, errors.Wrap(err, "model: unable to check if log exists")
	}

	return exists, nil
}
+// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package model
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/friendsofgo/errors"
+ "github.com/volatiletech/sqlboiler/v4/boil"
+ "github.com/volatiletech/sqlboiler/v4/queries"
+ "github.com/volatiletech/sqlboiler/v4/queries/qm"
+ "github.com/volatiletech/sqlboiler/v4/queries/qmhelper"
+ "github.com/volatiletech/strmangle"
+)
+
// LogComment is an object representing the "log_comment" database table.
type LogComment struct {
	ID      int64  `boil:"id" json:"id" toml:"id" yaml:"id"`
	LogID   int64  `boil:"log_id" json:"log_id" toml:"log_id" yaml:"log_id"`
	Comment string `boil:"comment" json:"comment" toml:"comment" yaml:"comment"`

	// R holds eagerly loaded relationships; L holds the Load* methods.
	R *logCommentR `boil:"-" json:"-" toml:"-" yaml:"-"`
	L logCommentL  `boil:"-" json:"-" toml:"-" yaml:"-"`
}

// LogCommentColumns maps struct fields to their bare column names.
var LogCommentColumns = struct {
	ID      string
	LogID   string
	Comment string
}{
	ID:      "id",
	LogID:   "log_id",
	Comment: "comment",
}

// LogCommentTableColumns maps struct fields to their table-qualified column
// names, for use in joins and ambiguous selects.
var LogCommentTableColumns = struct {
	ID      string
	LogID   string
	Comment string
}{
	ID:      "log_comment.id",
	LogID:   "log_comment.log_id",
	Comment: "log_comment.comment",
}

// Generated where

// LogCommentWhere provides typed query-mod builders for WHERE clauses on each
// column.
var LogCommentWhere = struct {
	ID      whereHelperint64
	LogID   whereHelperint64
	Comment whereHelperstring
}{
	ID:      whereHelperint64{field: "\"log_comment\".\"id\""},
	LogID:   whereHelperint64{field: "\"log_comment\".\"log_id\""},
	Comment: whereHelperstring{field: "\"log_comment\".\"comment\""},
}

// LogCommentRels is where relationship names are stored.
var LogCommentRels = struct {
	Log string
}{
	Log: "Log",
}

// logCommentR is where relationships are stored.
type logCommentR struct {
	Log *Log `boil:"Log" json:"Log" toml:"Log" yaml:"Log"`
}
+
// NewStruct creates a new, empty relationship struct.
func (*logCommentR) NewStruct() *logCommentR {
	return &logCommentR{}
}
+
+func (r *logCommentR) GetLog() *Log {
+ if r == nil {
+ return nil
+ }
+ return r.Log
+}
+
// logCommentL is where Load methods for each relationship are stored.
type logCommentL struct{}

// Column-set metadata used to build INSERT/UPDATE/DELETE statements.
var (
	logCommentAllColumns            = []string{"id", "log_id", "comment"}
	logCommentColumnsWithoutDefault = []string{"log_id", "comment"}
	logCommentColumnsWithDefault    = []string{"id"}
	logCommentPrimaryKeyColumns     = []string{"id"}
	logCommentGeneratedColumns      = []string{"id"}
)

type (
	// LogCommentSlice is an alias for a slice of pointers to LogComment.
	// This should almost always be used instead of []LogComment.
	LogCommentSlice []*LogComment
	// LogCommentHook is the signature for custom LogComment hook methods
	LogCommentHook func(context.Context, boil.ContextExecutor, *LogComment) error

	logCommentQuery struct {
		*queries.Query
	}
)

// Cache for insert, update and upsert: statement text and struct-field
// mappings are built once per distinct column set and guarded by a mutex.
var (
	logCommentType                 = reflect.TypeOf(&LogComment{})
	logCommentMapping              = queries.MakeStructMapping(logCommentType)
	logCommentPrimaryKeyMapping, _ = queries.BindMapping(logCommentType, logCommentMapping, logCommentPrimaryKeyColumns)
	logCommentInsertCacheMut       sync.RWMutex
	logCommentInsertCache          = make(map[string]insertCache)
	logCommentUpdateCacheMut       sync.RWMutex
	logCommentUpdateCache          = make(map[string]updateCache)
	logCommentUpsertCacheMut       sync.RWMutex
	logCommentUpsertCache          = make(map[string]insertCache)
)

var (
	// Force time package dependency for automated UpdatedAt/CreatedAt.
	_ = time.Second
	// Force qmhelper dependency for where clause generation (which doesn't
	// always happen)
	_ = qmhelper.Where
)

// Package-level hook registries, appended to by AddLogCommentHook.
var logCommentAfterSelectHooks []LogCommentHook

var logCommentBeforeInsertHooks []LogCommentHook
var logCommentAfterInsertHooks []LogCommentHook

var logCommentBeforeUpdateHooks []LogCommentHook
var logCommentAfterUpdateHooks []LogCommentHook

var logCommentBeforeDeleteHooks []LogCommentHook
var logCommentAfterDeleteHooks []LogCommentHook

var logCommentBeforeUpsertHooks []LogCommentHook
var logCommentAfterUpsertHooks []LogCommentHook
+
// The do*Hooks methods below all share the same shape: they are no-ops when
// hooks are skipped via the context (boil.SkipHooks), otherwise they run each
// registered hook in registration order and stop at the first error.

// doAfterSelectHooks executes all "after Select" hooks.
func (o *LogComment) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logCommentAfterSelectHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeInsertHooks executes all "before insert" hooks.
func (o *LogComment) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logCommentBeforeInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterInsertHooks executes all "after Insert" hooks.
func (o *LogComment) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logCommentAfterInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeUpdateHooks executes all "before Update" hooks.
func (o *LogComment) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logCommentBeforeUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterUpdateHooks executes all "after Update" hooks.
func (o *LogComment) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logCommentAfterUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeDeleteHooks executes all "before Delete" hooks.
func (o *LogComment) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logCommentBeforeDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterDeleteHooks executes all "after Delete" hooks.
func (o *LogComment) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logCommentAfterDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeUpsertHooks executes all "before Upsert" hooks.
func (o *LogComment) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logCommentBeforeUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterUpsertHooks executes all "after Upsert" hooks.
func (o *LogComment) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range logCommentAfterUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
+
// AddLogCommentHook registers your hook function for all future operations on
// LogComment. Registration appends to a package-level slice and is NOT
// synchronized; register hooks during program initialization.
func AddLogCommentHook(hookPoint boil.HookPoint, logCommentHook LogCommentHook) {
	switch hookPoint {
	case boil.AfterSelectHook:
		logCommentAfterSelectHooks = append(logCommentAfterSelectHooks, logCommentHook)
	case boil.BeforeInsertHook:
		logCommentBeforeInsertHooks = append(logCommentBeforeInsertHooks, logCommentHook)
	case boil.AfterInsertHook:
		logCommentAfterInsertHooks = append(logCommentAfterInsertHooks, logCommentHook)
	case boil.BeforeUpdateHook:
		logCommentBeforeUpdateHooks = append(logCommentBeforeUpdateHooks, logCommentHook)
	case boil.AfterUpdateHook:
		logCommentAfterUpdateHooks = append(logCommentAfterUpdateHooks, logCommentHook)
	case boil.BeforeDeleteHook:
		logCommentBeforeDeleteHooks = append(logCommentBeforeDeleteHooks, logCommentHook)
	case boil.AfterDeleteHook:
		logCommentAfterDeleteHooks = append(logCommentAfterDeleteHooks, logCommentHook)
	case boil.BeforeUpsertHook:
		logCommentBeforeUpsertHooks = append(logCommentBeforeUpsertHooks, logCommentHook)
	case boil.AfterUpsertHook:
		logCommentAfterUpsertHooks = append(logCommentAfterUpsertHooks, logCommentHook)
	}
}
+
// One returns a single logComment record from the query. A LIMIT 1 is forced
// onto the query; sql.ErrNoRows is returned unwrapped so callers can test for
// it with errors.Is.
func (q logCommentQuery) One(ctx context.Context, exec boil.ContextExecutor) (*LogComment, error) {
	o := &LogComment{}

	queries.SetLimit(q.Query, 1)

	err := q.Bind(ctx, exec, o)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "model: failed to execute a one query for log_comment")
	}

	if err := o.doAfterSelectHooks(ctx, exec); err != nil {
		return o, err
	}

	return o, nil
}
+
// All returns all LogComment records from the query, running after-select
// hooks on each bound object.
func (q logCommentQuery) All(ctx context.Context, exec boil.ContextExecutor) (LogCommentSlice, error) {
	var o []*LogComment

	err := q.Bind(ctx, exec, &o)
	if err != nil {
		return nil, errors.Wrap(err, "model: failed to assign all query results to LogComment slice")
	}

	if len(logCommentAfterSelectHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
				return o, err
			}
		}
	}

	return o, nil
}
+
// Count returns the count of all LogComment records matched by the query.
// The select list is cleared and replaced with COUNT(*) before execution.
func (q logCommentQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	var count int64

	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)

	err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
	if err != nil {
		return 0, errors.Wrap(err, "model: failed to count log_comment rows")
	}

	return count, nil
}
+
// Exists checks if any row matching the query exists in the table, by
// converting the query into a LIMIT 1 count and testing count > 0.
func (q logCommentQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
	var count int64

	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)
	queries.SetLimit(q.Query, 1)

	err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
	if err != nil {
		return false, errors.Wrap(err, "model: failed to check if log_comment exists")
	}

	return count > 0, nil
}
+
// Log returns a query for the Log row pointed to by the log_id foreign key.
// Additional query mods are appended after the id filter.
func (o *LogComment) Log(mods ...qm.QueryMod) logQuery {
	queryMods := []qm.QueryMod{
		qm.Where("\"id\" = ?", o.LogID),
	}

	queryMods = append(queryMods, mods...)

	return Logs(queryMods...)
}
+
+// LoadLog allows an eager lookup of values, cached into the
+// loaded structs of the objects. This is for an N-1 relationship.
+func (logCommentL) LoadLog(ctx context.Context, e boil.ContextExecutor, singular bool, maybeLogComment interface{}, mods queries.Applicator) error {
+ var slice []*LogComment
+ var object *LogComment
+
+ if singular {
+ var ok bool
+ object, ok = maybeLogComment.(*LogComment)
+ if !ok {
+ object = new(LogComment)
+ ok = queries.SetFromEmbeddedStruct(&object, &maybeLogComment)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeLogComment))
+ }
+ }
+ } else {
+ s, ok := maybeLogComment.(*[]*LogComment)
+ if ok {
+ slice = *s
+ } else {
+ ok = queries.SetFromEmbeddedStruct(&slice, maybeLogComment)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeLogComment))
+ }
+ }
+ }
+
+ args := make([]interface{}, 0, 1)
+ if singular {
+ if object.R == nil {
+ object.R = &logCommentR{}
+ }
+ args = append(args, object.LogID)
+
+ } else {
+ Outer:
+ for _, obj := range slice {
+ if obj.R == nil {
+ obj.R = &logCommentR{}
+ }
+
+ for _, a := range args {
+ if a == obj.LogID {
+ continue Outer
+ }
+ }
+
+ args = append(args, obj.LogID)
+
+ }
+ }
+
+ if len(args) == 0 {
+ return nil
+ }
+
+ query := NewQuery(
+ qm.From(`log`),
+ qm.WhereIn(`log.id in ?`, args...),
+ )
+ if mods != nil {
+ mods.Apply(query)
+ }
+
+ results, err := query.QueryContext(ctx, e)
+ if err != nil {
+ return errors.Wrap(err, "failed to eager load Log")
+ }
+
+ var resultSlice []*Log
+ if err = queries.Bind(results, &resultSlice); err != nil {
+ return errors.Wrap(err, "failed to bind eager loaded slice Log")
+ }
+
+ if err = results.Close(); err != nil {
+ return errors.Wrap(err, "failed to close results of eager load for log")
+ }
+ if err = results.Err(); err != nil {
+ return errors.Wrap(err, "error occurred during iteration of eager loaded relations for log")
+ }
+
+ if len(logCommentAfterSelectHooks) != 0 {
+ for _, obj := range resultSlice {
+ if err := obj.doAfterSelectHooks(ctx, e); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(resultSlice) == 0 {
+ return nil
+ }
+
+ if singular {
+ foreign := resultSlice[0]
+ object.R.Log = foreign
+ if foreign.R == nil {
+ foreign.R = &logR{}
+ }
+ foreign.R.LogComment = object
+ return nil
+ }
+
+ for _, local := range slice {
+ for _, foreign := range resultSlice {
+ if local.LogID == foreign.ID {
+ local.R.Log = foreign
+ if foreign.R == nil {
+ foreign.R = &logR{}
+ }
+ foreign.R.LogComment = local
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
// SetLog of the logComment to the related item.
// Sets o.R.Log to related.
// Adds o to related.R.LogComment.
// When insert is true, related is inserted first; the log_id column is then
// updated directly with an UPDATE statement (bypassing update hooks), and
// both in-memory relationship structs are linked.
func (o *LogComment) SetLog(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Log) error {
	var err error
	if insert {
		if err = related.Insert(ctx, exec, boil.Infer()); err != nil {
			return errors.Wrap(err, "failed to insert into foreign table")
		}
	}

	updateQuery := fmt.Sprintf(
		"UPDATE \"log_comment\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, []string{"log_id"}),
		strmangle.WhereClause("\"", "\"", 0, logCommentPrimaryKeyColumns),
	)
	values := []interface{}{related.ID, o.ID}

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, updateQuery)
		fmt.Fprintln(writer, values)
	}
	if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
		return errors.Wrap(err, "failed to update local table")
	}

	o.LogID = related.ID
	if o.R == nil {
		o.R = &logCommentR{
			Log: related,
		}
	} else {
		o.R.Log = related
	}

	if related.R == nil {
		related.R = &logR{
			LogComment: o,
		}
	} else {
		related.R.LogComment = o
	}

	return nil
}
+
// LogComments builds a query over the log_comment table; additional query
// mods filter, order, or join. A default "log_comment".* select list is
// applied unless the mods already set one.
func LogComments(mods ...qm.QueryMod) logCommentQuery {
	mods = append(mods, qm.From("\"log_comment\""))
	q := NewQuery(mods...)
	if len(queries.GetSelect(q)) == 0 {
		queries.SetSelect(q, []string{"\"log_comment\".*"})
	}

	return logCommentQuery{q}
}
+
// FindLogComment retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns. sql.ErrNoRows is
// returned unwrapped when no row matches.
func FindLogComment(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*LogComment, error) {
	logCommentObj := &LogComment{}

	sel := "*"
	if len(selectCols) > 0 {
		sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
	}
	query := fmt.Sprintf(
		"select %s from \"log_comment\" where \"id\"=?", sel,
	)

	q := queries.Raw(query, iD)

	err := q.Bind(ctx, exec, logCommentObj)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "model: unable to select from log_comment")
	}

	if err = logCommentObj.doAfterSelectHooks(ctx, exec); err != nil {
		return logCommentObj, err
	}

	return logCommentObj, nil
}
+
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
// The generated statement and struct-field mappings are cached per column
// set; database-generated columns (here: id) are read back via RETURNING.
func (o *LogComment) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("model: no log_comment provided for insertion")
	}

	var err error

	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}

	// Columns with defaults that hold non-zero values must be inserted
	// explicitly; they also participate in the cache key.
	nzDefaults := queries.NonZeroDefaultSet(logCommentColumnsWithDefault, o)

	key := makeCacheKey(columns, nzDefaults)
	logCommentInsertCacheMut.RLock()
	cache, cached := logCommentInsertCache[key]
	logCommentInsertCacheMut.RUnlock()

	if !cached {
		wl, returnColumns := columns.InsertColumnSet(
			logCommentAllColumns,
			logCommentColumnsWithDefault,
			logCommentColumnsWithoutDefault,
			nzDefaults,
		)
		// Never write database-generated columns.
		wl = strmangle.SetComplement(wl, logCommentGeneratedColumns)

		cache.valueMapping, err = queries.BindMapping(logCommentType, logCommentMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(logCommentType, logCommentMapping, returnColumns)
		if err != nil {
			return err
		}
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"log_comment\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			cache.query = "INSERT INTO \"log_comment\" %sDEFAULT VALUES%s"
		}

		var queryOutput, queryReturning string

		if len(cache.retMapping) != 0 {
			queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\""))
		}

		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}

	// With a RETURNING clause, scan generated values back into the struct.
	if len(cache.retMapping) != 0 {
		err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	} else {
		_, err = exec.ExecContext(ctx, cache.query, vals...)
	}

	if err != nil {
		return errors.Wrap(err, "model: unable to insert into log_comment")
	}

	if !cached {
		logCommentInsertCacheMut.Lock()
		logCommentInsertCache[key] = cache
		logCommentInsertCacheMut.Unlock()
	}

	return o.doAfterInsertHooks(ctx, exec)
}
+
// Update uses an executor to update the LogComment.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
// Returns the number of rows affected; the statement and value mapping are
// cached per column set.
func (o *LogComment) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}
	key := makeCacheKey(columns, nil)
	logCommentUpdateCacheMut.RLock()
	cache, cached := logCommentUpdateCache[key]
	logCommentUpdateCacheMut.RUnlock()

	if !cached {
		wl := columns.UpdateColumnSet(
			logCommentAllColumns,
			logCommentPrimaryKeyColumns,
		)
		// Never write database-generated columns.
		wl = strmangle.SetComplement(wl, logCommentGeneratedColumns)

		// Note: log_comment has no created_at column, so this complement is
		// a no-op here; it is emitted unconditionally by the generator.
		if !columns.IsWhitelist() {
			wl = strmangle.SetComplement(wl, []string{"created_at"})
		}
		if len(wl) == 0 {
			return 0, errors.New("model: unable to update log_comment, could not build whitelist")
		}

		cache.query = fmt.Sprintf("UPDATE \"log_comment\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, logCommentPrimaryKeyColumns),
		)
		cache.valueMapping, err = queries.BindMapping(logCommentType, logCommentMapping, append(wl, logCommentPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}

	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, values)
	}
	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to update log_comment row")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: failed to get rows affected by update for log_comment")
	}

	if !cached {
		logCommentUpdateCacheMut.Lock()
		logCommentUpdateCache[key] = cache
		logCommentUpdateCacheMut.Unlock()
	}

	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
+
// UpdateAll updates all rows matched by the query with the specified column
// values. Update hooks are not invoked for query-based updates.
func (q logCommentQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	queries.SetUpdate(q.Query, cols)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to update all for log_comment")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to retrieve rows affected for log_comment")
	}

	return rowsAff, nil
}
+
// UpdateAll updates all rows in the slice with the specified column values,
// using a single UPDATE statement keyed on the primary keys. Update hooks are
// not invoked by this slice variant.
func (o LogCommentSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}

	if len(cols) == 0 {
		return 0, errors.New("model: update all requires at least one column argument")
	}

	// SET arguments come first in the placeholder list…
	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))

	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}

	// …followed by all of the primary key values for the WHERE clause.
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), logCommentPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := fmt.Sprintf("UPDATE \"log_comment\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, logCommentPrimaryKeyColumns, len(o)))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to update all in logComment slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to retrieve rows affected all in update all logComment")
	}
	return rowsAff, nil
}
+
// Upsert attempts an insert using an executor, and does an update or ignore on conflict.
// See boil.Columns documentation for how to properly use updateColumns and insertColumns.
// The generated statement (SQLite ON CONFLICT form) is cached under a key
// derived from all column-set inputs; conflictColumns defaults to the
// primary key when empty.
func (o *LogComment) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error {
	if o == nil {
		return errors.New("model: no log_comment provided for upsert")
	}

	if err := o.doBeforeUpsertHooks(ctx, exec); err != nil {
		return err
	}

	nzDefaults := queries.NonZeroDefaultSet(logCommentColumnsWithDefault, o)

	// Build cache key in-line uglily - mysql vs psql problems
	// (the key encodes every input that affects the generated SQL).
	buf := strmangle.GetBuffer()
	if updateOnConflict {
		buf.WriteByte('t')
	} else {
		buf.WriteByte('f')
	}
	buf.WriteByte('.')
	for _, c := range conflictColumns {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	buf.WriteString(strconv.Itoa(updateColumns.Kind))
	for _, c := range updateColumns.Cols {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	buf.WriteString(strconv.Itoa(insertColumns.Kind))
	for _, c := range insertColumns.Cols {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	for _, c := range nzDefaults {
		buf.WriteString(c)
	}
	key := buf.String()
	strmangle.PutBuffer(buf)

	logCommentUpsertCacheMut.RLock()
	cache, cached := logCommentUpsertCache[key]
	logCommentUpsertCacheMut.RUnlock()

	var err error

	if !cached {
		insert, ret := insertColumns.InsertColumnSet(
			logCommentAllColumns,
			logCommentColumnsWithDefault,
			logCommentColumnsWithoutDefault,
			nzDefaults,
		)
		update := updateColumns.UpdateColumnSet(
			logCommentAllColumns,
			logCommentPrimaryKeyColumns,
		)

		if updateOnConflict && len(update) == 0 {
			return errors.New("model: unable to upsert log_comment, could not build update column list")
		}

		conflict := conflictColumns
		if len(conflict) == 0 {
			conflict = make([]string, len(logCommentPrimaryKeyColumns))
			copy(conflict, logCommentPrimaryKeyColumns)
		}
		cache.query = buildUpsertQuerySQLite(dialect, "\"log_comment\"", updateOnConflict, ret, update, conflict, insert)

		cache.valueMapping, err = queries.BindMapping(logCommentType, logCommentMapping, insert)
		if err != nil {
			return err
		}
		if len(ret) != 0 {
			cache.retMapping, err = queries.BindMapping(logCommentType, logCommentMapping, ret)
			if err != nil {
				return err
			}
		}
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)
	var returns []interface{}
	if len(cache.retMapping) != 0 {
		returns = queries.PtrsFromMapping(value, cache.retMapping)
	}

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}
	if len(cache.retMapping) != 0 {
		err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...)
		if errors.Is(err, sql.ErrNoRows) {
			err = nil // Postgres doesn't return anything when there's no update
		}
	} else {
		_, err = exec.ExecContext(ctx, cache.query, vals...)
	}
	if err != nil {
		return errors.Wrap(err, "model: unable to upsert log_comment")
	}

	if !cached {
		logCommentUpsertCacheMut.Lock()
		logCommentUpsertCache[key] = cache
		logCommentUpsertCacheMut.Unlock()
	}

	return o.doAfterUpsertHooks(ctx, exec)
}
+
// Delete deletes a single LogComment record with an executor.
// Delete will match against the primary key column to find the record to delete.
// Before/after delete hooks run around the statement; returns rows affected.
func (o *LogComment) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("model: no LogComment provided for delete")
	}

	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), logCommentPrimaryKeyMapping)
	sql := "DELETE FROM \"log_comment\" WHERE \"id\"=?"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to delete from log_comment")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: failed to get rows affected by delete for log_comment")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	return rowsAff, nil
}
+
// DeleteAll deletes all rows matched by the query and returns rows affected.
// No delete hooks are invoked (no objects are materialized).
func (q logCommentQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if q.Query == nil {
		return 0, errors.New("model: no logCommentQuery provided for delete all")
	}

	// Rewrite the accumulated SELECT query into a DELETE statement.
	queries.SetDelete(q.Query)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to delete all from log_comment")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: failed to get rows affected by deleteall for log_comment")
	}

	return rowsAff, nil
}
+
// DeleteAll deletes every row in the slice with a single DELETE statement
// keyed on the primary keys, using an executor. Before/after delete hooks run
// once per object.
func (o LogCommentSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	if len(logCommentBeforeDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	var args []interface{}
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), logCommentPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "DELETE FROM \"log_comment\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, logCommentPrimaryKeyColumns, len(o))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to delete all from logComment slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: failed to get rows affected by deleteall for log_comment")
	}

	if len(logCommentAfterDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	return rowsAff, nil
}
+
// Reload refetches the object from the database using the primary key with
// an executor, then overwrites the receiver in place. Loaded relationships
// (o.R) are replaced by whatever FindLogComment returns.
func (o *LogComment) Reload(ctx context.Context, exec boil.ContextExecutor) error {
	ret, err := FindLogComment(ctx, exec, o.ID)
	if err != nil {
		return err
	}

	*o = *ret
	return nil
}
+
// ReloadAll refetches every row with matching primary key column values in a
// single SELECT and overwrites the original object slice with the newly
// fetched slice. Result order follows the database; deleted rows drop out.
func (o *LogCommentSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	slice := LogCommentSlice{}
	var args []interface{}
	for _, obj := range *o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), logCommentPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "SELECT \"log_comment\".* FROM \"log_comment\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, logCommentPrimaryKeyColumns, len(*o))

	q := queries.Raw(sql, args...)

	err := q.Bind(ctx, exec, &slice)
	if err != nil {
		return errors.Wrap(err, "model: unable to reload all in LogCommentSlice")
	}

	*o = slice

	return nil
}
+
+// LogCommentExists checks if the LogComment row exists.
+func LogCommentExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) {
+ var exists bool
+ sql := "select exists(select 1 from \"log_comment\" where \"id\"=? limit 1)"
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, iD)
+ }
+ row := exec.QueryRowContext(ctx, sql, iD)
+
+ err := row.Scan(&exists)
+ if err != nil {
+ return false, errors.Wrap(err, "model: unable to check if log_comment exists")
+ }
+
+ return exists, nil
+}
+// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package model
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/friendsofgo/errors"
+ "github.com/volatiletech/null/v8"
+ "github.com/volatiletech/sqlboiler/v4/boil"
+ "github.com/volatiletech/sqlboiler/v4/queries"
+ "github.com/volatiletech/sqlboiler/v4/queries/qm"
+ "github.com/volatiletech/sqlboiler/v4/queries/qmhelper"
+ "github.com/volatiletech/strmangle"
+)
+
// Metric is an object representing the database table.
type Metric struct {
	ID   int64       `boil:"id" json:"id" toml:"id" yaml:"id"`
	Name null.String `boil:"name" json:"name,omitempty" toml:"name" yaml:"name,omitempty"` // nullable in the schema

	// R holds eager-loaded relationships (Configs, Logs); nil until loaded.
	R *metricR `boil:"-" json:"-" toml:"-" yaml:"-"`
	// L exposes the relationship Load methods.
	L metricL `boil:"-" json:"-" toml:"-" yaml:"-"`
}
+
// MetricColumns holds the bare column names for use in query construction.
var MetricColumns = struct {
	ID   string
	Name string
}{
	ID:   "id",
	Name: "name",
}

// MetricTableColumns holds the table-qualified column names, useful when
// a query joins multiple tables.
var MetricTableColumns = struct {
	ID   string
	Name string
}{
	ID:   "metric.id",
	Name: "metric.name",
}
+
// Generated where

// whereHelpernull_String builds typed WHERE-clause query mods for a
// nullable string column.
type whereHelpernull_String struct{ field string }

// EQ/NEQ use the null-aware comparison helper so SQL NULL semantics are
// handled by qmhelper.WhereNullEQ.
func (w whereHelpernull_String) EQ(x null.String) qm.QueryMod {
	return qmhelper.WhereNullEQ(w.field, false, x)
}
func (w whereHelpernull_String) NEQ(x null.String) qm.QueryMod {
	return qmhelper.WhereNullEQ(w.field, true, x)
}
func (w whereHelpernull_String) LT(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LT, x)
}
func (w whereHelpernull_String) LTE(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LTE, x)
}
func (w whereHelpernull_String) GT(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GT, x)
}
func (w whereHelpernull_String) GTE(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GTE, x)
}

// IN matches any of the given plain (non-null) strings.
func (w whereHelpernull_String) IN(slice []string) qm.QueryMod {
	values := make([]interface{}, 0, len(slice))
	for _, value := range slice {
		values = append(values, value)
	}
	return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
}

// NIN excludes all of the given plain (non-null) strings.
func (w whereHelpernull_String) NIN(slice []string) qm.QueryMod {
	values := make([]interface{}, 0, len(slice))
	for _, value := range slice {
		values = append(values, value)
	}
	return qm.WhereNotIn(fmt.Sprintf("%s NOT IN ?", w.field), values...)
}

func (w whereHelpernull_String) IsNull() qm.QueryMod    { return qmhelper.WhereIsNull(w.field) }
func (w whereHelpernull_String) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }
+
// MetricWhere provides typed WHERE-clause builders, one per column.
var MetricWhere = struct {
	ID   whereHelperint64
	Name whereHelpernull_String
}{
	ID:   whereHelperint64{field: "\"metric\".\"id\""},
	Name: whereHelpernull_String{field: "\"metric\".\"name\""},
}

// MetricRels is where relationship names are stored.
var MetricRels = struct {
	Configs string
	Logs    string
}{
	Configs: "Configs",
	Logs:    "Logs",
}

// metricR is where relationships are stored.
// Fields are populated by the Load* methods or by qm.Load query mods.
type metricR struct {
	Configs ConfigSlice `boil:"Configs" json:"Configs" toml:"Configs" yaml:"Configs"`
	Logs    LogSlice    `boil:"Logs" json:"Logs" toml:"Logs" yaml:"Logs"`
}
+
+// NewStruct creates a new relationship struct
+func (*metricR) NewStruct() *metricR {
+ return &metricR{}
+}
+
+func (r *metricR) GetConfigs() ConfigSlice {
+ if r == nil {
+ return nil
+ }
+ return r.Configs
+}
+
+func (r *metricR) GetLogs() LogSlice {
+ if r == nil {
+ return nil
+ }
+ return r.Logs
+}
+
// metricL is where Load methods for each relationship are stored.
type metricL struct{}

var (
	metricAllColumns            = []string{"id", "name"}
	metricColumnsWithoutDefault = []string{}
	metricColumnsWithDefault    = []string{"id", "name"}
	metricPrimaryKeyColumns     = []string{"id"}
	// metricGeneratedColumns are database-generated and are stripped from
	// insert/update/upsert column lists.
	metricGeneratedColumns = []string{"id"}
)

type (
	// MetricSlice is an alias for a slice of pointers to Metric.
	// This should almost always be used instead of []Metric.
	MetricSlice []*Metric
	// MetricHook is the signature for custom Metric hook methods
	MetricHook func(context.Context, boil.ContextExecutor, *Metric) error

	metricQuery struct {
		*queries.Query
	}
)

// Cache for insert, update and upsert
// Statement text and struct/value mappings are memoized per column set,
// each guarded by its own RWMutex.
var (
	metricType                 = reflect.TypeOf(&Metric{})
	metricMapping              = queries.MakeStructMapping(metricType)
	metricPrimaryKeyMapping, _ = queries.BindMapping(metricType, metricMapping, metricPrimaryKeyColumns)
	metricInsertCacheMut       sync.RWMutex
	metricInsertCache          = make(map[string]insertCache)
	metricUpdateCacheMut       sync.RWMutex
	metricUpdateCache          = make(map[string]updateCache)
	metricUpsertCacheMut       sync.RWMutex
	metricUpsertCache          = make(map[string]insertCache)
)

var (
	// Force time package dependency for automated UpdatedAt/CreatedAt.
	_ = time.Second
	// Force qmhelper dependency for where clause generation (which doesn't
	// always happen)
	_ = qmhelper.Where
)

// Hook registries, appended to by AddMetricHook and executed by the
// do*Hooks methods below.
var metricAfterSelectHooks []MetricHook

var metricBeforeInsertHooks []MetricHook
var metricAfterInsertHooks []MetricHook

var metricBeforeUpdateHooks []MetricHook
var metricAfterUpdateHooks []MetricHook

var metricBeforeDeleteHooks []MetricHook
var metricAfterDeleteHooks []MetricHook

var metricBeforeUpsertHooks []MetricHook
var metricAfterUpsertHooks []MetricHook
+
+// doAfterSelectHooks executes all "after Select" hooks.
+func (o *Metric) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range metricAfterSelectHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeInsertHooks executes all "before insert" hooks.
+func (o *Metric) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range metricBeforeInsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterInsertHooks executes all "after Insert" hooks.
+func (o *Metric) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range metricAfterInsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeUpdateHooks executes all "before Update" hooks.
+func (o *Metric) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range metricBeforeUpdateHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterUpdateHooks executes all "after Update" hooks.
+func (o *Metric) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range metricAfterUpdateHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeDeleteHooks executes all "before Delete" hooks.
+func (o *Metric) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range metricBeforeDeleteHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterDeleteHooks executes all "after Delete" hooks.
+func (o *Metric) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range metricAfterDeleteHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doBeforeUpsertHooks executes all "before Upsert" hooks.
+func (o *Metric) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range metricBeforeUpsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doAfterUpsertHooks executes all "after Upsert" hooks.
+func (o *Metric) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
+ if boil.HooksAreSkipped(ctx) {
+ return nil
+ }
+
+ for _, hook := range metricAfterUpsertHooks {
+ if err := hook(ctx, exec, o); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
// AddMetricHook registers your hook function for all future operations.
//
// NOTE(review): registration appends to package-level slices without any
// synchronization — register hooks during program init, before any
// concurrent use of this package.
func AddMetricHook(hookPoint boil.HookPoint, metricHook MetricHook) {
	switch hookPoint {
	case boil.AfterSelectHook:
		metricAfterSelectHooks = append(metricAfterSelectHooks, metricHook)
	case boil.BeforeInsertHook:
		metricBeforeInsertHooks = append(metricBeforeInsertHooks, metricHook)
	case boil.AfterInsertHook:
		metricAfterInsertHooks = append(metricAfterInsertHooks, metricHook)
	case boil.BeforeUpdateHook:
		metricBeforeUpdateHooks = append(metricBeforeUpdateHooks, metricHook)
	case boil.AfterUpdateHook:
		metricAfterUpdateHooks = append(metricAfterUpdateHooks, metricHook)
	case boil.BeforeDeleteHook:
		metricBeforeDeleteHooks = append(metricBeforeDeleteHooks, metricHook)
	case boil.AfterDeleteHook:
		metricAfterDeleteHooks = append(metricAfterDeleteHooks, metricHook)
	case boil.BeforeUpsertHook:
		metricBeforeUpsertHooks = append(metricBeforeUpsertHooks, metricHook)
	case boil.AfterUpsertHook:
		metricAfterUpsertHooks = append(metricAfterUpsertHooks, metricHook)
	}
}
+
// One returns a single metric record from the query.
// It forces LIMIT 1 on the query and returns the bare sql.ErrNoRows
// sentinel (unwrapped) so callers can test for it with errors.Is.
func (q metricQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Metric, error) {
	o := &Metric{}

	queries.SetLimit(q.Query, 1)

	err := q.Bind(ctx, exec, o)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "model: failed to execute a one query for metric")
	}

	// On a hook failure the partially-processed object is still returned.
	if err := o.doAfterSelectHooks(ctx, exec); err != nil {
		return o, err
	}

	return o, nil
}
+
// All returns all Metric records from the query.
func (q metricQuery) All(ctx context.Context, exec boil.ContextExecutor) (MetricSlice, error) {
	var o []*Metric

	err := q.Bind(ctx, exec, &o)
	if err != nil {
		return nil, errors.Wrap(err, "model: failed to assign all query results to Metric slice")
	}

	// The length check skips the per-row loop entirely when no after-select
	// hooks are registered.
	if len(metricAfterSelectHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
				return o, err
			}
		}
	}

	return o, nil
}
+
+// Count returns the count of all Metric records in the query.
+func (q metricQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ var count int64
+
+ queries.SetSelect(q.Query, nil)
+ queries.SetCount(q.Query)
+
+ err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: failed to count metric rows")
+ }
+
+ return count, nil
+}
+
+// Exists checks if the row exists in the table.
+func (q metricQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
+ var count int64
+
+ queries.SetSelect(q.Query, nil)
+ queries.SetCount(q.Query)
+ queries.SetLimit(q.Query, 1)
+
+ err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
+ if err != nil {
+ return false, errors.Wrap(err, "model: failed to check if metric exists")
+ }
+
+ return count > 0, nil
+}
+
+// Configs retrieves all the config's Configs with an executor.
+func (o *Metric) Configs(mods ...qm.QueryMod) configQuery {
+ var queryMods []qm.QueryMod
+ if len(mods) != 0 {
+ queryMods = append(queryMods, mods...)
+ }
+
+ queryMods = append(queryMods,
+ qm.Where("\"config\".\"metric_id\"=?", o.ID),
+ )
+
+ return Configs(queryMods...)
+}
+
+// Logs retrieves all the log's Logs with an executor.
+func (o *Metric) Logs(mods ...qm.QueryMod) logQuery {
+ var queryMods []qm.QueryMod
+ if len(mods) != 0 {
+ queryMods = append(queryMods, mods...)
+ }
+
+ queryMods = append(queryMods,
+ qm.Where("\"log\".\"metric_id\"=?", o.ID),
+ )
+
+ return Logs(queryMods...)
+}
+
+// LoadConfigs allows an eager lookup of values, cached into the
+// loaded structs of the objects. This is for a 1-M or N-M relationship.
+func (metricL) LoadConfigs(ctx context.Context, e boil.ContextExecutor, singular bool, maybeMetric interface{}, mods queries.Applicator) error {
+ var slice []*Metric
+ var object *Metric
+
+ if singular {
+ var ok bool
+ object, ok = maybeMetric.(*Metric)
+ if !ok {
+ object = new(Metric)
+ ok = queries.SetFromEmbeddedStruct(&object, &maybeMetric)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeMetric))
+ }
+ }
+ } else {
+ s, ok := maybeMetric.(*[]*Metric)
+ if ok {
+ slice = *s
+ } else {
+ ok = queries.SetFromEmbeddedStruct(&slice, maybeMetric)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeMetric))
+ }
+ }
+ }
+
+ args := make([]interface{}, 0, 1)
+ if singular {
+ if object.R == nil {
+ object.R = &metricR{}
+ }
+ args = append(args, object.ID)
+ } else {
+ Outer:
+ for _, obj := range slice {
+ if obj.R == nil {
+ obj.R = &metricR{}
+ }
+
+ for _, a := range args {
+ if a == obj.ID {
+ continue Outer
+ }
+ }
+
+ args = append(args, obj.ID)
+ }
+ }
+
+ if len(args) == 0 {
+ return nil
+ }
+
+ query := NewQuery(
+ qm.From(`config`),
+ qm.WhereIn(`config.metric_id in ?`, args...),
+ )
+ if mods != nil {
+ mods.Apply(query)
+ }
+
+ results, err := query.QueryContext(ctx, e)
+ if err != nil {
+ return errors.Wrap(err, "failed to eager load config")
+ }
+
+ var resultSlice []*Config
+ if err = queries.Bind(results, &resultSlice); err != nil {
+ return errors.Wrap(err, "failed to bind eager loaded slice config")
+ }
+
+ if err = results.Close(); err != nil {
+ return errors.Wrap(err, "failed to close results in eager load on config")
+ }
+ if err = results.Err(); err != nil {
+ return errors.Wrap(err, "error occurred during iteration of eager loaded relations for config")
+ }
+
+ if len(configAfterSelectHooks) != 0 {
+ for _, obj := range resultSlice {
+ if err := obj.doAfterSelectHooks(ctx, e); err != nil {
+ return err
+ }
+ }
+ }
+ if singular {
+ object.R.Configs = resultSlice
+ for _, foreign := range resultSlice {
+ if foreign.R == nil {
+ foreign.R = &configR{}
+ }
+ foreign.R.Metric = object
+ }
+ return nil
+ }
+
+ for _, foreign := range resultSlice {
+ for _, local := range slice {
+ if local.ID == foreign.MetricID {
+ local.R.Configs = append(local.R.Configs, foreign)
+ if foreign.R == nil {
+ foreign.R = &configR{}
+ }
+ foreign.R.Metric = local
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+// LoadLogs allows an eager lookup of values, cached into the
+// loaded structs of the objects. This is for a 1-M or N-M relationship.
+func (metricL) LoadLogs(ctx context.Context, e boil.ContextExecutor, singular bool, maybeMetric interface{}, mods queries.Applicator) error {
+ var slice []*Metric
+ var object *Metric
+
+ if singular {
+ var ok bool
+ object, ok = maybeMetric.(*Metric)
+ if !ok {
+ object = new(Metric)
+ ok = queries.SetFromEmbeddedStruct(&object, &maybeMetric)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", object, maybeMetric))
+ }
+ }
+ } else {
+ s, ok := maybeMetric.(*[]*Metric)
+ if ok {
+ slice = *s
+ } else {
+ ok = queries.SetFromEmbeddedStruct(&slice, maybeMetric)
+ if !ok {
+ return errors.New(fmt.Sprintf("failed to set %T from embedded struct %T", slice, maybeMetric))
+ }
+ }
+ }
+
+ args := make([]interface{}, 0, 1)
+ if singular {
+ if object.R == nil {
+ object.R = &metricR{}
+ }
+ args = append(args, object.ID)
+ } else {
+ Outer:
+ for _, obj := range slice {
+ if obj.R == nil {
+ obj.R = &metricR{}
+ }
+
+ for _, a := range args {
+ if a == obj.ID {
+ continue Outer
+ }
+ }
+
+ args = append(args, obj.ID)
+ }
+ }
+
+ if len(args) == 0 {
+ return nil
+ }
+
+ query := NewQuery(
+ qm.From(`log`),
+ qm.WhereIn(`log.metric_id in ?`, args...),
+ )
+ if mods != nil {
+ mods.Apply(query)
+ }
+
+ results, err := query.QueryContext(ctx, e)
+ if err != nil {
+ return errors.Wrap(err, "failed to eager load log")
+ }
+
+ var resultSlice []*Log
+ if err = queries.Bind(results, &resultSlice); err != nil {
+ return errors.Wrap(err, "failed to bind eager loaded slice log")
+ }
+
+ if err = results.Close(); err != nil {
+ return errors.Wrap(err, "failed to close results in eager load on log")
+ }
+ if err = results.Err(); err != nil {
+ return errors.Wrap(err, "error occurred during iteration of eager loaded relations for log")
+ }
+
+ if len(logAfterSelectHooks) != 0 {
+ for _, obj := range resultSlice {
+ if err := obj.doAfterSelectHooks(ctx, e); err != nil {
+ return err
+ }
+ }
+ }
+ if singular {
+ object.R.Logs = resultSlice
+ for _, foreign := range resultSlice {
+ if foreign.R == nil {
+ foreign.R = &logR{}
+ }
+ foreign.R.Metric = object
+ }
+ return nil
+ }
+
+ for _, foreign := range resultSlice {
+ for _, local := range slice {
+ if local.ID == foreign.MetricID {
+ local.R.Logs = append(local.R.Logs, foreign)
+ if foreign.R == nil {
+ foreign.R = &logR{}
+ }
+ foreign.R.Metric = local
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
// AddConfigs adds the given related objects to the existing relationships
// of the metric, optionally inserting them as new records.
// Appends related to o.R.Configs.
// Sets related.R.Metric appropriately.
func (o *Metric) AddConfigs(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Config) error {
	var err error
	for _, rel := range related {
		if insert {
			// New row: point it at this metric, then insert it.
			rel.MetricID = o.ID
			if err = rel.Insert(ctx, exec, boil.Infer()); err != nil {
				return errors.Wrap(err, "failed to insert into foreign table")
			}
		} else {
			// Existing row: re-point its foreign key with an UPDATE.
			updateQuery := fmt.Sprintf(
				"UPDATE \"config\" SET %s WHERE %s",
				strmangle.SetParamNames("\"", "\"", 0, []string{"metric_id"}),
				strmangle.WhereClause("\"", "\"", 0, configPrimaryKeyColumns),
			)
			values := []interface{}{o.ID, rel.ID}

			if boil.IsDebug(ctx) {
				writer := boil.DebugWriterFrom(ctx)
				fmt.Fprintln(writer, updateQuery)
				fmt.Fprintln(writer, values)
			}
			if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
				return errors.Wrap(err, "failed to update foreign table")
			}

			// Keep the in-memory struct in sync with the database.
			rel.MetricID = o.ID
		}
	}

	if o.R == nil {
		o.R = &metricR{
			Configs: related,
		}
	} else {
		o.R.Configs = append(o.R.Configs, related...)
	}

	// Maintain the back-reference on each related object.
	for _, rel := range related {
		if rel.R == nil {
			rel.R = &configR{
				Metric: o,
			}
		} else {
			rel.R.Metric = o
		}
	}
	return nil
}
+
// AddLogs adds the given related objects to the existing relationships
// of the metric, optionally inserting them as new records.
// Appends related to o.R.Logs.
// Sets related.R.Metric appropriately.
func (o *Metric) AddLogs(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Log) error {
	var err error
	for _, rel := range related {
		if insert {
			// New row: point it at this metric, then insert it.
			rel.MetricID = o.ID
			if err = rel.Insert(ctx, exec, boil.Infer()); err != nil {
				return errors.Wrap(err, "failed to insert into foreign table")
			}
		} else {
			// Existing row: re-point its foreign key with an UPDATE.
			updateQuery := fmt.Sprintf(
				"UPDATE \"log\" SET %s WHERE %s",
				strmangle.SetParamNames("\"", "\"", 0, []string{"metric_id"}),
				strmangle.WhereClause("\"", "\"", 0, logPrimaryKeyColumns),
			)
			values := []interface{}{o.ID, rel.ID}

			if boil.IsDebug(ctx) {
				writer := boil.DebugWriterFrom(ctx)
				fmt.Fprintln(writer, updateQuery)
				fmt.Fprintln(writer, values)
			}
			if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
				return errors.Wrap(err, "failed to update foreign table")
			}

			// Keep the in-memory struct in sync with the database.
			rel.MetricID = o.ID
		}
	}

	if o.R == nil {
		o.R = &metricR{
			Logs: related,
		}
	} else {
		o.R.Logs = append(o.R.Logs, related...)
	}

	// Maintain the back-reference on each related object.
	for _, rel := range related {
		if rel.R == nil {
			rel.R = &logR{
				Metric: o,
			}
		} else {
			rel.R.Metric = o
		}
	}
	return nil
}
+
+// Metrics retrieves all the records using an executor.
+func Metrics(mods ...qm.QueryMod) metricQuery {
+ mods = append(mods, qm.From("\"metric\""))
+ q := NewQuery(mods...)
+ if len(queries.GetSelect(q)) == 0 {
+ queries.SetSelect(q, []string{"\"metric\".*"})
+ }
+
+ return metricQuery{q}
+}
+
// FindMetric retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
func FindMetric(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*Metric, error) {
	metricObj := &Metric{}

	sel := "*"
	if len(selectCols) > 0 {
		// Quote the requested columns with the dialect's identifier quotes.
		sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
	}
	query := fmt.Sprintf(
		"select %s from \"metric\" where \"id\"=?", sel,
	)

	q := queries.Raw(query, iD)

	err := q.Bind(ctx, exec, metricObj)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// Preserve the sentinel so callers can errors.Is against it.
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "model: unable to select from metric")
	}

	if err = metricObj.doAfterSelectHooks(ctx, exec); err != nil {
		return metricObj, err
	}

	return metricObj, nil
}
+
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *Metric) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("model: no metric provided for insertion")
	}

	var err error

	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}

	// Default-valued columns currently holding non-zero values must be
	// written explicitly instead of being left to the database default.
	nzDefaults := queries.NonZeroDefaultSet(metricColumnsWithDefault, o)

	// Statement text and struct mappings are cached per column-set key.
	key := makeCacheKey(columns, nzDefaults)
	metricInsertCacheMut.RLock()
	cache, cached := metricInsertCache[key]
	metricInsertCacheMut.RUnlock()

	if !cached {
		wl, returnColumns := columns.InsertColumnSet(
			metricAllColumns,
			metricColumnsWithDefault,
			metricColumnsWithoutDefault,
			nzDefaults,
		)
		// Never write database-generated columns (here: "id").
		wl = strmangle.SetComplement(wl, metricGeneratedColumns)

		cache.valueMapping, err = queries.BindMapping(metricType, metricMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(metricType, metricMapping, returnColumns)
		if err != nil {
			return err
		}
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"metric\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			// Nothing to write explicitly; let the database fill every column.
			cache.query = "INSERT INTO \"metric\" %sDEFAULT VALUES%s"
		}

		var queryOutput, queryReturning string

		if len(cache.retMapping) != 0 {
			// RETURNING pulls database-assigned values (e.g. the generated
			// id) back into the struct after the insert.
			queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\""))
		}

		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}

	if len(cache.retMapping) != 0 {
		err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	} else {
		_, err = exec.ExecContext(ctx, cache.query, vals...)
	}

	if err != nil {
		return errors.Wrap(err, "model: unable to insert into metric")
	}

	if !cached {
		metricInsertCacheMut.Lock()
		metricInsertCache[key] = cache
		metricInsertCacheMut.Unlock()
	}

	return o.doAfterInsertHooks(ctx, exec)
}
+
// Update uses an executor to update the Metric.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *Metric) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}
	// Statement text and value mapping are cached per column-set key.
	key := makeCacheKey(columns, nil)
	metricUpdateCacheMut.RLock()
	cache, cached := metricUpdateCache[key]
	metricUpdateCacheMut.RUnlock()

	if !cached {
		wl := columns.UpdateColumnSet(
			metricAllColumns,
			metricPrimaryKeyColumns,
		)
		// Never write database-generated columns (here: "id").
		wl = strmangle.SetComplement(wl, metricGeneratedColumns)

		if !columns.IsWhitelist() {
			// Generated guard against auto-updating created_at; a no-op for
			// this table since metricAllColumns has no created_at column.
			wl = strmangle.SetComplement(wl, []string{"created_at"})
		}
		if len(wl) == 0 {
			return 0, errors.New("model: unable to update metric, could not build whitelist")
		}

		cache.query = fmt.Sprintf("UPDATE \"metric\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, metricPrimaryKeyColumns),
		)
		// Values bind as: SET columns first, then the primary key(s).
		cache.valueMapping, err = queries.BindMapping(metricType, metricMapping, append(wl, metricPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}

	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, values)
	}
	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to update metric row")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: failed to get rows affected by update for metric")
	}

	if !cached {
		metricUpdateCacheMut.Lock()
		metricUpdateCache[key] = cache
		metricUpdateCacheMut.Unlock()
	}

	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
+
+// UpdateAll updates all rows with the specified column values.
+func (q metricQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
+ queries.SetUpdate(q.Query, cols)
+
+ result, err := q.Query.ExecContext(ctx, exec)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to update all for metric")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to retrieve rows affected for metric")
+ }
+
+ return rowsAff, nil
+}
+
// UpdateAll updates all rows with the specified column values, using an executor.
func (o MetricSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}

	if len(cols) == 0 {
		return 0, errors.New("model: update all requires at least one column argument")
	}

	// Column NAMES are interpolated into the statement text (only values
	// are parameterized) — callers must pass trusted column names only.
	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))

	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}

	// Append all of the primary key values for each column
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), metricPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	// Single UPDATE with a repeated primary-key WHERE clause covering
	// every object in the slice.
	sql := fmt.Sprintf("UPDATE \"metric\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, metricPrimaryKeyColumns, len(o)))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to update all in metric slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to retrieve rows affected all in update all metric")
	}
	return rowsAff, nil
}
+
// Upsert attempts an insert using an executor, and does an update or ignore on conflict.
// See boil.Columns documentation for how to properly use updateColumns and insertColumns.
func (o *Metric) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error {
	if o == nil {
		return errors.New("model: no metric provided for upsert")
	}

	if err := o.doBeforeUpsertHooks(ctx, exec); err != nil {
		return err
	}

	nzDefaults := queries.NonZeroDefaultSet(metricColumnsWithDefault, o)

	// Build cache key in-line uglily - mysql vs psql problems
	// The key encodes conflict flag/columns, update and insert column sets,
	// and the non-zero defaults, '.'-separated.
	buf := strmangle.GetBuffer()
	if updateOnConflict {
		buf.WriteByte('t')
	} else {
		buf.WriteByte('f')
	}
	buf.WriteByte('.')
	for _, c := range conflictColumns {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	buf.WriteString(strconv.Itoa(updateColumns.Kind))
	for _, c := range updateColumns.Cols {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	buf.WriteString(strconv.Itoa(insertColumns.Kind))
	for _, c := range insertColumns.Cols {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	for _, c := range nzDefaults {
		buf.WriteString(c)
	}
	key := buf.String()
	strmangle.PutBuffer(buf)

	metricUpsertCacheMut.RLock()
	cache, cached := metricUpsertCache[key]
	metricUpsertCacheMut.RUnlock()

	var err error

	if !cached {
		insert, ret := insertColumns.InsertColumnSet(
			metricAllColumns,
			metricColumnsWithDefault,
			metricColumnsWithoutDefault,
			nzDefaults,
		)
		update := updateColumns.UpdateColumnSet(
			metricAllColumns,
			metricPrimaryKeyColumns,
		)

		if updateOnConflict && len(update) == 0 {
			return errors.New("model: unable to upsert metric, could not build update column list")
		}

		// The conflict target defaults to the primary key columns.
		conflict := conflictColumns
		if len(conflict) == 0 {
			conflict = make([]string, len(metricPrimaryKeyColumns))
			copy(conflict, metricPrimaryKeyColumns)
		}
		cache.query = buildUpsertQuerySQLite(dialect, "\"metric\"", updateOnConflict, ret, update, conflict, insert)

		cache.valueMapping, err = queries.BindMapping(metricType, metricMapping, insert)
		if err != nil {
			return err
		}
		if len(ret) != 0 {
			cache.retMapping, err = queries.BindMapping(metricType, metricMapping, ret)
			if err != nil {
				return err
			}
		}
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)
	var returns []interface{}
	if len(cache.retMapping) != 0 {
		returns = queries.PtrsFromMapping(value, cache.retMapping)
	}

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}
	if len(cache.retMapping) != 0 {
		// Scan the RETURNING values back into the struct.
		err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...)
		if errors.Is(err, sql.ErrNoRows) {
			err = nil // Postgres doesn't return anything when there's no update
		}
	} else {
		_, err = exec.ExecContext(ctx, cache.query, vals...)
	}
	if err != nil {
		return errors.Wrap(err, "model: unable to upsert metric")
	}

	if !cached {
		metricUpsertCacheMut.Lock()
		metricUpsertCache[key] = cache
		metricUpsertCacheMut.Unlock()
	}

	return o.doAfterUpsertHooks(ctx, exec)
}
+
// Delete deletes a single Metric record with an executor.
// Delete will match against the primary key column to find the record to delete.
func (o *Metric) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("model: no Metric provided for delete")
	}

	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	// Only the primary key value(s) are bound as query arguments.
	args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), metricPrimaryKeyMapping)
	sql := "DELETE FROM \"metric\" WHERE \"id\"=?"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "model: unable to delete from metric")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "model: failed to get rows affected by delete for metric")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	return rowsAff, nil
}
+
+// DeleteAll deletes all matching rows.
+func (q metricQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ if q.Query == nil {
+ return 0, errors.New("model: no metricQuery provided for delete all")
+ }
+
+ queries.SetDelete(q.Query)
+
+ result, err := q.Query.ExecContext(ctx, exec)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to delete all from metric")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: failed to get rows affected by deleteall for metric")
+ }
+
+ return rowsAff, nil
+}
+
+// DeleteAll deletes all rows in the slice, using an executor.
+func (o MetricSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
+ if len(o) == 0 {
+ return 0, nil
+ }
+
+ if len(metricBeforeDeleteHooks) != 0 {
+ for _, obj := range o {
+ if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ var args []interface{}
+ for _, obj := range o {
+ pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), metricPrimaryKeyMapping)
+ args = append(args, pkeyArgs...)
+ }
+
+ sql := "DELETE FROM \"metric\" WHERE " +
+ strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, metricPrimaryKeyColumns, len(o))
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, args)
+ }
+ result, err := exec.ExecContext(ctx, sql, args...)
+ if err != nil {
+ return 0, errors.Wrap(err, "model: unable to delete all from metric slice")
+ }
+
+ rowsAff, err := result.RowsAffected()
+ if err != nil {
+ return 0, errors.Wrap(err, "model: failed to get rows affected by deleteall for metric")
+ }
+
+ if len(metricAfterDeleteHooks) != 0 {
+ for _, obj := range o {
+ if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ return rowsAff, nil
+}
+
+// Reload refetches the object from the database
+// using the primary keys with an executor.
+func (o *Metric) Reload(ctx context.Context, exec boil.ContextExecutor) error {
+ ret, err := FindMetric(ctx, exec, o.ID)
+ if err != nil {
+ return err
+ }
+
+ *o = *ret
+ return nil
+}
+
+// ReloadAll refetches every row with matching primary key column values
+// and overwrites the original object slice with the newly updated slice.
+func (o *MetricSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
+ if o == nil || len(*o) == 0 {
+ return nil
+ }
+
+ slice := MetricSlice{}
+ var args []interface{}
+ for _, obj := range *o {
+ pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), metricPrimaryKeyMapping)
+ args = append(args, pkeyArgs...)
+ }
+
+ sql := "SELECT \"metric\".* FROM \"metric\" WHERE " +
+ strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, metricPrimaryKeyColumns, len(*o))
+
+ q := queries.Raw(sql, args...)
+
+ err := q.Bind(ctx, exec, &slice)
+ if err != nil {
+ return errors.Wrap(err, "model: unable to reload all in MetricSlice")
+ }
+
+ *o = slice
+
+ return nil
+}
+
+// MetricExists checks if the Metric row exists.
+func MetricExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) {
+ var exists bool
+ sql := "select exists(select 1 from \"metric\" where \"id\"=? limit 1)"
+
+ if boil.IsDebug(ctx) {
+ writer := boil.DebugWriterFrom(ctx)
+ fmt.Fprintln(writer, sql)
+ fmt.Fprintln(writer, iD)
+ }
+ row := exec.QueryRowContext(ctx, sql, iD)
+
+ err := row.Scan(&exists)
+ if err != nil {
+ return false, errors.Wrap(err, "model: unable to check if metric exists")
+ }
+
+ return exists, nil
+}
+// Code generated by SQLBoiler 4.13.0 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
+// This file is meant to be re-generated in place and/or deleted at any time.
+
+package model
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/volatiletech/sqlboiler/v4/drivers"
+ "github.com/volatiletech/strmangle"
+)
+
+// buildUpsertQuerySQLite builds a SQL statement string using the upsertData provided.
+func buildUpsertQuerySQLite(dia drivers.Dialect, tableName string, updateOnConflict bool, ret, update, conflict, whitelist []string) string {
+ conflict = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, conflict)
+ whitelist = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, whitelist)
+ ret = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, ret)
+
+ buf := strmangle.GetBuffer()
+ defer strmangle.PutBuffer(buf)
+
+ columns := "DEFAULT VALUES"
+ if len(whitelist) != 0 {
+ columns = fmt.Sprintf("(%s) VALUES (%s)",
+ strings.Join(whitelist, ", "),
+ strmangle.Placeholders(dia.UseIndexPlaceholders, len(whitelist), 1, 1))
+ }
+
+ fmt.Fprintf(
+ buf,
+ "INSERT INTO %s %s ON CONFLICT ",
+ tableName,
+ columns,
+ )
+
+ if !updateOnConflict || len(update) == 0 {
+ buf.WriteString("DO NOTHING")
+ } else {
+ buf.WriteByte('(')
+ buf.WriteString(strings.Join(conflict, ", "))
+ buf.WriteString(") DO UPDATE SET ")
+
+ for i, v := range update {
+ if i != 0 {
+ buf.WriteByte(',')
+ }
+ quoted := strmangle.IdentQuote(dia.LQ, dia.RQ, v)
+ buf.WriteString(quoted)
+ buf.WriteString(" = EXCLUDED.")
+ buf.WriteString(quoted)
+ }
+ }
+
+ if len(ret) != 0 {
+ buf.WriteString(" RETURNING ")
+ buf.WriteString(strings.Join(ret, ", "))
+ }
+
+ return buf.String()
+}
+output = "internal/model"
+pkgname = "model"
+wipe = true
+no-tests = true
+
+[sqlite3]
+dbname = "./ct.db"
+blacklist = ["schema_migrations"]