diff --git a/go.mod b/go.mod index ca4bc26..1fd8a09 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( ) require ( + filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect @@ -47,6 +48,7 @@ require ( github.com/go-openapi/jsonpointer v0.20.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-sql-driver/mysql v1.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/protobuf v1.5.3 // indirect diff --git a/go.sum b/go.sum index b3d8bdb..582d387 100644 --- a/go.sum +++ b/go.sum @@ -31,6 +31,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= @@ -164,6 +166,8 @@ github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogB github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8= 
github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= diff --git a/metrics/metrics.go b/metrics/metrics.go index 28c9c5e..f5f25e2 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -10,6 +10,7 @@ import ( "github.com/coroot/coroot-cluster-agent/flags" "github.com/coroot/coroot-cluster-agent/metrics/aws" "github.com/coroot/coroot-cluster-agent/metrics/mongo" + "github.com/coroot/coroot-cluster-agent/metrics/mysql" postgres "github.com/coroot/coroot-pg-agent/collector" "github.com/coroot/coroot/model" "github.com/coroot/logger" @@ -147,7 +148,25 @@ func (ms *Metrics) createCollector(config ExporterConfig) (prometheus.Collector, } query.Set("sslmode", sslmode) dsn := fmt.Sprintf("postgresql://%s@%s/postgres?%s", userPass, config.Address(), query.Encode()) - collector, err := postgres.New(dsn, ms.scrapeTimeout, logger.NewKlog(config.Address())) + collector, err := postgres.New(dsn, ms.scrapeInterval, logger.NewKlog(config.Address())) + if err != nil { + return nil, nil, err + } + return collector, func() { _ = collector.Close() }, nil + + case model.ApplicationTypeMysql: + klog.Infoln(config) + + userPass := url.UserPassword(config.Credentials.Username, config.Credentials.Password) + query := url.Values{} + query.Set("timeout", fmt.Sprintf("%dms", ms.scrapeTimeout.Milliseconds())) + tls := config.Params["tls"] + if tls == "" { + tls = "false" + } + query.Set("tls", tls) + dsn := fmt.Sprintf("%s@tcp(%s)/mysql?%s", userPass, 
config.Address(), query.Encode()) + collector, err := mysql.New(dsn, logger.NewKlog(config.Address()), ms.scrapeInterval) if err != nil { return nil, nil, err } diff --git a/metrics/mysql/mysql.go b/metrics/mysql/mysql.go new file mode 100644 index 0000000..b845501 --- /dev/null +++ b/metrics/mysql/mysql.go @@ -0,0 +1,218 @@ +package mysql + +import ( + "context" + "database/sql" + "github.com/coroot/logger" + _ "github.com/go-sql-driver/mysql" + "github.com/prometheus/client_golang/prometheus" + "strconv" + "sync" + "time" +) + +const ( + picoSeconds = 1e12 +) + +var ( + dUp = desc("mysql_up", "") + dScrapeError = desc("mysql_scrape_error", "", "error", "warning") + dInfo = desc("mysql_info", "", "server_version", "server_id", "server_uuid") + + dQueryCalls = desc("mysql_top_query_calls_per_second", "", "schema", "query") + dQueryTotalTime = desc("mysql_top_query_time_per_second", "", "schema", "query") + dQueryLockTime = desc("mysql_top_query_lock_time_per_second", "", "schema", "query") + + dReplicationIORunning = desc("mysql_replication_io_status", "", "source_server_id", "source_server_uuid", "state", "last_error") + dReplicationSQLRunning = desc("mysql_replication_sql_status", "", "source_server_id", "source_server_uuid", "state", "last_error") + dReplicationLag = desc("mysql_replication_lag_seconds", "", "source_server_id", "source_server_uuid") + + dConnectionsMax = desc("mysql_connections_max", "") + dConnectionsCurrent = desc("mysql_connections_current", "") + dConnectionsTotal = desc("mysql_connections_total", "") + dConnectionsAborted = desc("mysql_connections_aborted_total", "") + + dBytesReceived = desc("mysql_traffic_received_bytes_total", "") + dBytesSent = desc("mysql_traffic_sent_bytes_total", "") + + dQueries = desc("mysql_queries_total", "") + dSlowQueries = desc("mysql_slow_queries_total", "") + + dIOTime = desc("mysql_top_table_io_wait_time_per_second", "", "schema", "table", "operation") +) + +type Collector struct { + host string + db 
*sql.DB + logger logger.Logger + topN int + cancelFunc context.CancelFunc + lock sync.RWMutex + scrapeErrors map[string]bool + + globalVariables map[string]string + globalStatus map[string]string + perfschemaPrev *statementsSummarySnapshot + perfschemaCurr *statementsSummarySnapshot + replicaStatuses []*ReplicaStatus + ioByTablePrev *ioByTableSnapshot + ioByTableCurr *ioByTableSnapshot + + invalidQueries map[string]bool +} + +func New(dsn string, logger logger.Logger, scrapeInterval time.Duration) (*Collector, error) { + ctx, cancelFunc := context.WithCancel(context.Background()) + c := &Collector{ + logger: logger, + cancelFunc: cancelFunc, + globalStatus: map[string]string{}, + globalVariables: map[string]string{}, + invalidQueries: map[string]bool{}, + } + var err error + c.db, err = sql.Open("mysql", dsn) + if err != nil { + return nil, err + } + c.db.SetMaxOpenConns(1) + if err := c.db.Ping(); err != nil { + c.logger.Warning("probe failed:", err) + } + go func() { + ticker := time.NewTicker(scrapeInterval) + c.snapshot() + for { + select { + case <-ticker.C: + c.snapshot() + case <-ctx.Done(): + c.logger.Info("stopping mysql collector") + return + } + } + }() + + return c, nil +} + +func (c *Collector) Close() error { + c.cancelFunc() + return c.db.Close() +} + +func (c *Collector) Collect(ch chan<- prometheus.Metric) { + if err := c.db.Ping(); err != nil { + c.logger.Warning("probe failed:", err) + ch <- gauge(dUp, 0) + ch <- gauge(dScrapeError, 1, err.Error(), "") + return + } + ch <- gauge(dUp, 1) + c.lock.RLock() + defer c.lock.RUnlock() + if version := c.globalVariables["version"]; version != "" { + ch <- gauge(dInfo, 1, version, c.globalVariables["server_id"], c.globalVariables["server_uuid"]) + } + + if len(c.scrapeErrors) > 0 { + for e := range c.scrapeErrors { + ch <- gauge(dScrapeError, 1, "", e) + } + } else { + ch <- gauge(dScrapeError, 0, "", "") + } + c.queryMetrics(ch, 20) + c.ioMetrics(ch, 20) + c.replicationMetrics(ch) + metricFromVariable(ch, 
dConnectionsMax, "max_connections", prometheus.GaugeValue, c.globalVariables) + metricFromVariable(ch, dConnectionsCurrent, "Threads_connected", prometheus.GaugeValue, c.globalStatus) + metricFromVariable(ch, dConnectionsTotal, "Connections", prometheus.CounterValue, c.globalStatus) + metricFromVariable(ch, dConnectionsAborted, "Aborted_connects", prometheus.CounterValue, c.globalStatus) + metricFromVariable(ch, dBytesReceived, "Bytes_received", prometheus.CounterValue, c.globalStatus) + metricFromVariable(ch, dBytesSent, "Bytes_sent", prometheus.CounterValue, c.globalStatus) + metricFromVariable(ch, dQueries, "Questions", prometheus.CounterValue, c.globalStatus) + metricFromVariable(ch, dSlowQueries, "Slow_queries", prometheus.CounterValue, c.globalStatus) +} + +func (c *Collector) snapshot() { + c.lock.Lock() + defer c.lock.Unlock() + + c.scrapeErrors = map[string]bool{} + + if err := c.updateVariables("SHOW GLOBAL VARIABLES", c.globalVariables); err != nil { + c.logger.Warning(err) + c.scrapeErrors[err.Error()] = true + return + } + if err := c.updateVariables("SHOW GLOBAL STATUS", c.globalStatus); err != nil { + c.logger.Warning(err) + c.scrapeErrors[err.Error()] = true + return + } + if err := c.updateReplicationStatus(); err != nil { + c.logger.Warning(err) + c.scrapeErrors[err.Error()] = true + return + } + c.perfschemaPrev = c.perfschemaCurr + var err error + c.perfschemaCurr, err = c.queryStatementsSummary(c.perfschemaPrev) + if err != nil { + c.logger.Warning(err) + c.scrapeErrors[err.Error()] = true + return + } + c.ioByTablePrev = c.ioByTableCurr + c.ioByTableCurr, err = c.queryTableIOWaits() + if err != nil { + c.logger.Warning(err) + c.scrapeErrors[err.Error()] = true + return + } +} + +func (c *Collector) Describe(ch chan<- *prometheus.Desc) { + ch <- dUp + ch <- dScrapeError + ch <- dInfo + ch <- dQueryCalls + ch <- dQueryTotalTime + ch <- dQueryLockTime + ch <- dReplicationIORunning + ch <- dReplicationSQLRunning + ch <- dReplicationLag + ch <- 
dConnectionsMax + ch <- dConnectionsCurrent + ch <- dConnectionsTotal + ch <- dConnectionsAborted + ch <- dBytesReceived + ch <- dBytesSent + ch <- dQueries + ch <- dSlowQueries + ch <- dIOTime +} + +func metricFromVariable(ch chan<- prometheus.Metric, desc *prometheus.Desc, name string, typ prometheus.ValueType, variables map[string]string) { + v, ok := variables[name] + if !ok { + return + } + if f, err := strconv.ParseFloat(v, 64); err == nil { + ch <- prometheus.MustNewConstMetric(desc, typ, f) + } +} + +func desc(name, help string, labels ...string) *prometheus.Desc { + return prometheus.NewDesc(name, help, labels, nil) +} + +func gauge(desc *prometheus.Desc, value float64, labels ...string) prometheus.Metric { + return prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, value, labels...) +} + +func counter(desc *prometheus.Desc, value float64, labels ...string) prometheus.Metric { + return prometheus.MustNewConstMetric(desc, prometheus.CounterValue, value, labels...) +} diff --git a/metrics/mysql/perfschema_io_waits_summary_by_table.go b/metrics/mysql/perfschema_io_waits_summary_by_table.go new file mode 100644 index 0000000..b6a824b --- /dev/null +++ b/metrics/mysql/perfschema_io_waits_summary_by_table.go @@ -0,0 +1,99 @@ +package mysql + +import ( + "github.com/prometheus/client_golang/prometheus" + "sort" + "time" +) + +type tableKey struct { + schema string + table string +} + +type ioSummary struct { + readTotalTime uint64 + writeTotalTime uint64 +} + +type ioByTableSnapshot struct { + ts time.Time + rows map[tableKey]ioSummary +} + +func (c *Collector) queryTableIOWaits() (*ioByTableSnapshot, error) { + snapshot := &ioByTableSnapshot{ts: time.Now(), rows: map[tableKey]ioSummary{}} + q := ` + SELECT + OBJECT_SCHEMA, + OBJECT_NAME, + SUM_TIMER_READ, + SUM_TIMER_WRITE + FROM performance_schema.table_io_waits_summary_by_table + WHERE + OBJECT_SCHEMA is not null AND + OBJECT_NAME is not null` + rows, err := c.db.Query(q) + if err != nil { + return 
nil, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var k tableKey
+		var r ioSummary
+		if err := rows.Scan(&k.schema, &k.table, &r.readTotalTime, &r.writeTotalTime); err != nil {
+			c.logger.Warning(err)
+			continue
+		}
+		snapshot.rows[k] = r
+	}
+	return snapshot, rows.Err() // surface iteration errors instead of returning a partial snapshot as success
+}
+
+type ioStats struct {
+	readTimePerSecond  float64
+	writeTimePerSecond float64
+	totalTimePerSecond float64
+}
+
+type ioStatsWithKey struct {
+	k tableKey
+	s *ioStats
+}
+
+func (c *Collector) ioMetrics(ch chan<- prometheus.Metric, n int) {
+	if c.ioByTablePrev == nil || c.ioByTableCurr == nil {
+		return
+	}
+	// per-table rates are accumulated straight into withKeys; no intermediate map is needed
+
+	withKeys := make([]ioStatsWithKey, 0, len(c.ioByTableCurr.rows))
+
+	interval := c.ioByTableCurr.ts.Sub(c.ioByTablePrev.ts).Seconds()
+	for k, s := range c.ioByTableCurr.rows {
+		prev := c.ioByTablePrev.rows[k]
+		stats := &ioStats{}
+		if s.readTotalTime > prev.readTotalTime { // strict compare: a server restart resets counters and would wrap the uint64 diff
+			stats.readTimePerSecond = float64(s.readTotalTime-prev.readTotalTime) / picoSeconds / interval
+		}
+		if s.writeTotalTime > prev.writeTotalTime {
+			stats.writeTimePerSecond = float64(s.writeTotalTime-prev.writeTotalTime) / picoSeconds / interval
+		}
+		stats.totalTimePerSecond = stats.readTimePerSecond + stats.writeTimePerSecond
+		if stats.totalTimePerSecond > 0 {
+			withKeys = append(withKeys, ioStatsWithKey{k: k, s: stats})
+		}
+	}
+	sort.Slice(withKeys, func(i, j int) bool {
+		return withKeys[i].s.totalTimePerSecond > withKeys[j].s.totalTimePerSecond
+	})
+	if n > len(withKeys) {
+		n = len(withKeys)
+	}
+	for _, i := range withKeys[:n] {
+		ch <- gauge(dIOTime, i.s.readTimePerSecond, i.k.schema, i.k.table, "read")
+		ch <- gauge(dIOTime, i.s.writeTimePerSecond, i.k.schema, i.k.table, "write")
+	}
+
+}
diff --git a/metrics/mysql/perfschema_summary_by_digest.go b/metrics/mysql/perfschema_summary_by_digest.go
new file mode 100644
index 0000000..8f490bb
--- /dev/null
+++ b/metrics/mysql/perfschema_summary_by_digest.go
@@ -0,0 +1,132 @@
+package mysql
+
+import (
+	"github.com/coroot/coroot-pg-agent/obfuscate"
+	
"github.com/prometheus/client_golang/prometheus"
+	"sort"
+	"time"
+)
+
+type queryKey struct {
+	schema string
+	query  string
+}
+
+type statementsSummaryRow struct {
+	obfuscatedQueryText string
+	calls               uint64
+	totalTime           uint64
+	lockTime            uint64
+}
+
+type digestKey struct {
+	schema string
+	digest string
+}
+
+type statementsSummarySnapshot struct {
+	ts   time.Time
+	rows map[digestKey]statementsSummaryRow
+}
+
+func (c *Collector) queryStatementsSummary(prev *statementsSummarySnapshot) (*statementsSummarySnapshot, error) {
+	snapshot := &statementsSummarySnapshot{ts: time.Now(), rows: map[digestKey]statementsSummaryRow{}}
+	q := `
+	SELECT
+		ifnull(SCHEMA_NAME, ''),
+		DIGEST,
+		DIGEST_TEXT,
+		COUNT_STAR,
+		SUM_TIMER_WAIT,
+		SUM_LOCK_TIME
+	FROM
+		performance_schema.events_statements_summary_by_digest
+	WHERE
+		DIGEST IS NOT NULL AND
+		DIGEST_TEXT IS NOT NULL`
+	rows, err := c.db.Query(q)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var digestText string
+	for rows.Next() {
+		var k digestKey
+		var r statementsSummaryRow
+		if err := rows.Scan(&k.schema, &k.digest, &digestText, &r.calls, &r.totalTime, &r.lockTime); err != nil {
+			c.logger.Warning(err)
+			continue
+		}
+		if prev != nil { // reuse previously obfuscated text to avoid re-obfuscating a known digest
+			if p, ok := prev.rows[k]; ok {
+				r.obfuscatedQueryText = p.obfuscatedQueryText
+			}
+		}
+		if r.obfuscatedQueryText == "" {
+			r.obfuscatedQueryText = obfuscate.Sql(digestText)
+		}
+		snapshot.rows[k] = r
+	}
+	return snapshot, rows.Err() // report row-iteration errors instead of treating a partial snapshot as success
+}
+
+type queryStats struct {
+	callsPerSecond     float64
+	totalTimePerSecond float64
+	lockTimePerSecond  float64
+}
+
+type statsWithKey struct {
+	k queryKey
+	s *queryStats
+}
+
+func (c *Collector) queryMetrics(ch chan<- prometheus.Metric, n int) {
+	if c.perfschemaPrev == nil || c.perfschemaCurr == nil {
+		return
+	}
+	res := map[queryKey]*queryStats{}
+
+	interval := c.perfschemaCurr.ts.Sub(c.perfschemaPrev.ts).Seconds()
+
+	for k, s := range c.perfschemaCurr.rows {
+		prev := c.perfschemaPrev.rows[k]
+		qk := queryKey{schema: 
k.schema, query: s.obfuscatedQueryText}
+
+		r := res[qk]
+		if r == nil {
+			r = &queryStats{}
+			res[qk] = r
+		}
+		if s.calls > prev.calls { // strict compare: a server restart resets counters and would wrap the uint64 diff
+			r.callsPerSecond += float64(s.calls-prev.calls) / interval
+		}
+		if s.totalTime > prev.totalTime {
+			r.totalTimePerSecond += float64(s.totalTime-prev.totalTime) / picoSeconds / interval
+		}
+		if s.lockTime > prev.lockTime {
+			r.lockTimePerSecond += float64(s.lockTime-prev.lockTime) / picoSeconds / interval
+		}
+	}
+
+	withKeys := make([]statsWithKey, 0, len(res))
+	for k, s := range res {
+		if s.callsPerSecond == 0 {
+			continue
+		}
+		withKeys = append(withKeys, statsWithKey{k: k, s: s})
+	}
+	sort.Slice(withKeys, func(i, j int) bool {
+		return withKeys[i].s.totalTimePerSecond > withKeys[j].s.totalTimePerSecond
+	})
+	if n > len(withKeys) {
+		n = len(withKeys)
+	}
+	for _, i := range withKeys[:n] {
+		ch <- gauge(dQueryCalls, i.s.callsPerSecond, i.k.schema, i.k.query)
+		ch <- gauge(dQueryTotalTime, i.s.totalTimePerSecond, i.k.schema, i.k.query)
+		ch <- gauge(dQueryLockTime, i.s.lockTimePerSecond, i.k.schema, i.k.query)
+	}
+
+}
diff --git a/metrics/mysql/replication.go b/metrics/mysql/replication.go
new file mode 100644
index 0000000..c95c029
--- /dev/null
+++ b/metrics/mysql/replication.go
@@ -0,0 +1,102 @@
+package mysql
+
+import (
+	"database/sql"
+	"github.com/go-sql-driver/mysql"
+	"github.com/prometheus/client_golang/prometheus"
+	"strconv"
+)
+
+type ReplicaStatus struct {
+	vals map[string]string
+}
+
+func (rs *ReplicaStatus) Get(keys ...string) string { // first non-missing value wins (MySQL 8+ vs legacy column names)
+	for _, key := range keys {
+		if val, ok := rs.vals[key]; ok {
+			return val
+		}
+	}
+	return ""
+}
+
+func (c *Collector) updateReplicationStatus() error {
+	c.replicaStatuses = c.replicaStatuses[:0]
+	for _, q := range []string{"SHOW REPLICA STATUS", "SHOW SLAVE STATUS"} {
+		if c.invalidQueries[q] {
+			continue
+		}
+		rows, err := c.db.Query(q)
+		if err != nil {
+			if mysqlErr, ok := err.(*mysql.MySQLError); ok && mysqlErr.Number == 1064 {
+				
c.invalidQueries[q] = true + continue + } + return err + } + defer rows.Close() + for rows.Next() { + cols, err := rows.Columns() + if err != nil { + return err + } + scanArgs := make([]interface{}, len(cols)) + for i := range scanArgs { + scanArgs[i] = &sql.RawBytes{} + } + if err = rows.Scan(scanArgs...); err != nil { + return err + } + st := &ReplicaStatus{vals: map[string]string{}} + for i, col := range cols { + raw, ok := scanArgs[i].(*sql.RawBytes) + if !ok { + continue + } + st.vals[col] = string(*raw) + } + c.replicaStatuses = append(c.replicaStatuses, st) + } + break + } + return nil +} + +func (c *Collector) replicationMetrics(ch chan<- prometheus.Metric) { + for _, st := range c.replicaStatuses { + sourceServerId := st.Get("Source_Server_Id", "Master_Server_Id") + sourceServerUUID := st.Get("Source_UUID", "Master_UUID") + + if ioRunning := st.Get("Replica_IO_Running", "Slave_IO_Running"); ioRunning != "" { + status := 0. + if ioRunning == "Yes" { + status = 1. + } + ch <- gauge( + dReplicationIORunning, + status, + sourceServerId, + sourceServerUUID, + st.Get("Replica_IO_State", "Slave_IO_State"), + st.Get("Last_IO_Error"), + ) + } + if sqlRunning := st.Get("Replica_SQL_Running", "Slave_SQL_Running"); sqlRunning != "" { + status := 0. + if sqlRunning == "Yes" { + status = 1. 
+			}
+			ch <- gauge(
+				dReplicationSQLRunning,
+				status,
+				sourceServerId,
+				sourceServerUUID,
+				st.Get("Replica_SQL_Running_State", "Slave_SQL_Running_State"),
+				st.Get("Last_SQL_Error"),
+			)
+		}
+		if lag, err := strconv.ParseUint(st.Get("Seconds_Behind_Source", "Seconds_Behind_Master"), 10, 64); err == nil {
+			ch <- gauge(dReplicationLag, float64(lag), sourceServerId, sourceServerUUID)
+		}
+	}
+}
diff --git a/metrics/mysql/variables.go b/metrics/mysql/variables.go
new file mode 100644
index 0000000..302c496
--- /dev/null
+++ b/metrics/mysql/variables.go
@@ -0,0 +1,30 @@
+package mysql
+
+// updateVariables runs a two-column name/value query (SHOW GLOBAL VARIABLES /
+// SHOW GLOBAL STATUS) and replaces the contents of dest with the result.
+func (c *Collector) updateVariables(query string, dest map[string]string) error {
+	rows, err := c.db.Query(query)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+	// scan into a scratch map first so dest keeps its previous contents intact
+	// if iteration fails midway
+	fresh := map[string]string{}
+	for rows.Next() {
+		var name, value string
+		if err := rows.Scan(&name, &value); err != nil {
+			c.logger.Warning(err)
+			continue
+		}
+		fresh[name] = value
+	}
+	if err := rows.Err(); err != nil {
+		return err
+	}
+	clear(dest)
+	for name, value := range fresh {
+		dest[name] = value
+	}
+	return nil
+}