Merge pull request #1374 from timstclair/comments

Cleanup comment style: // should be followed by a space
Tim St. Clair 2016-10-07 17:16:27 -07:00 committed by GitHub
commit a9b1ab1dd8
13 changed files with 62 additions and 62 deletions
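
The cleanup is purely mechanical: every affected comment simply gains a space between the comment marker and its text. A minimal before/after illustration of the convention (not taken verbatim from any one file in this diff):

package example

type example struct {
    //before: the comment text immediately follows the slashes
    Before int

    // after: a single space separates the slashes from the text
    After int
}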

View File

@@ -22,43 +22,43 @@ import (
 )
 type Config struct {
-    //the endpoint to hit to scrape metrics
+    // the endpoint to hit to scrape metrics
     Endpoint EndpointConfig `json:"endpoint"`
-    //holds information about different metrics that can be collected
+    // holds information about different metrics that can be collected
     MetricsConfig []MetricConfig `json:"metrics_config"`
 }
 // metricConfig holds information extracted from the config file about a metric
 type MetricConfig struct {
-    //the name of the metric
+    // the name of the metric
     Name string `json:"name"`
-    //enum type for the metric type
+    // enum type for the metric type
     MetricType v1.MetricType `json:"metric_type"`
     // metric units to display on UI and in storage (eg: MB, cores)
     // this is only used for display.
     Units string `json:"units"`
-    //data type of the metric (eg: int, float)
+    // data type of the metric (eg: int, float)
     DataType v1.DataType `json:"data_type"`
-    //the frequency at which the metric should be collected
+    // the frequency at which the metric should be collected
     PollingFrequency time.Duration `json:"polling_frequency"`
-    //the regular expression that can be used to extract the metric
+    // the regular expression that can be used to extract the metric
    Regex string `json:"regex"`
 }
 type Prometheus struct {
-    //the endpoint to hit to scrape metrics
+    // the endpoint to hit to scrape metrics
     Endpoint EndpointConfig `json:"endpoint"`
-    //the frequency at which metrics should be collected
+    // the frequency at which metrics should be collected
     PollingFrequency time.Duration `json:"polling_frequency"`
-    //holds names of different metrics that can be collected
+    // holds names of different metrics that can be collected
     MetricsConfig []string `json:"metrics_config"`
 }
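
As a reading aid for the structs above: Config and MetricConfig describe how a JSON collector definition is mapped onto Go fields through their json tags. A self-contained sketch of that mapping, using local stand-in types (plain strings in place of EndpointConfig and the v1 enums) and hypothetical values shaped like config/sample_config.json:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// Stand-ins for the cAdvisor types shown in the diff above.
type metricConfig struct {
    Name             string        `json:"name"`
    MetricType       string        `json:"metric_type"`
    Units            string        `json:"units"`
    DataType         string        `json:"data_type"`
    PollingFrequency time.Duration `json:"polling_frequency"`
    Regex            string        `json:"regex"`
}

type config struct {
    Endpoint      string         `json:"endpoint"`
    MetricsConfig []metricConfig `json:"metrics_config"`
}

func main() {
    // Hypothetical content; field names follow the json tags above.
    raw := []byte(`{
        "endpoint": "http://localhost:8000/nginx_status",
        "metrics_config": [{
            "name": "activeConnections",
            "metric_type": "gauge",
            "units": "count",
            "data_type": "int",
            "polling_frequency": 10,
            "regex": "Active connections: ([0-9]+)"
        }]
    }`)

    var c config
    if err := json.Unmarshal(raw, &c); err != nil {
        fmt.Println("unmarshal error:", err)
        return
    }
    fmt.Printf("%s -> %d metric(s), first regex %q\n", c.Endpoint, len(c.MetricsConfig), c.MetricsConfig[0].Regex)
}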

View File

@@ -29,13 +29,13 @@ import (
 )
 type GenericCollector struct {
-    //name of the collector
+    // name of the collector
     name string
-    //holds information extracted from the config file for a collector
+    // holds information extracted from the config file for a collector
     configFile Config
-    //holds information necessary to extract metrics
+    // holds information necessary to extract metrics
     info *collectorInfo
     // The Http client to use when connecting to metric endpoints
@@ -43,10 +43,10 @@ type GenericCollector struct {
 }
 type collectorInfo struct {
-    //minimum polling frequency among all metrics
+    // minimum polling frequency among all metrics
     minPollingFrequency time.Duration
-    //regular expresssions for all metrics
+    // regular expresssions for all metrics
     regexps []*regexp.Regexp
     // Limit for the number of srcaped metrics. If the count is higher,
@@ -54,7 +54,7 @@ type collectorInfo struct {
     metricCountLimit int
 }
-//Returns a new collector using the information extracted from the configfile
+// Returns a new collector using the information extracted from the configfile
 func NewCollector(collectorName string, configFile []byte, metricCountLimit int, containerHandler container.ContainerHandler, httpClient *http.Client) (*GenericCollector, error) {
     var configInJSON Config
     err := json.Unmarshal(configFile, &configInJSON)
@@ -64,7 +64,7 @@ func NewCollector(collectorName string, configFile []byte, metricCountLimit int,
     configInJSON.Endpoint.configure(containerHandler)
-    //TODO : Add checks for validity of config file (eg : Accurate JSON fields)
+    // TODO : Add checks for validity of config file (eg : Accurate JSON fields)
     if len(configInJSON.MetricsConfig) == 0 {
         return nil, fmt.Errorf("No metrics provided in config")
@@ -109,7 +109,7 @@ func NewCollector(collectorName string, configFile []byte, metricCountLimit int,
     }, nil
 }
-//Returns name of the collector
+// Returns name of the collector
 func (collector *GenericCollector) Name() string {
     return collector.name
 }
@@ -132,7 +132,7 @@ func (collector *GenericCollector) GetSpec() []v1.MetricSpec {
     return specs
 }
-//Returns collected metrics and the next collection time of the collector
+// Returns collected metrics and the next collection time of the collector
 func (collector *GenericCollector) Collect(metrics map[string][]v1.MetricVal) (time.Time, map[string][]v1.MetricVal, error) {
     currentTime := time.Now()
     nextCollectionTime := currentTime.Add(time.Duration(collector.info.minPollingFrequency))
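
For orientation, a collector built by NewCollector is driven the same way the tests further down drive it: read a config file, construct the collector with a container handler and an HTTP client, then call Collect with a metrics map. A hedged sketch of that flow; the import paths and the mock handler package are my assumptions, not shown in this diff:

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"

    "github.com/google/cadvisor/collector"                       // assumed import path
    containertest "github.com/google/cadvisor/container/testing" // assumed import path
    v1 "github.com/google/cadvisor/info/v1"
)

func main() {
    // Config file name taken from the tests below; any collector definition works.
    configFile, err := ioutil.ReadFile("config/sample_config.json")
    if err != nil {
        panic(err)
    }

    // The tests use a mock handler; a real caller passes the handler of the watched container.
    handler := containertest.NewMockContainerHandler("mockContainer")

    c, err := collector.NewCollector("nginx", configFile, 100, handler, http.DefaultClient)
    if err != nil {
        panic(err)
    }

    // Collect fills the supplied map and reports when the collector should next be polled.
    metrics := map[string][]v1.MetricVal{}
    nextPoll, metrics, err := c.Collect(metrics)
    fmt.Println(c.Name(), nextPoll, len(metrics), err)
}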

View File

@@ -39,7 +39,7 @@ func TestEmptyConfig(t *testing.T) {
 }
 `
-    //Create a temporary config file 'temp.json' with invalid json format
+    // Create a temporary config file 'temp.json' with invalid json format
     assert.NoError(ioutil.WriteFile("temp.json", []byte(emptyConfig), 0777))
     configFile, err := ioutil.ReadFile("temp.json")
@@ -55,7 +55,7 @@ func TestEmptyConfig(t *testing.T) {
 func TestConfigWithErrors(t *testing.T) {
     assert := assert.New(t)
-    //Syntax error: Missed '"' after activeConnections
+    // Syntax error: Missed '"' after activeConnections
     invalid := `
 {
     "endpoint" : "http://localhost:8000/nginx_status",
@@ -71,7 +71,7 @@ func TestConfigWithErrors(t *testing.T) {
 }
 `
-    //Create a temporary config file 'temp.json' with invalid json format
+    // Create a temporary config file 'temp.json' with invalid json format
     assert.NoError(ioutil.WriteFile("temp.json", []byte(invalid), 0777))
     configFile, err := ioutil.ReadFile("temp.json")
     assert.NoError(err)
@@ -86,7 +86,7 @@ func TestConfigWithErrors(t *testing.T) {
 func TestConfigWithRegexErrors(t *testing.T) {
     assert := assert.New(t)
-    //Error: Missed operand for '+' in activeConnections regex
+    // Error: Missed operand for '+' in activeConnections regex
     invalid := `
 {
     "endpoint" : "host:port/nginx_status",
@@ -109,7 +109,7 @@ func TestConfigWithRegexErrors(t *testing.T) {
 }
 `
-    //Create a temporary config file 'temp.json'
+    // Create a temporary config file 'temp.json'
     assert.NoError(ioutil.WriteFile("temp.json", []byte(invalid), 0777))
     configFile, err := ioutil.ReadFile("temp.json")
@@ -125,7 +125,7 @@ func TestConfigWithRegexErrors(t *testing.T) {
 func TestConfig(t *testing.T) {
     assert := assert.New(t)
-    //Create an nginx collector using the config file 'sample_config.json'
+    // Create an nginx collector using the config file 'sample_config.json'
     configFile, err := ioutil.ReadFile("config/sample_config.json")
     assert.NoError(err)
@@ -157,7 +157,7 @@ func TestEndpointConfig(t *testing.T) {
 func TestMetricCollection(t *testing.T) {
     assert := assert.New(t)
-    //Collect nginx metrics from a fake nginx endpoint
+    // Collect nginx metrics from a fake nginx endpoint
     configFile, err := ioutil.ReadFile("config/sample_config.json")
     assert.NoError(err)
@@ -193,7 +193,7 @@ func TestMetricCollection(t *testing.T) {
 func TestMetricCollectionLimit(t *testing.T) {
     assert := assert.New(t)
-    //Collect nginx metrics from a fake nginx endpoint
+    // Collect nginx metrics from a fake nginx endpoint
     configFile, err := ioutil.ReadFile("config/sample_config.json")
     assert.NoError(err)

View File

@@ -32,13 +32,13 @@ import (
 )
 type PrometheusCollector struct {
-    //name of the collector
+    // name of the collector
     name string
-    //rate at which metrics are collected
+    // rate at which metrics are collected
     pollingFrequency time.Duration
-    //holds information extracted from the config file for a collector
+    // holds information extracted from the config file for a collector
     configFile Prometheus
     // the metrics to gather (uses a map as a set)
@@ -52,7 +52,7 @@ type PrometheusCollector struct {
     httpClient *http.Client
 }
-//Returns a new collector using the information extracted from the configfile
+// Returns a new collector using the information extracted from the configfile
 func NewPrometheusCollector(collectorName string, configFile []byte, metricCountLimit int, containerHandler container.ContainerHandler, httpClient *http.Client) (*PrometheusCollector, error) {
     var configInJSON Prometheus
     err := json.Unmarshal(configFile, &configInJSON)
@@ -87,7 +87,7 @@ func NewPrometheusCollector(collectorName string, configFile []byte, metricCount
         return nil, fmt.Errorf("Too many metrics defined: %d limit %d", len(configInJSON.MetricsConfig), metricCountLimit)
     }
-    //TODO : Add checks for validity of config file (eg : Accurate JSON fields)
+    // TODO : Add checks for validity of config file (eg : Accurate JSON fields)
     return &PrometheusCollector{
         name: collectorName,
         pollingFrequency: minPollingFrequency,
@@ -98,7 +98,7 @@ func NewPrometheusCollector(collectorName string, configFile []byte, metricCount
     }, nil
 }
-//Returns name of the collector
+// Returns name of the collector
 func (collector *PrometheusCollector) Name() string {
     return collector.name
 }
@@ -201,7 +201,7 @@ func prometheusLabelSetToCadvisorLabel(promLabels model.Metric) string {
     return string(b.Bytes())
 }
-//Returns collected metrics and the next collection time of the collector
+// Returns collected metrics and the next collection time of the collector
 func (collector *PrometheusCollector) Collect(metrics map[string][]v1.MetricVal) (time.Time, map[string][]v1.MetricVal, error) {
     currentTime := time.Now()
     nextCollectionTime := currentTime.Add(time.Duration(collector.pollingFrequency))
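
The "uses a map as a set" note in the struct above refers to restricting scraped samples to the metric names listed in metrics_config. The real collector works on parsed model.Metric values (see prometheusLabelSetToCadvisorLabel); the following is only a stripped-down sketch of the set idea, with a hypothetical scrape payload (go_goroutines echoes the test fixture below) and no handling of labels:

package main

import (
    "bufio"
    "fmt"
    "strings"
)

func main() {
    metricsConfig := []string{"go_goroutines"} // names the collector is configured to keep
    wanted := map[string]bool{}                // a map used as a set
    for _, name := range metricsConfig {
        wanted[name] = true
    }

    // Hypothetical scrape output in the Prometheus text format.
    payload := "go_gc_duration_seconds_count 14\ngo_goroutines 16\n"

    scanner := bufio.NewScanner(strings.NewReader(payload))
    for scanner.Scan() {
        line := scanner.Text()
        if line == "" || strings.HasPrefix(line, "#") {
            continue
        }
        fields := strings.Fields(line)
        if len(fields) == 0 {
            continue
        }
        if wanted[fields[0]] {
            fmt.Println("keeping:", line)
        }
    }
}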

View File

@@ -31,7 +31,7 @@ import (
 func TestPrometheus(t *testing.T) {
     assert := assert.New(t)
-    //Create a prometheus collector using the config file 'sample_config_prometheus.json'
+    // Create a prometheus collector using the config file 'sample_config_prometheus.json'
     configFile, err := ioutil.ReadFile("config/sample_config_prometheus.json")
     containerHandler := containertest.NewMockContainerHandler("mockContainer")
     collector, err := NewPrometheusCollector("Prometheus", configFile, 100, containerHandler, http.DefaultClient)
@@ -130,7 +130,7 @@ func TestPrometheusEndpointConfig(t *testing.T) {
 func TestPrometheusShortResponse(t *testing.T) {
     assert := assert.New(t)
-    //Create a prometheus collector using the config file 'sample_config_prometheus.json'
+    // Create a prometheus collector using the config file 'sample_config_prometheus.json'
     configFile, err := ioutil.ReadFile("config/sample_config_prometheus.json")
     containerHandler := containertest.NewMockContainerHandler("mockContainer")
     collector, err := NewPrometheusCollector("Prometheus", configFile, 100, containerHandler, http.DefaultClient)
@@ -153,7 +153,7 @@ func TestPrometheusShortResponse(t *testing.T) {
 func TestPrometheusMetricCountLimit(t *testing.T) {
     assert := assert.New(t)
-    //Create a prometheus collector using the config file 'sample_config_prometheus.json'
+    // Create a prometheus collector using the config file 'sample_config_prometheus.json'
     configFile, err := ioutil.ReadFile("config/sample_config_prometheus.json")
     containerHandler := containertest.NewMockContainerHandler("mockContainer")
     collector, err := NewPrometheusCollector("Prometheus", configFile, 10, containerHandler, http.DefaultClient)
@@ -182,7 +182,7 @@ func TestPrometheusMetricCountLimit(t *testing.T) {
 func TestPrometheusFiltersMetrics(t *testing.T) {
     assert := assert.New(t)
-    //Create a prometheus collector using the config file 'sample_config_prometheus_filtered.json'
+    // Create a prometheus collector using the config file 'sample_config_prometheus_filtered.json'
     configFile, err := ioutil.ReadFile("config/sample_config_prometheus_filtered.json")
     containerHandler := containertest.NewMockContainerHandler("mockContainer")
     collector, err := NewPrometheusCollector("Prometheus", configFile, 100, containerHandler, http.DefaultClient)
@@ -221,7 +221,7 @@ go_goroutines 16
 func TestPrometheusFiltersMetricsCountLimit(t *testing.T) {
     assert := assert.New(t)
-    //Create a prometheus collector using the config file 'sample_config_prometheus_filtered.json'
+    // Create a prometheus collector using the config file 'sample_config_prometheus_filtered.json'
     configFile, err := ioutil.ReadFile("config/sample_config_prometheus_filtered.json")
     containerHandler := containertest.NewMockContainerHandler("mockContainer")
     _, err = NewPrometheusCollector("Prometheus", configFile, 1, containerHandler, http.DefaultClient)

View File

@@ -84,7 +84,7 @@ func newRktContainerHandler(name string, rktClient rktapi.PublicAPIClient, rktPa
         return nil, fmt.Errorf("this should be impossible!, new handler failing, but factory allowed, name = %s", name)
     }
-    //rktnetes uses containerID: rkt://fff40827-b994-4e3a-8f88-6427c2c8a5ac:nginx
+    // rktnetes uses containerID: rkt://fff40827-b994-4e3a-8f88-6427c2c8a5ac:nginx
     if parsed.Container == "" {
         isPod = true
         aliases = append(aliases, "rkt://"+parsed.Pod)

View File

@@ -389,27 +389,27 @@ type NetworkStats struct {
 }
 type TcpStat struct {
-    //Count of TCP connections in state "Established"
+    // Count of TCP connections in state "Established"
     Established uint64
-    //Count of TCP connections in state "Syn_Sent"
+    // Count of TCP connections in state "Syn_Sent"
     SynSent uint64
-    //Count of TCP connections in state "Syn_Recv"
+    // Count of TCP connections in state "Syn_Recv"
     SynRecv uint64
-    //Count of TCP connections in state "Fin_Wait1"
+    // Count of TCP connections in state "Fin_Wait1"
     FinWait1 uint64
-    //Count of TCP connections in state "Fin_Wait2"
+    // Count of TCP connections in state "Fin_Wait2"
     FinWait2 uint64
-    //Count of TCP connections in state "Time_Wait
+    // Count of TCP connections in state "Time_Wait
     TimeWait uint64
-    //Count of TCP connections in state "Close"
+    // Count of TCP connections in state "Close"
     Close uint64
-    //Count of TCP connections in state "Close_Wait"
+    // Count of TCP connections in state "Close_Wait"
     CloseWait uint64
-    //Count of TCP connections in state "Listen_Ack"
+    // Count of TCP connections in state "Listen_Ack"
     LastAck uint64
-    //Count of TCP connections in state "Listen"
+    // Count of TCP connections in state "Listen"
     Listen uint64
-    //Count of TCP connections in state "Closing"
+    // Count of TCP connections in state "Closing"
     Closing uint64
 }
@@ -511,7 +511,7 @@ type ContainerStats struct {
     // Task load stats
     TaskStats LoadStats `json:"task_stats,omitempty"`
-    //Custom metrics from all collectors
+    // Custom metrics from all collectors
     CustomMetrics map[string][]MetricVal `json:"custom_metrics,omitempty"`
 }

View File

@@ -90,7 +90,7 @@ func runStorageTest(f func(test.TestStorageDriver, *testing.T), t *testing.T, bu
     username := "root"
     password := "root"
     hostname := "localhost:8086"
-    //percentilesDuration := 10 * time.Minute
+    // percentilesDuration := 10 * time.Minute
     config := influxdb.Config{
         URL: url.URL{Scheme: "http", Host: hostname},
@@ -108,7 +108,7 @@ func runStorageTest(f func(test.TestStorageDriver, *testing.T), t *testing.T, bu
     }
     // Delete all data by the end of the call.
-    //defer client.Query(influxdb.Query{Command: fmt.Sprintf("drop database \"%v\"", database)})
+    // defer client.Query(influxdb.Query{Command: fmt.Sprintf("drop database \"%v\"", database)})
     driver, err := newStorage(machineName,
         table,

View File

@@ -64,7 +64,7 @@ func (self *redisStorage) defaultReadyToFlush() bool {
     return time.Since(self.lastWrite) >= self.bufferDuration
 }
-//We must add some default params (for example: MachineName,ContainerName...)because containerStats do not include them
+// We must add some default params (for example: MachineName,ContainerName...)because containerStats do not include them
 func (self *redisStorage) containerStatsAndDefaultValues(ref info.ContainerReference, stats *info.ContainerStats) *detailSpec {
     timestamp := stats.Timestamp.UnixNano() / 1E3
     var containerName string
@@ -82,7 +82,7 @@ func (self *redisStorage) containerStatsAndDefaultValues(ref info.ContainerRefer
     return detail
 }
-//Push the data into redis
+// Push the data into redis
 func (self *redisStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
     if stats == nil {
         return nil
@@ -94,7 +94,7 @@ func (self *redisStorage) AddStats(ref info.ContainerReference, stats *info.Cont
         defer self.lock.Unlock()
         // Add some default params based on containerStats
         detail := self.containerStatsAndDefaultValues(ref, stats)
-        //To json
+        // To json
         b, _ := json.Marshal(detail)
         if self.readyToFlush() {
             seriesToFlush = b
@@ -102,7 +102,7 @@ func (self *redisStorage) AddStats(ref info.ContainerReference, stats *info.Cont
         }
     }()
     if len(seriesToFlush) > 0 {
-        //We use redis's "LPUSH" to push the data to the redis
+        // We use redis's "LPUSH" to push the data to the redis
         self.conn.Send("LPUSH", self.redisKey, seriesToFlush)
     }
     return nil
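
The AddStats path above reduces to: enrich the stats with defaults such as the machine and container names, marshal the result to JSON, and LPUSH the blob onto a Redis list once the buffer window has elapsed. A small stand-alone sketch of that shape; the struct fields, key, and address are hypothetical, and the client used here (gomodule/redigo) is an assumption rather than whatever the driver itself vendors:

package main

import (
    "encoding/json"
    "log"
    "time"

    "github.com/gomodule/redigo/redis" // client chosen for the sketch only
)

// detail mirrors the idea of containerStatsAndDefaultValues: the stats payload
// plus the defaults (machine name, container name, timestamp) added to it.
type detail struct {
    MachineName   string `json:"machine_name"`
    ContainerName string `json:"container_name"`
    Timestamp     int64  `json:"timestamp"` // microseconds, as in UnixNano() / 1E3 above
}

func main() {
    conn, err := redis.Dial("tcp", "localhost:6379") // hypothetical address
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    d := detail{
        MachineName:   "machineA",
        ContainerName: "/docker/abc123",
        Timestamp:     time.Now().UnixNano() / 1e3,
    }
    b, _ := json.Marshal(d)

    // Same flush shape as the driver: queue the JSON blob on a list under one key.
    if err := conn.Send("LPUSH", "cadvisorKey", b); err != nil {
        log.Fatal(err)
    }
    if err := conn.Flush(); err != nil {
        log.Fatal(err)
    }
}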

View File

@@ -104,7 +104,7 @@ func (self *statsdStorage) containerFsStatsToValues(
     }
 }
-//Push the data into redis
+// Push the data into redis
 func (self *statsdStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
     if stats == nil {
         return nil

View File

@@ -34,7 +34,7 @@ func onAzure() bool {
     return strings.Contains(string(data), MicrosoftCorporation)
 }
-//TODO: Implement method.
+// TODO: Implement method.
 func getAzureInstanceType() info.InstanceType {
     return info.UnknownInstance
 }

View File

@@ -97,7 +97,7 @@ func detectInstanceID(cloudProvider info.CloudProvider) info.InstanceID {
     return info.UnNamedInstance
 }
-//TODO: Implement method.
+// TODO: Implement method.
 func onBaremetal() bool {
     return false
 }

View File

@@ -88,7 +88,7 @@ func (t *Tail) attemptOpen() error {
     t.file, err = os.Open(t.filename)
     if err == nil {
         // TODO: not interested in old events?
-        //t.file.Seek(0, os.SEEK_END)
+        // t.file.Seek(0, os.SEEK_END)
         t.reader = bufio.NewReader(t.file)
         return nil
     }