diff --git a/cmd/agent.go b/cmd/agent.go
index c0d142cb..ee4483ef 100644
--- a/cmd/agent.go
+++ b/cmd/agent.go
@@ -16,7 +16,7 @@ var agentCmd = &cobra.Command{
 	Short: "start the preflight agent",
 	Long: `The agent will periodically gather data for the configured data
 gatherers and send it to a remote backend for evaluation`,
-	Run: agent.Run,
+	RunE: agent.Run,
 }
 
 var agentInfoCmd = &cobra.Command{
diff --git a/cmd/echo.go b/cmd/echo.go
index 34b302e4..04477cac 100644
--- a/cmd/echo.go
+++ b/cmd/echo.go
@@ -11,7 +11,7 @@ var echoCmd = &cobra.Command{
 	Short: "starts an echo server to test the agent",
 	Long: `The agent sends data to a server. This echo server can be used to
 act as the server part and echo the data received by the agent.`,
-	Run: echo.Echo,
+	RunE: echo.Echo,
 }
 
 func init() {
diff --git a/deploy/charts/venafi-kubernetes-agent/templates/deployment.yaml b/deploy/charts/venafi-kubernetes-agent/templates/deployment.yaml
index 3453475a..9453e98d 100644
--- a/deploy/charts/venafi-kubernetes-agent/templates/deployment.yaml
+++ b/deploy/charts/venafi-kubernetes-agent/templates/deployment.yaml
@@ -55,6 +55,7 @@ spec:
           {{- end }}
           args:
             - "agent"
+            - "--log-format=json"
             - "-c"
             - "/etc/venafi/agent/config/{{ default "config.yaml" .Values.config.configmap.key }}"
           {{- if .Values.authentication.venafiConnection.enabled }}
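Note on the cmd/ changes above: switching both commands from cobra's Run to RunE means startup failures are returned to cobra and reported by Execute, rather than terminating the process inside the command with log.Fatal, so deferred cleanup still runs. A minimal sketch (a standalone example, not code from this PR) of how an error returned from RunE surfaces to the caller:

// Hypothetical standalone example of cobra's RunE error propagation.
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use: "demo",
		// RunE returns an error instead of calling log.Fatal, so deferred
		// cleanup anywhere in the call chain still gets a chance to run.
		RunE: func(cmd *cobra.Command, args []string) error {
			return fmt.Errorf("simulated startup failure")
		},
	}
	// Execute returns the RunE error; cobra prints it and the caller picks the exit code.
	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}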
diff --git a/pkg/agent/config.go b/pkg/agent/config.go
index 6f8186cc..7c2842ef 100644
--- a/pkg/agent/config.go
+++ b/pkg/agent/config.go
@@ -3,11 +3,11 @@ package agent
 import (
 	"fmt"
 	"io"
-	"log"
 	"net/url"
 	"os"
 	"time"
 
+	"github.com/go-logr/logr"
 	"github.com/hashicorp/go-multierror"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
@@ -20,6 +20,7 @@ import (
 	"github.com/jetstack/preflight/pkg/datagatherer/k8s"
 	"github.com/jetstack/preflight/pkg/datagatherer/local"
 	"github.com/jetstack/preflight/pkg/kubeconfig"
+	"github.com/jetstack/preflight/pkg/logs"
 	"github.com/jetstack/preflight/pkg/version"
 )
 
@@ -164,9 +165,12 @@ type AgentCmdFlags struct {
 
 	// Prometheus (--enable-metrics) enables the Prometheus metrics server.
 	Prometheus bool
+
+	LogOptions logs.LogOptions
 }
 
 func InitAgentCmdFlags(c *cobra.Command, cfg *AgentCmdFlags) {
+	logs.SetupFlags(c.Flags(), &cfg.LogOptions)
 	c.PersistentFlags().StringVarP(
 		&cfg.ConfigFilePath,
 		"agent-config-file",
@@ -346,32 +350,35 @@ type CombinedConfig struct {
 // The error returned may be a multierror.Error. Use multierror.Prefix(err,
 // "context:") rather than fmt.Errorf("context: %w", err) when wrapping the
 // error.
-func ValidateAndCombineConfig(log *log.Logger, cfg Config, flags AgentCmdFlags) (CombinedConfig, client.Client, error) {
+func ValidateAndCombineConfig(log logr.Logger, cfg Config, flags AgentCmdFlags) (CombinedConfig, client.Client, error) {
 	res := CombinedConfig{}
 	var errs error
 
 	{
-		var mode AuthMode
+		var (
+			mode   AuthMode
+			reason string
+		)
 		switch {
 		case flags.VenafiCloudMode && flags.CredentialsPath != "":
 			mode = VenafiCloudKeypair
-			log.Printf("Using the %s auth mode since --venafi-cloud and --credentials-path were specified.", mode)
+			reason = "--venafi-cloud and --credentials-path were specified."
 		case flags.ClientID != "" && flags.PrivateKeyPath != "":
 			mode = VenafiCloudKeypair
-			log.Printf("Using the %s auth mode since --client-id and --private-key-path were specified.", mode)
+			reason = "--client-id and --private-key-path were specified."
 		case flags.ClientID != "":
 			return CombinedConfig{}, nil, fmt.Errorf("if --client-id is specified, --private-key-path must also be specified")
 		case flags.PrivateKeyPath != "":
 			return CombinedConfig{}, nil, fmt.Errorf("--private-key-path is specified, --client-id must also be specified")
 		case flags.VenConnName != "":
 			mode = VenafiCloudVenafiConnection
-			log.Printf("Using the %s auth mode since --venafi-connection was specified.", mode)
+			reason = "--venafi-connection was specified."
 		case flags.APIToken != "":
 			mode = JetstackSecureAPIToken
-			log.Printf("Using the %s auth mode since --api-token was specified.", mode)
+			reason = "--api-token was specified."
 		case !flags.VenafiCloudMode && flags.CredentialsPath != "":
 			mode = JetstackSecureOAuth
-			log.Printf("Using the %s auth mode since --credentials-file was specified without --venafi-cloud.", mode)
+			reason = "--credentials-file was specified without --venafi-cloud."
 		default:
 			return CombinedConfig{}, nil, fmt.Errorf("no auth mode specified. You can use one of four auth modes:\n" +
 				" - Use (--venafi-cloud with --credentials-file) or (--client-id with --private-key-path) to use the " + string(VenafiCloudKeypair) + " mode.\n" +
@@ -379,6 +386,7 @@ func ValidateAndCombineConfig(log *log.Logger, cfg Config, flags AgentCmdFlags)
 				" - Use --credentials-file alone if you want to use the " + string(JetstackSecureOAuth) + " mode.\n" +
 				" - Use --api-token if you want to use the " + string(JetstackSecureAPIToken) + " mode.\n")
 		}
+		log.Info("Auth mode", "mode", mode, "reason", reason)
 		res.AuthMode = mode
 	}
 
@@ -394,10 +402,10 @@ func ValidateAndCombineConfig(log *log.Logger, cfg Config, flags AgentCmdFlags)
 		case hasServerField && hasEndpointField:
 			// The `server` field takes precedence over the deprecated
 			// `endpoint` field.
-			log.Printf("The `server` and `endpoint` fields are both set in the config; using the `server` field.")
+			log.Info("The `server` and `endpoint` fields are both set in the config; using the `server` field.")
 			server = cfg.Server
 		case !hasServerField && hasEndpointField:
-			log.Printf("Using deprecated Endpoint configuration. User Server instead.")
+			log.Info("Using deprecated Endpoint configuration. Use Server instead.")
 			if cfg.Endpoint.Protocol == "" && cfg.Server == "" {
 				cfg.Endpoint.Protocol = "http"
 			}
@@ -415,7 +423,10 @@ func ValidateAndCombineConfig(log *log.Logger, cfg Config, flags AgentCmdFlags)
 			errs = multierror.Append(errs, fmt.Errorf("server %q is not a valid URL", server))
 		}
 		if res.AuthMode == VenafiCloudVenafiConnection && server != "" {
-			log.Printf("ignoring the server field specified in the config file. In %s mode, this field is not needed.", VenafiCloudVenafiConnection)
+			log.Info(
+				"ignoring the server field specified in the config file.",
+				"reason", fmt.Sprintf("In %s mode, this field is not needed.", VenafiCloudVenafiConnection),
+			)
 			server = ""
 		}
 		res.Server = server
@@ -445,7 +456,10 @@ func ValidateAndCombineConfig(log *log.Logger, cfg Config, flags AgentCmdFlags)
 		// change this value with the new --venafi-connection flag, and this
 		// field is simply ignored.
 		if cfg.VenafiCloud != nil && cfg.VenafiCloud.UploadPath != "" {
-			log.Printf(`ignoring the venafi-cloud.upload_path field in the config file. In %s mode, this field is not needed.`, res.AuthMode)
+			log.Info(
+				"ignoring the venafi-cloud.upload_path field in the config file.",
+				"reason", fmt.Sprintf(`In %s mode, this field is not needed.`, res.AuthMode),
+			)
 		}
 		uploadPath = ""
 	}
 
@@ -463,7 +477,10 @@ func ValidateAndCombineConfig(log *log.Logger, cfg Config, flags AgentCmdFlags)
 	// https://venafi.atlassian.net/browse/VC-35385 is done.
 	{
 		if cfg.VenafiCloud != nil && cfg.VenafiCloud.UploaderID != "" {
-			log.Printf(`ignoring the venafi-cloud.uploader_id field in the config file. This field is not needed in %s mode.`, res.AuthMode)
+			log.Info(
+				"ignoring the venafi-cloud.uploader_id field in the config file.",
+				"reason", fmt.Sprintf("This field is not needed in %s mode.", res.AuthMode),
+			)
 		}
 	}
 
@@ -515,13 +532,13 @@ func ValidateAndCombineConfig(log *log.Logger, cfg Config, flags AgentCmdFlags)
 	case flags.Period == 0 && cfg.Period == 0:
 		errs = multierror.Append(errs, fmt.Errorf("period must be set using --period or -p, or using the 'period' field in the config file"))
 	case flags.Period == 0 && cfg.Period > 0:
-		log.Printf("Using period from config %s", cfg.Period)
+		log.Info("Using period from config", "period", cfg.Period)
 		period = cfg.Period
 	case flags.Period > 0 && cfg.Period == 0:
 		period = flags.Period
 	case flags.Period > 0 && cfg.Period > 0:
 		// The flag takes precedence.
-		log.Printf("Both the 'period' field and --period are set. Using the value provided with --period.")
+		log.Info("Both the 'period' field and --period are set. Using the value provided with --period.")
 		period = flags.Period
 	}
 	res.Period = period
@@ -582,7 +599,7 @@ func ValidateAndCombineConfig(log *log.Logger, cfg Config, flags AgentCmdFlags)
 // The error returned may be a multierror.Error. Use multierror.Prefix(err,
 // "context:") rather than fmt.Errorf("context: %w", err) when wrapping the
 // error.
-func validateCredsAndCreateClient(log *log.Logger, flagCredentialsPath, flagClientID, flagPrivateKeyPath, flagAPIToken string, cfg CombinedConfig) (client.Client, error) {
+func validateCredsAndCreateClient(log logr.Logger, flagCredentialsPath, flagClientID, flagPrivateKeyPath, flagAPIToken string, cfg CombinedConfig) (client.Client, error) {
 	var errs error
 
 	var preflightClient client.Client
@@ -702,7 +719,7 @@ func ValidateDataGatherers(dataGatherers []DataGatherer) error {
 
 // The error returned may be a multierror.Error. Instead of adding context to
 // the error with fmt.Errorf("%w", err), use multierror.Prefix(err, "context").
-func createCredentialClient(log *log.Logger, credentials client.Credentials, cfg CombinedConfig, agentMetadata *api.AgentMetadata) (client.Client, error) {
+func createCredentialClient(log logr.Logger, credentials client.Credentials, cfg CombinedConfig, agentMetadata *api.AgentMetadata) (client.Client, error) {
 	switch creds := credentials.(type) {
 	case *client.VenafiSvcAccountCredentials:
 		// The uploader ID isn't actually used in the backend, let's use an
@@ -713,7 +730,7 @@ func createCredentialClient(log *log.Logger, credentials client.Credentials, cfg
 		if cfg.AuthMode == VenafiCloudKeypair {
 			// We don't do this for the VenafiCloudVenafiConnection mode because
 			// the upload_path field is ignored in that mode.
-			log.Println("Loading upload_path from \"venafi-cloud\" configuration.")
+			log.Info("Loading upload_path from \"venafi-cloud\" configuration.")
 			uploadPath = cfg.UploadPath
 		}
 		return client.NewVenafiCloudClient(agentMetadata, creds, cfg.Server, uploaderID, uploadPath)
diff --git a/pkg/agent/dummy_data_gatherer.go b/pkg/agent/dummy_data_gatherer.go
index 73ef6a16..49f88b61 100644
--- a/pkg/agent/dummy_data_gatherer.go
+++ b/pkg/agent/dummy_data_gatherer.go
@@ -44,7 +44,7 @@ func (g *dummyDataGatherer) Delete() error {
 	return nil
 }
 
-func (c *dummyDataGatherer) Fetch() (interface{}, int, error) {
+func (c *dummyDataGatherer) Fetch(_ context.Context) (interface{}, int, error) {
 	var err error
 	if c.attemptNumber < c.FailedAttempts {
 		err = fmt.Errorf("First %d attempts will fail", c.FailedAttempts)
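With config.go now taking a logr.Logger, the auth-mode decision is logged as structured key/value pairs instead of a formatted sentence. A minimal sketch of the same pattern (assuming go-logr/logr v1.3+ for FromSlogHandler; the values shown are illustrative, not produced by this code):

// Hypothetical standalone example of the structured "Auth mode" log record.
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	handler := slog.NewJSONHandler(os.Stdout, nil)
	log := logr.FromSlogHandler(handler)

	// "mode" and "reason" become separate fields that a log pipeline can
	// filter on, rather than being baked into one Printf-formatted message.
	log.Info("Auth mode",
		"mode", "VenafiCloudKeypair",
		"reason", "--client-id and --private-key-path were specified.")
}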
diff --git a/pkg/agent/run.go b/pkg/agent/run.go
index 6d0b474e..a6fcb848 100644
--- a/pkg/agent/run.go
+++ b/pkg/agent/run.go
@@ -18,6 +18,8 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/spf13/cobra"
 	"golang.org/x/sync/errgroup"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 
 	"github.com/jetstack/preflight/api"
@@ -26,6 +28,7 @@ import (
 	"github.com/jetstack/preflight/pkg/logs"
 	"github.com/jetstack/preflight/pkg/version"
 
+	"net/http/pprof"
 	_ "net/http/pprof"
 )
 
@@ -40,91 +43,98 @@ var Flags AgentCmdFlags
 const schemaVersion string = "v2.0.0"
 
 // Run starts the agent process
-func Run(cmd *cobra.Command, args []string) {
-	logs.Log.Printf("Preflight agent version: %s (%s)", version.PreflightVersion, version.Commit)
-	ctx, cancel := context.WithCancel(context.Background())
+func Run(cmd *cobra.Command, args []string) (runErr error) {
+	log := Flags.LogOptions.Initialize()
+
+	ctx, cancel := context.WithCancel(klog.NewContext(context.Background(), log))
 	defer cancel()
 
+	log.Info("starting", "version", version.PreflightVersion, "commit", version.Commit)
+
 	file, err := os.Open(Flags.ConfigFilePath)
 	if err != nil {
-		logs.Log.Fatalf("Failed to load config file for agent from: %s", Flags.ConfigFilePath)
+		return fmt.Errorf("Failed to load config file for agent from %q: %s", Flags.ConfigFilePath, err)
 	}
 	defer file.Close()
 
 	b, err := io.ReadAll(file)
 	if err != nil {
-		logs.Log.Fatalf("Failed to read config file: %s", err)
+		return fmt.Errorf("Failed to read config file: %s", err)
 	}
 
 	cfg, err := ParseConfig(b)
 	if err != nil {
-		logs.Log.Fatalf("Failed to parse config file: %s", err)
+		return fmt.Errorf("Failed to parse config file: %s", err)
 	}
 
-	config, preflightClient, err := ValidateAndCombineConfig(logs.Log, cfg, Flags)
+	config, preflightClient, err := ValidateAndCombineConfig(log, cfg, Flags)
 	if err != nil {
-		logs.Log.Fatalf("While evaluating configuration: %v", err)
+		return fmt.Errorf("Failed to evaluate configuration: %s", err)
 	}
 
+	serverMux := http.NewServeMux()
+
 	if Flags.Profiling {
-		logs.Log.Printf("pprof profiling was enabled.\nRunning profiling on port :6060")
-		go func() {
-			err := http.ListenAndServe(":6060", nil)
-			if err != nil && !errors.Is(err, http.ErrServerClosed) {
-				logs.Log.Fatalf("failed to run pprof profiler: %s", err)
-			}
-		}()
-	}
+		log.Info("pprof profiling enabled")
+		serverMux.HandleFunc("/debug/pprof/", pprof.Index)
+		serverMux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
+		serverMux.HandleFunc("/debug/pprof/profile", pprof.Profile)
+		serverMux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
+		serverMux.HandleFunc("/debug/pprof/trace", pprof.Trace)
 
-	go func() {
-		server := http.NewServeMux()
+	}
 
-		if Flags.Prometheus {
-			logs.Log.Printf("Prometheus was enabled.\nRunning prometheus on port :8081")
-			prometheus.MustRegister(metricPayloadSize)
-			server.Handle("/metrics", promhttp.Handler())
-		}
+	if Flags.Prometheus {
+		log.Info("Prometheus metrics enabled")
+		prometheus.MustRegister(metricPayloadSize)
+		serverMux.Handle("/metrics", promhttp.Handler())
+	}
 
-		// Health check endpoint. Since we haven't figured a good way of knowning
-		// what "ready" means for the agent, we just return 200 OK inconditionally.
-		// The goal is to satisfy some Kubernetes distributions, like OpenShift,
-		// that require a liveness and health probe to be present for each pod.
-		server.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
-			w.WriteHeader(http.StatusOK)
-		})
-		server.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) {
-			w.WriteHeader(http.StatusOK)
-		})
+	// Health check endpoint. Since we haven't figured out a good way of knowing
+	// what "ready" means for the agent, we just return 200 OK unconditionally.
+	// The goal is to satisfy some Kubernetes distributions, like OpenShift,
+	// that require a liveness and health probe to be present for each pod.
+	serverMux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+	})
+	serverMux.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+	})
+
+	serverPort := ":8081"
+	server := http.Server{Addr: serverPort, Handler: serverMux}
+
+	group, gCTX := errgroup.WithContext(ctx)
+	defer func() {
+		cancel()
+		runErr = utilerrors.NewAggregate([]error{
+			runErr,
+			group.Wait(),
+		})
+	}()
 
-		err := http.ListenAndServe(":8081", server)
+	group.Go(func() error {
+		err := server.ListenAndServe()
 		if err != nil && !errors.Is(err, http.ErrServerClosed) {
-			logs.Log.Fatalf("failed to run the health check server: %s", err)
+			return fmt.Errorf("failed to run HTTP server: %s", err)
 		}
-	}()
+		return nil
+	})
+	group.Go(func() error {
+		<-gCTX.Done()
+		ctx, cancel := context.WithTimeout(context.WithoutCancel(gCTX), time.Second*3)
+		defer cancel()
+		return server.Shutdown(ctx)
+	})
 
 	_, isVenConn := preflightClient.(*client.VenConnClient)
 	if isVenConn {
-		go func() {
-			err := preflightClient.(manager.Runnable).Start(ctx)
-			if err != nil {
-				logs.Log.Fatalf("failed to start a controller-runtime component: %v", err)
-			}
-
-			// The agent must stop if the controller-runtime component stops.
-			cancel()
-		}()
+		group.Go(func() error {
+			return preflightClient.(manager.Runnable).Start(gCTX)
+		})
 	}
 
 	dataGatherers := map[string]datagatherer.DataGatherer{}
-	group, gctx := errgroup.WithContext(ctx)
-
-	defer func() {
-		// TODO: replace Fatalf log calls with Errorf and return the error
-		cancel()
-		if err := group.Wait(); err != nil {
-			logs.Log.Fatalf("failed to wait for controller-runtime component to stop: %v", err)
-		}
-	}()
 
 	// load datagatherer config and boot each one
 	for _, dgConfig := range config.DataGatherers {
@@ -134,16 +144,16 @@ func Run(cmd *cobra.Command, args []string) {
 			logs.Log.Fatalf("running data gatherer %s of type %s as Local, data-path override present: %s", dgConfig.Name, dgConfig.Kind, dgConfig.DataPath)
 		}
 
-		newDg, err := dgConfig.Config.NewDataGatherer(gctx)
+		newDg, err := dgConfig.Config.NewDataGatherer(gCTX)
 		if err != nil {
-			logs.Log.Fatalf("failed to instantiate %q data gatherer %q: %v", kind, dgConfig.Name, err)
+			return fmt.Errorf("failed to instantiate %q data gatherer %q: %v", kind, dgConfig.Name, err)
 		}
 
-		logs.Log.Printf("starting %q datagatherer", dgConfig.Name)
+		log.Info("starting datagatherer", "name", dgConfig.Name)
 
 		// start the data gatherers and wait for the cache sync
 		group.Go(func() error {
-			if err := newDg.Run(gctx.Done()); err != nil {
+			if err := newDg.Run(gCTX.Done()); err != nil {
 				return fmt.Errorf("failed to start %q data gatherer %q: %v", kind, dgConfig.Name, err)
 			}
 			return nil
 		})
@@ -163,7 +173,7 @@ func Run(cmd *cobra.Command, args []string) {
 		// seconds to perform an initial sync. It may fail, and that's fine
 		// too, it will backoff and retry of its own accord. Initial boot
 		// will only be delayed by a max of 5 seconds.
-		bootCtx, bootCancel := context.WithTimeout(gctx, 5*time.Second)
+		bootCtx, bootCancel := context.WithTimeout(gCTX, 5*time.Second)
 		defer bootCancel()
 		for _, dgConfig := range config.DataGatherers {
 			dg := dataGatherers[dgConfig.Name]
@@ -172,7 +182,7 @@ func Run(cmd *cobra.Command, args []string) {
 			// the run.
 			if err := dg.WaitForCacheSync(bootCtx.Done()); err != nil {
 				// log sync failure, this might recover in future
-				logs.Log.Printf("failed to complete initial sync of %q data gatherer %q: %v", dgConfig.Kind, dgConfig.Name, err)
+				log.Error(err, "failed to complete initial sync of data gatherer", "kind", dgConfig.Kind, "name", dgConfig.Name)
 			}
 		}
 
@@ -180,7 +190,9 @@ func Run(cmd *cobra.Command, args []string) {
 	// configured output using data in datagatherer caches or refreshing from
 	// APIs each cycle depending on datagatherer implementation
 	for {
-		gatherAndOutputData(config, preflightClient, dataGatherers)
+		if err := gatherAndOutputData(gCTX, config, preflightClient, dataGatherers); err != nil {
+			return err
+		}
 
 		if config.OneShot {
 			break
@@ -188,69 +200,74 @@ func Run(cmd *cobra.Command, args []string) {
 		time.Sleep(config.Period)
 	}
+	return nil
 }
 
-func gatherAndOutputData(config CombinedConfig, preflightClient client.Client, dataGatherers map[string]datagatherer.DataGatherer) {
+func gatherAndOutputData(ctx context.Context, config CombinedConfig, preflightClient client.Client, dataGatherers map[string]datagatherer.DataGatherer) error {
+	log := klog.FromContext(ctx)
 	var readings []*api.DataReading
 
 	if config.InputPath != "" {
-		logs.Log.Printf("Reading data from local file: %s", config.InputPath)
+		log.Info("Reading data from local file", "path", config.InputPath)
 		data, err := os.ReadFile(config.InputPath)
 		if err != nil {
-			logs.Log.Fatalf("failed to read local data file: %s", err)
+			return fmt.Errorf("failed to read local data file: %s", err)
 		}
 		err = json.Unmarshal(data, &readings)
 		if err != nil {
-			logs.Log.Fatalf("failed to unmarshal local data file: %s", err)
+			return fmt.Errorf("failed to unmarshal local data file: %s", err)
 		}
 	} else {
-		readings = gatherData(config, dataGatherers)
+
+		var err error
+		readings, err = gatherData(ctx, config, dataGatherers)
+		if err != nil {
+			return err
+		}
 	}
 
 	if config.OutputPath != "" {
 		data, err := json.MarshalIndent(readings, "", " ")
 		if err != nil {
-			logs.Log.Fatal("failed to marshal JSON")
+			return fmt.Errorf("failed to marshal JSON")
 		}
 		err = os.WriteFile(config.OutputPath, data, 0644)
 		if err != nil {
-			logs.Log.Fatalf("failed to output to local file: %s", err)
+			return fmt.Errorf("failed to output to local file: %s", err)
		}
-		logs.Log.Printf("Data saved to local file: %s", config.OutputPath)
+		log.Info("Data saved to local file", "path", config.OutputPath)
 	} else {
 		backOff := backoff.NewExponentialBackOff()
 		backOff.InitialInterval = 30 * time.Second
 		backOff.MaxInterval = 3 * time.Minute
 		backOff.MaxElapsedTime = config.BackoffMaxTime
 		post := func() error {
-			return postData(config, preflightClient, readings)
+			return postData(ctx, config, preflightClient, readings)
 		}
 		err := backoff.RetryNotify(post, backOff, func(err error, t time.Duration) {
-			logs.Log.Printf("retrying in %v after error: %s", t, err)
+			log.Error(err, "retrying", "in", t)
 		})
 		if err != nil {
-			logs.Log.Fatalf("Exiting due to fatal error uploading: %v", err)
+			return fmt.Errorf("Exiting due to fatal error uploading: %v", err)
 		}
 	}
+	return nil
 }
 
-func gatherData(config CombinedConfig, dataGatherers map[string]datagatherer.DataGatherer) []*api.DataReading {
+func gatherData(ctx context.Context, config CombinedConfig, dataGatherers map[string]datagatherer.DataGatherer) ([]*api.DataReading, error) {
+	log := klog.FromContext(ctx).WithName("gather-data")
 	var readings []*api.DataReading
 	var dgError *multierror.Error
 
 	for k, dg := range dataGatherers {
-		dgData, count, err := dg.Fetch()
+		dgData, count, err := dg.Fetch(ctx)
 		if err != nil {
 			dgError = multierror.Append(dgError, fmt.Errorf("error in datagatherer %s: %w", k, err))
 			continue
 		}
 
-		if count >= 0 {
-			logs.Log.Printf("successfully gathered %d items from %q datagatherer", count, k)
-		} else {
-			logs.Log.Printf("successfully gathered data from %q datagatherer", k)
-		}
+		log.Info("success", "count", count, "name", k)
 
 		readings = append(readings, &api.DataReading{
 			ClusterID:    config.ClusterID,
 			DataGatherer: k,
@@ -273,16 +290,18 @@ func gatherData(config CombinedConfig, dataGatherers map[string]datagatherer.Dat
 	}
 
 	if config.StrictMode && dgError.ErrorOrNil() != nil {
-		logs.Log.Fatalf("halting datagathering in strict mode due to error: %s", dgError.ErrorOrNil())
+		return nil, fmt.Errorf("halting datagathering in strict mode due to error: %s", dgError.ErrorOrNil())
 	}
 
-	return readings
+	return readings, nil
 }
 
-func postData(config CombinedConfig, preflightClient client.Client, readings []*api.DataReading) error {
+func postData(ctx context.Context, config CombinedConfig, preflightClient client.Client, readings []*api.DataReading) error {
+	log := klog.FromContext(ctx)
+
 	baseURL := config.Server
 
-	logs.Log.Println("Posting data to:", baseURL)
+	log.Info("Posting data", "URL", baseURL)
 
 	if config.AuthMode == VenafiCloudKeypair || config.AuthMode == VenafiCloudVenafiConnection {
 		// orgID and clusterID are not required for Venafi Cloud auth
@@ -293,7 +312,7 @@ func postData(config CombinedConfig, preflightClient client.Client, readings []*
 		if err != nil {
 			return fmt.Errorf("post to server failed: %+v", err)
 		}
-		logs.Log.Println("Data sent successfully.")
+		log.Info("Data sent successfully")
 		return nil
 	}
 
@@ -301,7 +320,7 @@ func postData(config CombinedConfig, preflightClient client.Client, readings []*
 	if config.OrganizationID == "" {
 		data, err := json.Marshal(readings)
 		if err != nil {
-			logs.Log.Fatalf("Cannot marshal readings: %+v", err)
+			return fmt.Errorf("Cannot marshal readings: %+v", err)
 		}
 
 		// log and collect metrics about the upload size
@@ -309,7 +328,7 @@ func postData(config CombinedConfig, preflightClient client.Client, readings []*
 			prometheus.Labels{"organization": config.OrganizationID, "cluster": config.ClusterID},
 		)
 		metric.Set(float64(len(data)))
-		logs.Log.Printf("Data readings upload size: %d", len(data))
+		log.Info("Data readings upload size", "size", len(data))
 		path := config.EndpointPath
 		if path == "" {
 			path = "/api/v1/datareadings"
@@ -329,7 +348,7 @@ func postData(config CombinedConfig, preflightClient client.Client, readings []*
 			return fmt.Errorf("received response with status code %d. Body: [%s]", code, errorContent)
 		}
 
-		logs.Log.Println("Data sent successfully.")
+		log.Info("Data sent successfully")
 
 		return err
 	}
 
@@ -341,7 +360,7 @@ func postData(config CombinedConfig, preflightClient client.Client, readings []*
 	if err != nil {
 		return fmt.Errorf("post to server failed: %+v", err)
 	}
-	logs.Log.Println("Data sent successfully.")
+	log.Info("Data sent successfully")
 
 	return nil
 }
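The rewritten run.go hands every long-running piece (the HTTP server, the optional VenafiConnection client, the data gatherers) to a single errgroup, and a companion goroutine shuts the HTTP server down once the group's context is cancelled, while the deferred aggregate collects any goroutine errors into the command's return value. A simplified standalone sketch of that shutdown pattern (assumes Go 1.21+ for context.WithoutCancel; the names here are illustrative, not the PR's):

// Minimal sketch: an errgroup that owns an HTTP server and shuts it down
// when the group context is cancelled or another goroutine fails.
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/sync/errgroup"
)

func serve(ctx context.Context, addr string) error {
	server := http.Server{Addr: addr, Handler: http.NewServeMux()}
	group, gCtx := errgroup.WithContext(ctx)

	group.Go(func() error {
		err := server.ListenAndServe()
		if err != nil && !errors.Is(err, http.ErrServerClosed) {
			return fmt.Errorf("failed to run HTTP server: %w", err)
		}
		return nil
	})
	group.Go(func() error {
		// When the parent context is cancelled or another goroutine fails,
		// give in-flight requests a few seconds to finish.
		<-gCtx.Done()
		shutdownCtx, cancel := context.WithTimeout(context.WithoutCancel(gCtx), 3*time.Second)
		defer cancel()
		return server.Shutdown(shutdownCtx)
	})

	return group.Wait()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	fmt.Println(serve(ctx, ":8081"))
}

Deriving the shutdown context with WithoutCancel is what gives Shutdown its own deadline even though the group context is already cancelled at that point.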
diff --git a/pkg/datagatherer/datagatherer.go b/pkg/datagatherer/datagatherer.go
index ec5b8b67..cb31daf8 100644
--- a/pkg/datagatherer/datagatherer.go
+++ b/pkg/datagatherer/datagatherer.go
@@ -14,7 +14,7 @@ type DataGatherer interface {
 	// Fetch retrieves data.
 	// count is the number of items that were discovered. A negative count means the number
 	// of items was indeterminate.
-	Fetch() (data interface{}, count int, err error)
+	Fetch(ctx context.Context) (data interface{}, count int, err error)
 	// Run starts the data gatherer's informers for resource collection.
 	// Returns error if the data gatherer informer wasn't initialized
 	Run(stopCh <-chan struct{}) error
diff --git a/pkg/datagatherer/k8s/discovery.go b/pkg/datagatherer/k8s/discovery.go
index 49ac5324..9ee979b5 100644
--- a/pkg/datagatherer/k8s/discovery.go
+++ b/pkg/datagatherer/k8s/discovery.go
@@ -63,7 +63,7 @@ func (g *DataGathererDiscovery) Delete() error {
 	return nil
 }
 
 // Fetch will fetch discovery data from the apiserver, or return an error
-func (g *DataGathererDiscovery) Fetch() (interface{}, int, error) {
+func (g *DataGathererDiscovery) Fetch(_ context.Context) (interface{}, int, error) {
 	data, err := g.cl.ServerVersion()
 	if err != nil {
 		return nil, -1, fmt.Errorf("failed to get server version: %v", err)
 	}
diff --git a/pkg/datagatherer/k8s/dynamic.go b/pkg/datagatherer/k8s/dynamic.go
index d24b0629..96b3d4b5 100644
--- a/pkg/datagatherer/k8s/dynamic.go
+++ b/pkg/datagatherer/k8s/dynamic.go
@@ -305,7 +305,7 @@ func (g *DataGathererDynamic) Delete() error {
 
 // Fetch will fetch the requested data from the apiserver, or return an error
 // if fetching the data fails.
-func (g *DataGathererDynamic) Fetch() (interface{}, int, error) {
+func (g *DataGathererDynamic) Fetch(_ context.Context) (interface{}, int, error) {
 	if g.groupVersionResource.String() == "" {
 		return nil, -1, fmt.Errorf("resource type must be specified")
 	}
diff --git a/pkg/datagatherer/local/local.go b/pkg/datagatherer/local/local.go
index c8957efc..f4f20374 100644
--- a/pkg/datagatherer/local/local.go
+++ b/pkg/datagatherer/local/local.go
@@ -54,7 +54,7 @@ func (g *DataGatherer) WaitForCacheSync(stopCh <-chan struct{}) error {
 }
 
 // Fetch loads and returns the data from the LocalDatagatherer's dataPath
-func (g *DataGatherer) Fetch() (interface{}, int, error) {
+func (g *DataGatherer) Fetch(_ context.Context) (interface{}, int, error) {
 	dataBytes, err := os.ReadFile(g.dataPath)
 	if err != nil {
 		return nil, -1, err
 	}
diff --git a/pkg/echo/echo.go b/pkg/echo/echo.go
index 62fa5f7a..c66d38ee 100644
--- a/pkg/echo/echo.go
+++ b/pkg/echo/echo.go
@@ -2,6 +2,7 @@ package echo
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"net/http"
 
@@ -9,20 +10,20 @@ import (
 	"github.com/spf13/cobra"
 
 	"github.com/jetstack/preflight/api"
-	"github.com/jetstack/preflight/pkg/logs"
 )
 
 var EchoListen string
 var Compact bool
 
-func Echo(cmd *cobra.Command, args []string) {
+func Echo(cmd *cobra.Command, args []string) error {
 	http.HandleFunc("/", echoHandler)
 	fmt.Println("Listening to requests at ", EchoListen)
 	err := http.ListenAndServe(EchoListen, nil)
-	if err != nil {
-		logs.Log.Fatal(err)
+	if err != nil && !errors.Is(err, http.ErrServerClosed) {
+		return err
 	}
+	return nil
 }
 
 func echoHandler(w http.ResponseWriter, r *http.Request) {
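Every DataGatherer now receives a context in Fetch. The built-in gatherers above ignore it for now (the `_ context.Context` parameter), but the new signature lets implementations honour cancellation and pick up the context-scoped logger installed by run.go. A hypothetical gatherer sketch (only the Fetch method is shown; this type is not part of the PR):

// Hypothetical context-aware gatherer matching the new Fetch signature.
package example

import (
	"context"

	"k8s.io/klog/v2"
)

type staticGatherer struct {
	items []string
}

// Fetch can stop early on cancellation and use the logger carried by ctx.
func (g *staticGatherer) Fetch(ctx context.Context) (interface{}, int, error) {
	log := klog.FromContext(ctx)
	if err := ctx.Err(); err != nil {
		return nil, -1, err
	}
	log.V(1).Info("returning cached items", "count", len(g.items))
	return g.items, len(g.items), nil
}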
diff --git a/pkg/logs/logs.go b/pkg/logs/logs.go
index 042281ae..113eb8ea 100644
--- a/pkg/logs/logs.go
+++ b/pkg/logs/logs.go
@@ -1,8 +1,90 @@
 package logs
 
 import (
+	"errors"
 	"log"
+	"log/slog"
 	"os"
+
+	"github.com/go-logr/logr"
+	"github.com/spf13/pflag"
+	cliflag "k8s.io/component-base/cli/flag"
+	"k8s.io/klog/v2"
+)
+
+var Log *log.Logger
+
+func init() {
+	Log = slog.NewLogLogger(slog.Default().Handler(), slog.LevelDebug)
+}
+
+type LogOptions struct {
+	Format logFormat
+	Level  int
+}
+
+const (
+	LogFormatText logFormat = "text"
+	LogFormatJSON logFormat = "json"
 )
 
-var Log = log.New(os.Stderr, "", log.LstdFlags)
+type logFormat string
+
+// String is used both by fmt.Print and by Cobra in help text
+func (e *logFormat) String() string {
+	if len(*e) == 0 {
+		return string(LogFormatText)
+	}
+	return string(*e)
+}
+
+// Set must have pointer receiver to avoid changing the value of a copy
+func (e *logFormat) Set(v string) error {
+	switch v {
+	case "text", "json":
+		*e = logFormat(v)
+		return nil
+	default:
+		return errors.New(`must be one of "text" or "json"`)
+	}
+}
+
+// Type is only used in help text
+func (e *logFormat) Type() string {
+	return "string"
+}
+
+func SetupFlags(fs *pflag.FlagSet, logOptions *LogOptions) {
+	var nfs cliflag.NamedFlagSets
+
+	lfs := nfs.FlagSet("Logging")
+	lfs.Var(&logOptions.Format,
+		"log-format",
+		"Log format (text or json)")
+
+	lfs.IntVarP(&logOptions.Level,
+		"log-level", "v", 1,
+		"Log level (1-5).")
+
+	for _, f := range nfs.FlagSets {
+		fs.AddFlagSet(f)
+	}
+}
+
+func (o *LogOptions) Initialize() logr.Logger {
+	opts := &slog.HandlerOptions{
+		// To avoid a breaking change in application configuration,
+		// we negate the (configured) logr verbosity level to get the corresponding slog level
+		Level: slog.Level(-o.Level),
+	}
+	var handler slog.Handler = slog.NewTextHandler(os.Stdout, opts)
+	if o.Format == LogFormatJSON {
+		handler = slog.NewJSONHandler(os.Stdout, opts)
+	}
+
+	slog.SetDefault(slog.New(handler))
+
+	log := logr.FromSlogHandler(handler)
+	klog.SetLogger(log)
+	return log
+}
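On the logging setup itself: Initialize negates the configured verbosity because logr emits V(n) records at slog level -n, so an slog handler configured with Level: slog.Level(-v) keeps verbosity 0 through v and drops anything chattier. A standalone sketch of that mapping (assuming go-logr/logr v1.3+ for FromSlogHandler):

// Minimal sketch: how the negated slog level gates logr V-levels.
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	level := 3 // the value --log-level/-v would carry
	handler := slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
		// logr's V(n) is emitted at slog level -n, so allowing levels >= -3
		// is the same as allowing verbosity 0 through 3.
		Level: slog.Level(-level),
	})
	log := logr.FromSlogHandler(handler)

	log.V(1).Info("kept: verbosity 1")
	log.V(3).Info("kept: verbosity 3")
	log.V(4).Info("dropped: verbosity 4 is below the configured threshold")
}

With the Helm chart now passing --log-format=json, the same records would be emitted by the JSON handler instead of the text handler.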