From 79a949b55e3c18b837375738a72600166b4676b6 Mon Sep 17 00:00:00 2001 From: Christoph Kluge Date: Mon, 23 Jan 2023 18:48:06 +0100 Subject: [PATCH] Rework pkg/log, add 'loglevel' and 'logdate' flags, streamline - remove some previously added manual location strings: now handled by pkg/log depending on loglevel - keep manual location strings in fmt print functions - add 'notice' and 'critical' loglevels - add 'Panic' and 'Panicf' functions to log panics - address issue #26 --- cmd/cc-backend/main.go | 52 ++++--- internal/api/rest.go | 120 +++++++-------- internal/auth/auth.go | 18 +-- internal/auth/jwt.go | 18 +-- internal/auth/ldap.go | 12 +- internal/auth/users.go | 2 +- internal/config/config.go | 8 +- internal/graph/schema.resolvers.go | 5 +- internal/metricdata/influxdb-v2.go | 10 +- internal/metricdata/metricdata.go | 4 +- internal/metricdata/prometheus.go | 22 +-- internal/repository/dbConnection.go | 6 +- internal/repository/init.go | 14 +- internal/repository/job.go | 18 +-- internal/repository/query.go | 4 +- internal/repository/user.go | 4 +- internal/routerConfig/routes.go | 8 +- internal/runtimeEnv/setup.go | 2 +- pkg/archive/fsBackend.go | 22 +-- pkg/archive/nodelist.go | 2 +- pkg/log/log.go | 224 +++++++++++++++++++++++----- pkg/schema/validate.go | 2 +- 22 files changed, 368 insertions(+), 209 deletions(-) diff --git a/cmd/cc-backend/main.go b/cmd/cc-backend/main.go index 9d69748..a314990 100644 --- a/cmd/cc-backend/main.go +++ b/cmd/cc-backend/main.go @@ -62,19 +62,21 @@ var ( ) func main() { - var flagReinitDB, flagServer, flagSyncLDAP, flagGops, flagDev, flagVersion bool - var flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob string + var flagReinitDB, flagServer, flagSyncLDAP, flagGops, flagDev, flagVersion, flagLogDateTime bool + var flagNewUser, flagDelUser, flagGenJWT, flagConfigFile, flagImportJob, flagLogLevel string flag.BoolVar(&flagReinitDB, "init-db", false, "Go through job-archive and re-initialize the 'job', 'tag', and 'jobtag' tables (all running jobs will be lost!)") flag.BoolVar(&flagSyncLDAP, "sync-ldap", false, "Sync the 'user' table with ldap") flag.BoolVar(&flagServer, "server", false, "Start a server, continues listening on port after initialization and argument handling") flag.BoolVar(&flagGops, "gops", false, "Listen via github.com/google/gops/agent (for debugging)") flag.BoolVar(&flagDev, "dev", false, "Enable development components: GraphQL Playground and Swagger UI") flag.BoolVar(&flagVersion, "version", false, "Show version information and exit") + flag.BoolVar(&flagLogDateTime, "logdate", false, "Set true to add date and time to log messages (Default: false)") flag.StringVar(&flagConfigFile, "config", "./config.json", "Specify alternative path to `config.json`") flag.StringVar(&flagNewUser, "add-user", "", "Add a new user. Argument format: `:[admin,support,api,user]:`") flag.StringVar(&flagDelUser, "del-user", "", "Remove user by `username`") flag.StringVar(&flagGenJWT, "jwt", "", "Generate and print a JWT for the user specified by its `username`") flag.StringVar(&flagImportJob, "import-job", "", "Import a job. 
Argument format: `:,...`") + flag.StringVar(&flagLogLevel, "loglevel", "debug", "Sets the logging level: `[debug (default),info,notice,warn,err,fatal,crit]`") flag.Parse() if flagVersion { @@ -85,15 +87,19 @@ func main() { os.Exit(0) } + // Apply config flags for pkg/log + log.SetLogLevel(flagLogLevel) + log.SetLogDateTime(flagLogDateTime) + // See https://github.com/google/gops (Runtime overhead is almost zero) if flagGops { if err := agent.Listen(agent.Options{}); err != nil { - log.Fatalf("MAIN > gops/agent.Listen failed: %s", err.Error()) + log.Fatalf("gops/agent.Listen failed: %s", err.Error()) } } if err := runtimeEnv.LoadEnv("./.env"); err != nil && !os.IsNotExist(err) { - log.Fatalf("MAIN > parsing './.env' file failed: %s", err.Error()) + log.Fatalf("parsing './.env' file failed: %s", err.Error()) } // Initialize sub-modules and handle command line flags. @@ -118,7 +124,7 @@ func main() { "ldap": config.Keys.LdapConfig, "jwt": config.Keys.JwtConfig, }); err != nil { - log.Fatalf("MAIN > auth initialization failed: %v", err) + log.Fatalf("auth initialization failed: %v", err) } if d, err := time.ParseDuration(config.Keys.SessionMaxAge); err != nil { @@ -128,70 +134,70 @@ func main() { if flagNewUser != "" { parts := strings.SplitN(flagNewUser, ":", 3) if len(parts) != 3 || len(parts[0]) == 0 { - log.Fatal("MAIN > invalid argument format for user creation") + log.Fatal("invalid argument format for user creation") } if err := authentication.AddUser(&auth.User{ Username: parts[0], Password: parts[2], Roles: strings.Split(parts[1], ","), }); err != nil { - log.Fatalf("MAIN > adding '%s' user authentication failed: %v", parts[0], err) + log.Fatalf("adding '%s' user authentication failed: %v", parts[0], err) } } if flagDelUser != "" { if err := authentication.DelUser(flagDelUser); err != nil { - log.Fatalf("MAIN > deleting user failed: %v", err) + log.Fatalf("deleting user failed: %v", err) } } if flagSyncLDAP { if authentication.LdapAuth == nil { - log.Fatal("MAIN > cannot sync: LDAP authentication is not configured") + log.Fatal("cannot sync: LDAP authentication is not configured") } if err := authentication.LdapAuth.Sync(); err != nil { - log.Fatalf("MAIN > LDAP sync failed: %v", err) + log.Fatalf("LDAP sync failed: %v", err) } - log.Info("MAIN > LDAP sync successfull") + log.Info("LDAP sync successfull") } if flagGenJWT != "" { user, err := authentication.GetUser(flagGenJWT) if err != nil { - log.Fatalf("MAIN > could not get user from JWT: %v", err) + log.Fatalf("could not get user from JWT: %v", err) } if !user.HasRole(auth.RoleApi) { - log.Warnf("MAIN > user '%s' does not have the API role", user.Username) + log.Warnf("user '%s' does not have the API role", user.Username) } jwt, err := authentication.JwtAuth.ProvideJWT(user) if err != nil { - log.Fatalf("MAIN > failed to provide JWT to user '%s': %v", user.Username, err) + log.Fatalf("failed to provide JWT to user '%s': %v", user.Username, err) } fmt.Printf("MAIN > JWT for '%s': %s\n", user.Username, jwt) } } else if flagNewUser != "" || flagDelUser != "" { - log.Fatal("MAIN > arguments --add-user and --del-user can only be used if authentication is enabled") + log.Fatal("arguments --add-user and --del-user can only be used if authentication is enabled") } if err := archive.Init(config.Keys.Archive, config.Keys.DisableArchive); err != nil { - log.Fatalf("MAIN > failed to initialize archive: %s", err.Error()) + log.Fatalf("failed to initialize archive: %s", err.Error()) } if err := metricdata.Init(config.Keys.DisableArchive); err != 
nil { - log.Fatalf("MAIN > failed to initialize metricdata repository: %s", err.Error()) + log.Fatalf("failed to initialize metricdata repository: %s", err.Error()) } if flagReinitDB { if err := repository.InitDB(); err != nil { - log.Fatal("MAIN > failed to re-initialize repository DB: %s", err.Error()) + log.Fatalf("failed to re-initialize repository DB: %s", err.Error()) } } if flagImportJob != "" { if err := repository.HandleImportFlag(flagImportJob); err != nil { - log.Fatalf("MAIN > job import failed: %s", err.Error()) + log.Fatalf("job import failed: %s", err.Error()) } } @@ -361,7 +367,7 @@ func main() { // Start http or https server listener, err := net.Listen("tcp", config.Keys.Addr) if err != nil { - log.Fatalf("MAIN > starting http listener failed: %v", err) + log.Fatalf("starting http listener failed: %v", err) } if !strings.HasSuffix(config.Keys.Addr, ":80") && config.Keys.RedirectHttpTo != "" { @@ -373,7 +379,7 @@ func main() { if config.Keys.HttpsCertFile != "" && config.Keys.HttpsKeyFile != "" { cert, err := tls.LoadX509KeyPair(config.Keys.HttpsCertFile, config.Keys.HttpsKeyFile) if err != nil { - log.Fatalf("MAIN > loading X509 keypair failed: %v", err) + log.Fatalf("loading X509 keypair failed: %v", err) } listener = tls.NewListener(listener, &tls.Config{ Certificates: []tls.Certificate{cert}, @@ -393,14 +399,14 @@ func main() { // be established first, then the user can be changed, and after that, // the actual http server can be started. if err := runtimeEnv.DropPrivileges(config.Keys.Group, config.Keys.User); err != nil { - log.Fatalf("MAIN > error while preparing server start: %s", err.Error()) + log.Fatalf("error while preparing server start: %s", err.Error()) } wg.Add(1) go func() { defer wg.Done() if err := server.Serve(listener); err != nil && err != http.ErrServerClosed { - log.Fatalf("MAIN > starting server failed: %v", err) + log.Fatalf("starting server failed: %v", err) } }() diff --git a/internal/api/rest.go b/internal/api/rest.go index 004d183..75be29f 100644 --- a/internal/api/rest.go +++ b/internal/api/rest.go @@ -134,7 +134,7 @@ type ApiTag struct { type TagJobApiRequest []*ApiTag func handleError(err error, statusCode int, rw http.ResponseWriter) { - log.Warnf("API/REST > ERROR : %s", err.Error()) + log.Warnf("REST ERROR : %s", err.Error()) rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(statusCode) json.NewEncoder(rw).Encode(ErrorResponse{ @@ -169,7 +169,7 @@ func decode(r io.Reader, val interface{}) error { // @router /jobs/ [get] func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { - handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) + handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) return } @@ -184,7 +184,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { for _, s := range vals { state := schema.JobState(s) if !state.Valid() { - http.Error(rw, "invalid query parameter value: state", http.StatusBadRequest) + http.Error(rw, "REST > invalid query parameter value: state", http.StatusBadRequest) return } filter.State = append(filter.State, state) @@ -194,7 +194,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { case "start-time": st := strings.Split(vals[0], "-") if len(st) != 2 { - http.Error(rw, "invalid query parameter value: startTime", http.StatusBadRequest) + http.Error(rw, "REST > invalid query 
parameter value: startTime", http.StatusBadRequest) return } from, err := strconv.ParseInt(st[0], 10, 64) @@ -226,7 +226,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { case "with-metadata": withMetadata = true default: - http.Error(rw, "invalid query parameter: "+key, http.StatusBadRequest) + http.Error(rw, "REST > invalid query parameter: "+key, http.StatusBadRequest) return } } @@ -271,7 +271,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { results = append(results, res) } - log.Debugf("API/REST > /api/jobs: %d jobs returned", len(results)) + log.Debugf("/api/jobs: %d jobs returned", len(results)) bw := bufio.NewWriter(rw) defer bw.Flush() if err := json.NewEncoder(bw).Encode(map[string]interface{}{ @@ -300,7 +300,7 @@ func (api *RestApi) getJobs(rw http.ResponseWriter, r *http.Request) { // @router /jobs/tag_job/{id} [post] func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { - handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) + handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) return } @@ -365,13 +365,13 @@ func (api *RestApi) tagJob(rw http.ResponseWriter, r *http.Request) { // @router /jobs/start_job/ [post] func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { - handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) + handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) return } req := schema.JobMeta{BaseJob: schema.JobDefaults} if err := decode(r.Body, &req); err != nil { - handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw) + handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw) return } @@ -391,12 +391,12 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { // Check if combination of (job_id, cluster_id, start_time) already exists: jobs, err := api.JobRepository.FindAll(&req.JobID, &req.Cluster, nil) if err != nil && err != sql.ErrNoRows { - handleError(fmt.Errorf("API/REST > checking for duplicate failed: %w", err), http.StatusInternalServerError, rw) + handleError(fmt.Errorf("checking for duplicate failed: %w", err), http.StatusInternalServerError, rw) return } else if err == nil { for _, job := range jobs { if (req.StartTime - job.StartTimeUnix) < 86400 { - handleError(fmt.Errorf("API/REST > a job with that jobId, cluster and startTime already exists: dbid: %d", job.ID), http.StatusUnprocessableEntity, rw) + handleError(fmt.Errorf("a job with that jobId, cluster and startTime already exists: dbid: %d", job.ID), http.StatusUnprocessableEntity, rw) return } } @@ -404,7 +404,7 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { id, err := api.JobRepository.Start(&req) if err != nil { - handleError(fmt.Errorf("API/REST > insert into database failed: %w", err), http.StatusInternalServerError, rw) + handleError(fmt.Errorf("insert into database failed: %w", err), http.StatusInternalServerError, rw) return } // unlock here, adding Tags can be async @@ -413,12 +413,12 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { for _, tag := range req.Tags { if _, err := api.JobRepository.AddTagOrCreate(id, tag.Type, tag.Name); err != nil 
{ http.Error(rw, err.Error(), http.StatusInternalServerError) - handleError(fmt.Errorf("API/REST > adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw) + handleError(fmt.Errorf("adding tag to new job %d failed: %w", id, err), http.StatusInternalServerError, rw) return } } - log.Printf("API/REST > new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime) + log.Printf("new job (id: %d): cluster=%s, jobId=%d, user=%s, startTime=%d", id, req.Cluster, req.JobID, req.User, req.StartTime) rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusCreated) json.NewEncoder(rw).Encode(StartJobApiResponse{ @@ -446,14 +446,14 @@ func (api *RestApi) startJob(rw http.ResponseWriter, r *http.Request) { // @router /jobs/stop_job/{id} [post] func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { - handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) + handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) return } // Parse request body: Only StopTime and State req := StopJobApiRequest{} if err := decode(r.Body, &req); err != nil { - handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw) + handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw) return } @@ -464,17 +464,17 @@ func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) { if ok { id, e := strconv.ParseInt(id, 10, 64) if e != nil { - handleError(fmt.Errorf("API/REST > integer expected in path for id: %w", e), http.StatusBadRequest, rw) + handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw) return } job, err = api.JobRepository.FindById(id) } else { - handleError(errors.New("API/REST > the parameter 'id' is required"), http.StatusBadRequest, rw) + handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw) return } if err != nil { - handleError(fmt.Errorf("API/REST > finding job failed: %w", err), http.StatusUnprocessableEntity, rw) + handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) return } @@ -499,14 +499,14 @@ func (api *RestApi) stopJobById(rw http.ResponseWriter, r *http.Request) { // @router /jobs/stop_job/ [post] func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { - handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) + handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) return } // Parse request body req := StopJobApiRequest{} if err := decode(r.Body, &req); err != nil { - handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw) + handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw) return } @@ -514,14 +514,14 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { var job *schema.Job var err error if req.JobId == nil { - handleError(errors.New("API/REST > the field 'jobId' is required"), http.StatusBadRequest, rw) + handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw) return } job, err = api.JobRepository.Find(req.JobId, req.Cluster, 
req.StartTime) if err != nil { - handleError(fmt.Errorf("API/REST > finding job failed: %w", err), http.StatusUnprocessableEntity, rw) + handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) return } @@ -545,7 +545,7 @@ func (api *RestApi) stopJobByRequest(rw http.ResponseWriter, r *http.Request) { // @router /jobs/delete_job/{id} [delete] func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { - handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) + handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) return } @@ -555,23 +555,23 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) { if ok { id, e := strconv.ParseInt(id, 10, 64) if e != nil { - handleError(fmt.Errorf("API/REST > integer expected in path for id: %w", e), http.StatusBadRequest, rw) + handleError(fmt.Errorf("integer expected in path for id: %w", e), http.StatusBadRequest, rw) return } err = api.JobRepository.DeleteJobById(id) } else { - handleError(errors.New("API/REST > the parameter 'id' is required"), http.StatusBadRequest, rw) + handleError(errors.New("the parameter 'id' is required"), http.StatusBadRequest, rw) return } if err != nil { - handleError(fmt.Errorf("API/REST > deleting job failed: %w", err), http.StatusUnprocessableEntity, rw) + handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw) return } rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusOK) json.NewEncoder(rw).Encode(DeleteJobApiResponse{ - Message: fmt.Sprintf("API/REST > Successfully deleted job %s", id), + Message: fmt.Sprintf("Successfully deleted job %s", id), }) } @@ -593,14 +593,14 @@ func (api *RestApi) deleteJobById(rw http.ResponseWriter, r *http.Request) { // @router /jobs/delete_job/ [delete] func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { - handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) + handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) return } // Parse request body req := DeleteJobApiRequest{} if err := decode(r.Body, &req); err != nil { - handleError(fmt.Errorf("API/REST > parsing request body failed: %w", err), http.StatusBadRequest, rw) + handleError(fmt.Errorf("parsing request body failed: %w", err), http.StatusBadRequest, rw) return } @@ -608,27 +608,27 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) var job *schema.Job var err error if req.JobId == nil { - handleError(errors.New("API/REST > the field 'jobId' is required"), http.StatusBadRequest, rw) + handleError(errors.New("the field 'jobId' is required"), http.StatusBadRequest, rw) return } job, err = api.JobRepository.Find(req.JobId, req.Cluster, req.StartTime) if err != nil { - handleError(fmt.Errorf("API/REST > finding job failed: %w", err), http.StatusUnprocessableEntity, rw) + handleError(fmt.Errorf("finding job failed: %w", err), http.StatusUnprocessableEntity, rw) return } err = api.JobRepository.DeleteJobById(job.ID) if err != nil { - handleError(fmt.Errorf("API/REST > deleting job failed: %w", err), http.StatusUnprocessableEntity, rw) + handleError(fmt.Errorf("deleting job failed: %w", err), http.StatusUnprocessableEntity, rw) return } 
rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusOK) json.NewEncoder(rw).Encode(DeleteJobApiResponse{ - Message: fmt.Sprintf("API/REST > Successfully deleted job %d", job.ID), + Message: fmt.Sprintf("Successfully deleted job %d", job.ID), }) } @@ -649,7 +649,7 @@ func (api *RestApi) deleteJobByRequest(rw http.ResponseWriter, r *http.Request) // @router /jobs/delete_job_before/{ts} [delete] func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { - handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) + handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) return } @@ -660,24 +660,24 @@ func (api *RestApi) deleteJobBefore(rw http.ResponseWriter, r *http.Request) { if ok { ts, e := strconv.ParseInt(id, 10, 64) if e != nil { - handleError(fmt.Errorf("API/REST > integer expected in path for ts: %w", e), http.StatusBadRequest, rw) + handleError(fmt.Errorf("integer expected in path for ts: %w", e), http.StatusBadRequest, rw) return } cnt, err = api.JobRepository.DeleteJobsBefore(ts) } else { - handleError(errors.New("API/REST > the parameter 'ts' is required"), http.StatusBadRequest, rw) + handleError(errors.New("the parameter 'ts' is required"), http.StatusBadRequest, rw) return } if err != nil { - handleError(fmt.Errorf("API/REST > deleting jobs failed: %w", err), http.StatusUnprocessableEntity, rw) + handleError(fmt.Errorf("deleting jobs failed: %w", err), http.StatusUnprocessableEntity, rw) return } rw.Header().Add("Content-Type", "application/json") rw.WriteHeader(http.StatusOK) json.NewEncoder(rw).Encode(DeleteJobApiResponse{ - Message: fmt.Sprintf("API/REST > Successfully deleted %d jobs", cnt), + Message: fmt.Sprintf("Successfully deleted %d jobs", cnt), }) } @@ -685,12 +685,12 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo // Sanity checks if job == nil || job.StartTime.Unix() >= req.StopTime || job.State != schema.JobStateRunning { - handleError(errors.New("API/REST > stopTime must be larger than startTime and only running jobs can be stopped"), http.StatusBadRequest, rw) + handleError(errors.New("stopTime must be larger than startTime and only running jobs can be stopped"), http.StatusBadRequest, rw) return } if req.State != "" && !req.State.Valid() { - handleError(fmt.Errorf("API/REST > invalid job state: %#v", req.State), http.StatusBadRequest, rw) + handleError(fmt.Errorf("invalid job state: %#v", req.State), http.StatusBadRequest, rw) return } else if req.State == "" { req.State = schema.JobStateCompleted @@ -700,11 +700,11 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo job.Duration = int32(req.StopTime - job.StartTime.Unix()) job.State = req.State if err := api.JobRepository.Stop(job.ID, job.Duration, job.State, job.MonitoringStatus); err != nil { - handleError(fmt.Errorf("API/REST > marking job as stopped failed: %w", err), http.StatusInternalServerError, rw) + handleError(fmt.Errorf("marking job as stopped failed: %w", err), http.StatusInternalServerError, rw) return } - log.Printf("API/REST > archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime) + log.Printf("archiving job... (dbid: %d): cluster=%s, jobId=%d, user=%s, startTime=%s", job.ID, job.Cluster, job.JobID, job.User, job.StartTime) // Send a response (with status OK). 
This means that erros that happen from here on forward // can *NOT* be communicated to the client. If reading from a MetricDataRepository or @@ -724,7 +724,7 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo // func (api *RestApi) importJob(rw http.ResponseWriter, r *http.Request) { // if user := auth.GetUser(r.Context()); user != nil && !user.HasRole(auth.RoleApi) { -// handleError(fmt.Errorf("API/REST > missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) +// handleError(fmt.Errorf("missing role: %#v", auth.RoleApi), http.StatusForbidden, rw) // return // } @@ -733,12 +733,12 @@ func (api *RestApi) checkAndHandleStopJob(rw http.ResponseWriter, job *schema.Jo // Data *schema.JobData `json:"data"` // } // if err := decode(r.Body, &body); err != nil { -// handleError(fmt.Errorf("API/REST > import failed: %s", err.Error()), http.StatusBadRequest, rw) +// handleError(fmt.Errorf("import failed: %s", err.Error()), http.StatusBadRequest, rw) // return // } // if err := api.JobRepository.ImportJob(body.Meta, body.Data); err != nil { -// handleError(fmt.Errorf("API/REST > import failed: %s", err.Error()), http.StatusUnprocessableEntity, rw) +// handleError(fmt.Errorf("import failed: %s", err.Error()), http.StatusUnprocessableEntity, rw) // return // } @@ -793,7 +793,7 @@ func (api *RestApi) getJWT(rw http.ResponseWriter, r *http.Request) { me := auth.GetUser(r.Context()) if !me.HasRole(auth.RoleAdmin) { if username != me.Username { - http.Error(rw, "API/REST > only admins are allowed to sign JWTs not for themselves", http.StatusForbidden) + http.Error(rw, "REST > only admins are allowed to sign JWTs not for themselves", http.StatusForbidden) return } } @@ -818,13 +818,13 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) { rw.Header().Set("Content-Type", "text/plain") me := auth.GetUser(r.Context()) if !me.HasRole(auth.RoleAdmin) { - http.Error(rw, "API/REST > only admins are allowed to create new users", http.StatusForbidden) + http.Error(rw, "REST > only admins are allowed to create new users", http.StatusForbidden) return } username, password, role, name, email := r.FormValue("username"), r.FormValue("password"), r.FormValue("role"), r.FormValue("name"), r.FormValue("email") if len(password) == 0 && role != auth.RoleApi { - http.Error(rw, "API/REST > only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest) + http.Error(rw, "REST > only API users are allowed to have a blank password (login will be impossible)", http.StatusBadRequest) return } @@ -838,12 +838,12 @@ func (api *RestApi) createUser(rw http.ResponseWriter, r *http.Request) { return } - rw.Write([]byte(fmt.Sprintf("API/REST > User %#v successfully created!\n", username))) + rw.Write([]byte(fmt.Sprintf("User %#v successfully created!\n", username))) } func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) { - http.Error(rw, "API/REST > only admins are allowed to delete a user", http.StatusForbidden) + http.Error(rw, "REST > only admins are allowed to delete a user", http.StatusForbidden) return } @@ -858,7 +858,7 @@ func (api *RestApi) deleteUser(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) { - http.Error(rw, "API/REST > only admins are allowed to fetch a list of users", http.StatusForbidden) + 
http.Error(rw, "REST > only admins are allowed to fetch a list of users", http.StatusForbidden) return } @@ -873,7 +873,7 @@ func (api *RestApi) getUsers(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { if user := auth.GetUser(r.Context()); !user.HasRole(auth.RoleAdmin) { - http.Error(rw, "API/REST > only admins are allowed to update a user", http.StatusForbidden) + http.Error(rw, "REST > only admins are allowed to update a user", http.StatusForbidden) return } @@ -893,9 +893,9 @@ func (api *RestApi) updateUser(rw http.ResponseWriter, r *http.Request) { http.Error(rw, err.Error(), http.StatusUnprocessableEntity) return } - rw.Write([]byte("Remove Role Success")) + rw.Write([]byte("REST > Remove Role Success")) } else { - http.Error(rw, "Not Add or Del?", http.StatusInternalServerError) + http.Error(rw, "REST > Not Add or Del?", http.StatusInternalServerError) } } @@ -903,7 +903,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request) rw.Header().Set("Content-Type", "text/plain") key, value := r.FormValue("key"), r.FormValue("value") - fmt.Printf("API/REST > KEY: %#v\nVALUE: %#v\n", key, value) + fmt.Printf("REST > KEY: %#v\nVALUE: %#v\n", key, value) if err := repository.GetUserCfgRepo().UpdateConfig(key, value, auth.GetUser(r.Context())); err != nil { http.Error(rw, err.Error(), http.StatusUnprocessableEntity) @@ -915,7 +915,7 @@ func (api *RestApi) updateConfiguration(rw http.ResponseWriter, r *http.Request) func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) { if api.MachineStateDir == "" { - http.Error(rw, "API/REST > machine state not enabled", http.StatusNotFound) + http.Error(rw, "REST > machine state not enabled", http.StatusNotFound) return } @@ -928,7 +928,7 @@ func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) { return } - filename := filepath.Join(dir, fmt.Sprintf("API/REST > %s.json", host)) + filename := filepath.Join(dir, fmt.Sprintf("%s.json", host)) f, err := os.Create(filename) if err != nil { http.Error(rw, err.Error(), http.StatusInternalServerError) @@ -946,12 +946,12 @@ func (api *RestApi) putMachineState(rw http.ResponseWriter, r *http.Request) { func (api *RestApi) getMachineState(rw http.ResponseWriter, r *http.Request) { if api.MachineStateDir == "" { - http.Error(rw, "API/REST > machine state not enabled", http.StatusNotFound) + http.Error(rw, "REST > machine state not enabled", http.StatusNotFound) return } vars := mux.Vars(r) - filename := filepath.Join(api.MachineStateDir, vars["cluster"], fmt.Sprintf("API/REST > %s.json", vars["host"])) + filename := filepath.Join(api.MachineStateDir, vars["cluster"], fmt.Sprintf("%s.json", vars["host"])) // Sets the content-type and 'Last-Modified' Header and so on automatically http.ServeFile(rw, r, filename) diff --git a/internal/auth/auth.go b/internal/auth/auth.go index c119417..4d5fe5e 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -99,7 +99,7 @@ func Init(db *sqlx.DB, sessKey := os.Getenv("SESSION_KEY") if sessKey == "" { - log.Warn("AUTH/AUTH > environment variable 'SESSION_KEY' not set (will use non-persistent random key)") + log.Warn("environment variable 'SESSION_KEY' not set (will use non-persistent random key)") bytes := make([]byte, 32) if _, err := rand.Read(bytes); err != nil { return nil, err @@ -169,7 +169,7 @@ func (auth *Authentication) Login( user := (*User)(nil) if username != "" { if user, _ = auth.GetUser(username); err != nil { - // 
log.Warnf("AUTH/AUTH > login of unkown user %#v", username) + // log.Warnf("login of unkown user %#v", username) _ = err } } @@ -181,14 +181,14 @@ func (auth *Authentication) Login( user, err = authenticator.Login(user, rw, r) if err != nil { - log.Warnf("AUTH/AUTH > user '%s' login failed: %s", user.Username, err.Error()) + log.Warnf("user '%s' login failed: %s", user.Username, err.Error()) onfailure(rw, r, err) return } session, err := auth.sessionStore.New(r, "session") if err != nil { - log.Errorf("AUTH/AUTH > session creation failed: %s", err.Error()) + log.Errorf("session creation failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusInternalServerError) return } @@ -199,18 +199,18 @@ func (auth *Authentication) Login( session.Values["username"] = user.Username session.Values["roles"] = user.Roles if err := auth.sessionStore.Save(r, rw, session); err != nil { - log.Errorf("AUTH/AUTH > session save failed: %s", err.Error()) + log.Errorf("session save failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusInternalServerError) return } - log.Infof("AUTH/AUTH > login successfull: user: %#v (roles: %v)", user.Username, user.Roles) + log.Infof("login successfull: user: %#v (roles: %v)", user.Username, user.Roles) ctx := context.WithValue(r.Context(), ContextUserKey, user) onsuccess.ServeHTTP(rw, r.WithContext(ctx)) return } - log.Warn("AUTH/AUTH > login failed: no authenticator applied") + log.Warn("login failed: no authenticator applied") onfailure(rw, r, err) }) } @@ -226,7 +226,7 @@ func (auth *Authentication) Auth( for _, authenticator := range auth.authenticators { user, err := authenticator.Auth(rw, r) if err != nil { - log.Warnf("AUTH/AUTH > authentication failed: %s", err.Error()) + log.Warnf("authentication failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusUnauthorized) return } @@ -239,7 +239,7 @@ func (auth *Authentication) Auth( return } - log.Warnf("AUTH/AUTH > authentication failed: %s", "no authenticator applied") + log.Warnf("authentication failed: %s", "no authenticator applied") // http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) onfailure(rw, r, errors.New("unauthorized (login first or use a token)")) }) diff --git a/internal/auth/jwt.go b/internal/auth/jwt.go index ed8e413..798023e 100644 --- a/internal/auth/jwt.go +++ b/internal/auth/jwt.go @@ -41,7 +41,7 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error { pubKey, privKey := os.Getenv("JWT_PUBLIC_KEY"), os.Getenv("JWT_PRIVATE_KEY") if pubKey == "" || privKey == "" { - log.Warn("AUTH/JWT > environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)") + log.Warn("environment variables 'JWT_PUBLIC_KEY' or 'JWT_PRIVATE_KEY' not set (token based authentication will not work)") } else { bytes, err := base64.StdEncoding.DecodeString(pubKey) if err != nil { @@ -75,20 +75,20 @@ func (ja *JWTAuthenticator) Init(auth *Authentication, conf interface{}) error { // Warn if other necessary settings are not configured if ja.config != nil { if ja.config.CookieName == "" { - log.Warn("AUTH/JWT > cookieName for JWTs not configured (cross login via JWT cookie will fail)") + log.Warn("cookieName for JWTs not configured (cross login via JWT cookie will fail)") } if !ja.config.ForceJWTValidationViaDatabase { - log.Warn("AUTH/JWT > forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!") + 
log.Warn("forceJWTValidationViaDatabase not set to true: CC will accept users and roles defined in JWTs regardless of its own database!") } if ja.config.TrustedExternalIssuer == "" { - log.Warn("AUTH/JWT > trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)") + log.Warn("trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)") } } else { - log.Warn("AUTH/JWT > cookieName and trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)") + log.Warn("cookieName and trustedExternalIssuer for JWTs not configured (cross login via JWT cookie will fail)") } } else { ja.publicKeyCrossLogin = nil - log.Warn("AUTH/JWT > environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)") + log.Warn("environment variable 'CROSS_LOGIN_JWT_PUBLIC_KEY' not set (cross login token based authentication will not work)") } return nil @@ -243,7 +243,7 @@ func (ja *JWTAuthenticator) Auth( // Deny any logins for unknown usernames if err != nil { - log.Warn("AUTH/JWT > Could not find user from JWT in internal database.") + log.Warn("Could not find user from JWT in internal database.") return nil, errors.New("unknown user") } @@ -264,7 +264,7 @@ func (ja *JWTAuthenticator) Auth( // Create a session so that we no longer need the JTW Cookie session, err := ja.auth.sessionStore.New(r, "session") if err != nil { - log.Errorf("AUTH/JWT > session creation failed: %s", err.Error()) + log.Errorf("session creation failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusInternalServerError) return nil, err } @@ -276,7 +276,7 @@ func (ja *JWTAuthenticator) Auth( session.Values["roles"] = roles if err := ja.auth.sessionStore.Save(r, rw, session); err != nil { - log.Errorf("AUTH/JWT > session save failed: %s", err.Error()) + log.Errorf("session save failed: %s", err.Error()) http.Error(rw, err.Error(), http.StatusInternalServerError) return nil, err } diff --git a/internal/auth/ldap.go b/internal/auth/ldap.go index 6f974b5..1e0235c 100644 --- a/internal/auth/ldap.go +++ b/internal/auth/ldap.go @@ -33,7 +33,7 @@ func (la *LdapAuthenticator) Init( la.syncPassword = os.Getenv("LDAP_ADMIN_PASSWORD") if la.syncPassword == "" { - log.Warn("AUTH/LDAP > environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)") + log.Warn("environment variable 'LDAP_ADMIN_PASSWORD' not set (ldap sync will not work)") } if la.config != nil && la.config.SyncInterval != "" { @@ -49,11 +49,11 @@ func (la *LdapAuthenticator) Init( go func() { ticker := time.NewTicker(interval) for t := range ticker.C { - log.Printf("AUTH/LDAP > sync started at %s", t.Format(time.RFC3339)) + log.Printf("sync started at %s", t.Format(time.RFC3339)) if err := la.Sync(); err != nil { - log.Errorf("AUTH/LDAP > sync failed: %s", err.Error()) + log.Errorf("sync failed: %s", err.Error()) } - log.Print("AUTH/LDAP > sync done") + log.Print("sync done") } }() } @@ -147,13 +147,13 @@ func (la *LdapAuthenticator) Sync() error { for username, where := range users { if where == IN_DB && la.config.SyncDelOldUsers { - log.Debugf("AUTH/LDAP > sync: remove %#v (does not show up in LDAP anymore)", username) + log.Debugf("sync: remove %#v (does not show up in LDAP anymore)", username) if _, err := la.auth.db.Exec(`DELETE FROM user WHERE user.username = ?`, username); err != nil { return err } } else if where == IN_LDAP { name := newnames[username] - log.Debugf("AUTH/LDAP > sync: add %#v (name: %#v, roles: [user], ldap: true)", 
username, name) + log.Debugf("sync: add %#v (name: %#v, roles: [user], ldap: true)", username, name) if _, err := la.auth.db.Exec(`INSERT INTO user (username, ldap, name, roles) VALUES (?, ?, ?, ?)`, username, 1, name, "[\""+RoleUser+"\"]"); err != nil { return err diff --git a/internal/auth/users.go b/internal/auth/users.go index 009fe35..f890c1b 100644 --- a/internal/auth/users.go +++ b/internal/auth/users.go @@ -67,7 +67,7 @@ func (auth *Authentication) AddUser(user *User) error { return err } - log.Infof("AUTH/USERS > new user %#v created (roles: %s, auth-source: %d)", user.Username, rolesJson, user.AuthSource) + log.Infof("new user %#v created (roles: %s, auth-source: %d)", user.Username, rolesJson, user.AuthSource) return nil } diff --git a/internal/config/config.go b/internal/config/config.go index 700413a..b8afc9a 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -49,20 +49,20 @@ func Init(flagConfigFile string) { raw, err := os.ReadFile(flagConfigFile) if err != nil { if !os.IsNotExist(err) { - log.Fatalf("CONFIG/CONFIG > ERROR: %v", err) + log.Fatalf("CONFIG ERROR: %v", err) } } else { if err := schema.Validate(schema.Config, bytes.NewReader(raw)); err != nil { - log.Fatalf("CONFIG/CONFIG > Validate config: %v\n", err) + log.Fatalf("Validate config: %v\n", err) } dec := json.NewDecoder(bytes.NewReader(raw)) dec.DisallowUnknownFields() if err := dec.Decode(&Keys); err != nil { - log.Fatalf("CONFIG/CONFIG > could not decode: %v", err) + log.Fatalf("could not decode: %v", err) } if Keys.Clusters == nil || len(Keys.Clusters) < 1 { - log.Fatal("CONFIG/CONFIG > At least one cluster required in config!") + log.Fatal("At least one cluster required in config!") } } } diff --git a/internal/graph/schema.resolvers.go b/internal/graph/schema.resolvers.go index 12d316c..5c927f1 100644 --- a/internal/graph/schema.resolvers.go +++ b/internal/graph/schema.resolvers.go @@ -17,6 +17,7 @@ import ( "github.com/ClusterCockpit/cc-backend/internal/repository" "github.com/ClusterCockpit/cc-backend/pkg/archive" "github.com/ClusterCockpit/cc-backend/pkg/schema" + "github.com/ClusterCockpit/cc-backend/pkg/log" ) // Partitions is the resolver for the partitions field. @@ -51,7 +52,7 @@ func (r *mutationResolver) CreateTag(ctx context.Context, typeArg string, name s // DeleteTag is the resolver for the deleteTag field. func (r *mutationResolver) DeleteTag(ctx context.Context, id string) (string, error) { - panic(fmt.Errorf("GRAPH/RESOLVERS > not implemented: DeleteTag - deleteTag")) + log.Panic(fmt.Errorf("not implemented: DeleteTag - deleteTag")) } // AddTagsToJob is the resolver for the addTagsToJob field. @@ -175,7 +176,7 @@ func (r *queryResolver) JobMetrics(ctx context.Context, id string, metrics []str for name, md := range data { for scope, metric := range md { if metric.Scope != schema.MetricScope(scope) { - panic("GRAPH/RESOLVERS > metric.Scope != schema.MetricScope(scope) : Should not happen!") + log.Panic("metric.Scope != schema.MetricScope(scope) : Should not happen!") } res = append(res, &model.JobMetricWithName{ diff --git a/internal/metricdata/influxdb-v2.go b/internal/metricdata/influxdb-v2.go index 51a7dbb..8c1e428 100644 --- a/internal/metricdata/influxdb-v2.go +++ b/internal/metricdata/influxdb-v2.go @@ -84,7 +84,7 @@ func (idb *InfluxDBv2DataRepository) LoadData( switch scope { case "node": // Get Finest Granularity, Groupy By Measurement and Hostname (== Metric / Node), Calculate Mean for 60s windows - // log.Info("METRICDATA/INFLUXV2 > Scope 'node' requested. 
") + // log.Info("Scope 'node' requested. ") query = fmt.Sprintf(` from(bucket: "%s") |> range(start: %s, stop: %s) @@ -97,10 +97,10 @@ func (idb *InfluxDBv2DataRepository) LoadData( idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix+int64(job.Duration)+int64(1))), measurementsCond, hostsCond) case "socket": - log.Info("METRICDATA/INFLUXV2 > Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ") + log.Info("Scope 'socket' requested, but not yet supported: Will return 'node' scope only. ") continue case "core": - log.Info("METRICDATA/INFLUXV2 > Scope 'core' requested, but not yet supported: Will return 'node' scope only. ") + log.Info(" Scope 'core' requested, but not yet supported: Will return 'node' scope only. ") continue // Get Finest Granularity only, Set NULL to 0.0 // query = fmt.Sprintf(` @@ -114,7 +114,7 @@ func (idb *InfluxDBv2DataRepository) LoadData( // idb.formatTime(job.StartTime), idb.formatTime(idb.epochToTime(job.StartTimeUnix + int64(job.Duration) + int64(1) )), // measurementsCond, hostsCond) default: - log.Info("METRICDATA/INFLUXV2 > Unknown Scope requested: Will return 'node' scope. ") + log.Info("Unknown Scope requested: Will return 'node' scope. ") continue // return nil, errors.New("METRICDATA/INFLUXV2 > the InfluxDB metric data repository does not yet support other scopes than 'node'") } @@ -319,7 +319,7 @@ func (idb *InfluxDBv2DataRepository) LoadNodeData( ctx context.Context) (map[string]map[string][]*schema.JobMetric, error) { // TODO : Implement to be used in Analysis- und System/Node-View - log.Infof("METRICDATA/INFLUXV2 > LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes) + log.Infof("LoadNodeData unimplemented for InfluxDBv2DataRepository, Args: cluster %s, metrics %v, nodes %v, scopes %v", cluster, metrics, nodes, scopes) return nil, errors.New("METRICDATA/INFLUXV2 > unimplemented for InfluxDBv2DataRepository") } diff --git a/internal/metricdata/metricdata.go b/internal/metricdata/metricdata.go index 2b68f55..56bcdc6 100644 --- a/internal/metricdata/metricdata.go +++ b/internal/metricdata/metricdata.go @@ -107,7 +107,7 @@ func LoadData(job *schema.Job, jd, err = repo.LoadData(job, metrics, scopes, ctx) if err != nil { if len(jd) != 0 { - log.Errorf("METRICDATA/METRICDATA > partial error: %s", err.Error()) + log.Errorf("partial error: %s", err.Error()) } else { return err, 0, 0 } @@ -229,7 +229,7 @@ func LoadNodeData( data, err := repo.LoadNodeData(cluster, metrics, nodes, scopes, from, to, ctx) if err != nil { if len(data) != 0 { - log.Errorf("METRICDATA/METRICDATA > partial error: %s", err.Error()) + log.Errorf("partial error: %s", err.Error()) } else { return nil, err } diff --git a/internal/metricdata/prometheus.go b/internal/metricdata/prometheus.go index db2bc6f..7d51218 100644 --- a/internal/metricdata/prometheus.go +++ b/internal/metricdata/prometheus.go @@ -184,9 +184,9 @@ func (pdb *PrometheusDataRepository) Init(rawConfig json.RawMessage) error { for metric, templ := range config.Templates { pdb.templates[metric], err = template.New(metric).Parse(templ) if err == nil { - log.Debugf("METRICDATA/PROMETHEUS > Added PromQL template for %s: %s", metric, templ) + log.Debugf("Added PromQL template for %s: %s", metric, templ) } else { - log.Errorf("METRICDATA/PROMETHEUS > Failed to parse PromQL template %s for metric %s", templ, metric) + log.Errorf("Failed to parse PromQL template %s for metric %s", templ, 
metric) } } return nil @@ -216,7 +216,7 @@ func (pdb *PrometheusDataRepository) FormatQuery( return "", errors.New(fmt.Sprintf("METRICDATA/PROMETHEUS > Error compiling template %v", templ)) } else { query := buf.String() - log.Debugf("METRICDATA/PROMETHEUS > PromQL: %s", query) + log.Debugf("PromQL: %s", query) return query, nil } } else { @@ -283,14 +283,14 @@ func (pdb *PrometheusDataRepository) LoadData( for _, scope := range scopes { if scope != schema.MetricScopeNode { - logOnce.Do(func(){log.Infof("METRICDATA/PROMETHEUS > Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)}) + logOnce.Do(func(){log.Infof("Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)}) continue } for _, metric := range metrics { metricConfig := archive.GetMetricConfig(job.Cluster, metric) if metricConfig == nil { - log.Errorf("METRICDATA/PROMETHEUS > Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster) + log.Errorf("Error in LoadData: Metric %s for cluster %s not configured", metric, job.Cluster) return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus query error") } query, err := pdb.FormatQuery(metric, scope, nodes, job.Cluster) @@ -307,7 +307,7 @@ func (pdb *PrometheusDataRepository) LoadData( result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) if err != nil { - log.Errorf("METRICDATA/PROMETHEUS > Prometheus query error in LoadData: %v\nQuery: %s", err, query) + log.Errorf("Prometheus query error in LoadData: %v\nQuery: %s", err, query) return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus query error") } if len(warnings) > 0 { @@ -389,13 +389,13 @@ func (pdb *PrometheusDataRepository) LoadNodeData( } for _, scope := range scopes { if scope != schema.MetricScopeNode { - logOnce.Do(func(){log.Infof("METRICDATA/PROMETHEUS > Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)}) + logOnce.Do(func(){log.Infof("Note: Scope '%s' requested, but not yet supported: Will return 'node' scope only.", scope)}) continue } for _, metric := range metrics { metricConfig := archive.GetMetricConfig(cluster, metric) if metricConfig == nil { - log.Errorf("METRICDATA/PROMETHEUS > Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster) + log.Errorf("Error in LoadNodeData: Metric %s for cluster %s not configured", metric, cluster) return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus querry error") } query, err := pdb.FormatQuery(metric, scope, nodes, cluster) @@ -412,11 +412,11 @@ func (pdb *PrometheusDataRepository) LoadNodeData( result, warnings, err := pdb.queryClient.QueryRange(ctx, query, r) if err != nil { - log.Errorf("METRICDATA/PROMETHEUS > Prometheus query error in LoadNodeData: %v\n", err) + log.Errorf("Prometheus query error in LoadNodeData: %v\n", err) return nil, errors.New("METRICDATA/PROMETHEUS > Prometheus querry error") } if len(warnings) > 0 { - log.Warnf("METRICDATA/PROMETHEUS > Warnings: %v\n", warnings) + log.Warnf("Warnings: %v\n", warnings) } step := int64(metricConfig.Timestep) @@ -442,6 +442,6 @@ func (pdb *PrometheusDataRepository) LoadNodeData( } } t1 := time.Since(t0) - log.Debugf("METRICDATA/PROMETHEUS > LoadNodeData of %v nodes took %s", len(data), t1) + log.Debugf("LoadNodeData of %v nodes took %s", len(data), t1) return data, nil } diff --git a/internal/repository/dbConnection.go b/internal/repository/dbConnection.go index 987bf20..86f0762 100644 --- a/internal/repository/dbConnection.go +++ 
b/internal/repository/dbConnection.go @@ -39,14 +39,14 @@ func Connect(driver string, db string) { } else if driver == "mysql" { dbHandle, err = sqlx.Open("mysql", fmt.Sprintf("%s?multiStatements=true", db)) if err != nil { - log.Fatalf("REPOSITORY/DBCONNECTION > sqlx.Open() error: %v", err) + log.Fatalf("sqlx.Open() error: %v", err) } dbHandle.SetConnMaxLifetime(time.Minute * 3) dbHandle.SetMaxOpenConns(10) dbHandle.SetMaxIdleConns(10) } else { - log.Fatalf("REPOSITORY/DBCONNECTION > unsupported database driver: %s", driver) + log.Fatalf("unsupported database driver: %s", driver) } dbConnInstance = &DBConnection{DB: dbHandle} @@ -55,7 +55,7 @@ func Connect(driver string, db string) { func GetConnection() *DBConnection { if dbConnInstance == nil { - log.Fatalf("REPOSITORY/DBCONNECTION > Database connection not initialized!") + log.Fatalf("Database connection not initialized!") } return dbConnInstance diff --git a/internal/repository/init.go b/internal/repository/init.go index 1358100..08b5565 100644 --- a/internal/repository/init.go +++ b/internal/repository/init.go @@ -186,7 +186,7 @@ func HandleImportFlag(flag string) error { } } - log.Infof("REPOSITORY/INIT > successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id) + log.Infof("successfully imported a new job (jobId: %d, cluster: %s, dbid: %d)", job.JobID, job.Cluster, id) } return nil } @@ -260,34 +260,34 @@ func InitDB() error { job.RawResources, err = json.Marshal(job.Resources) if err != nil { - log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err) + log.Errorf("repository initDB(): %v", err) errorOccured++ continue } job.RawMetaData, err = json.Marshal(job.MetaData) if err != nil { - log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err) + log.Errorf("repository initDB(): %v", err) errorOccured++ continue } if err := SanityChecks(&job.BaseJob); err != nil { - log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err) + log.Errorf("repository initDB(): %v", err) errorOccured++ continue } res, err := stmt.Exec(job) if err != nil { - log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err) + log.Errorf("repository initDB(): %v", err) errorOccured++ continue } id, err := res.LastInsertId() if err != nil { - log.Errorf("REPOSITORY/INIT > repository initDB(): %v", err) + log.Errorf("repository initDB(): %v", err) errorOccured++ continue } @@ -318,7 +318,7 @@ func InitDB() error { } if errorOccured > 0 { - log.Errorf("REPOSITORY/INIT > Error in import of %d jobs!", errorOccured) + log.Errorf("Error in import of %d jobs!", errorOccured) } if err := tx.Commit(); err != nil { diff --git a/internal/repository/job.go b/internal/repository/job.go index ca7c639..76db377 100644 --- a/internal/repository/job.go +++ b/internal/repository/job.go @@ -259,9 +259,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) { err := r.DB.Get(&cnt, qs) //ignore error as it will also occur in delete statement _, err = r.DB.Exec(`DELETE FROM job WHERE job.start_time < ?`, startTime) if err != nil { - log.Warnf("REPOSITORY/JOB > DeleteJobsBefore(%d): error %v", startTime, err) + log.Warnf(" DeleteJobsBefore(%d): error %v", startTime, err) } else { - log.Infof("REPOSITORY/JOB > DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt) + log.Infof("DeleteJobsBefore(%d): Deleted %d jobs", startTime, cnt) } return cnt, err } @@ -269,9 +269,9 @@ func (r *JobRepository) DeleteJobsBefore(startTime int64) (int, error) { func (r *JobRepository) DeleteJobById(id int64) error { _, err := 
r.DB.Exec(`DELETE FROM job WHERE job.id = ?`, id) if err != nil { - log.Warnf("REPOSITORY/JOB > DeleteJobById(%d): error %v", id, err) + log.Warnf("DeleteJobById(%d): error %v", id, err) } else { - log.Infof("REPOSITORY/JOB > DeleteJobById(%d): Success", id) + log.Infof("DeleteJobById(%d): Success", id) } return err } @@ -376,7 +376,7 @@ func (r *JobRepository) archivingWorker(){ // not using meta data, called to load JobMeta into Cache? // will fail if job meta not in repository if _, err := r.FetchMetadata(job); err != nil { - log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error()) + log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error()) r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed) continue } @@ -385,18 +385,18 @@ func (r *JobRepository) archivingWorker(){ // TODO: Maybe use context with cancel/timeout here jobMeta, err := metricdata.ArchiveJob(job, context.Background()) if err != nil { - log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error()) + log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error()) r.UpdateMonitoringStatus(job.ID, schema.MonitoringStatusArchivingFailed) continue } // Update the jobs database entry one last time: if err := r.MarkArchived(job.ID, schema.MonitoringStatusArchivingSuccessful, jobMeta.Statistics); err != nil { - log.Errorf("REPOSITORY/JOB > archiving job (dbid: %d) failed: %s", job.ID, err.Error()) + log.Errorf("archiving job (dbid: %d) failed: %s", job.ID, err.Error()) continue } - log.Printf("REPOSITORY/JOB > archiving job (dbid: %d) successful", job.ID) + log.Printf("archiving job (dbid: %d) successful", job.ID) r.archivePending.Done() } } @@ -523,7 +523,7 @@ func (r *JobRepository) StopJobsExceedingWalltimeBy(seconds int) error { } if rowsAffected > 0 { - log.Warnf("REPOSITORY/JOB > %d jobs have been marked as failed due to running too long", rowsAffected) + log.Warnf("%d jobs have been marked as failed due to running too long", rowsAffected) } return nil } diff --git a/internal/repository/query.go b/internal/repository/query.go index bc2d108..9b86c59 100644 --- a/internal/repository/query.go +++ b/internal/repository/query.go @@ -54,7 +54,7 @@ func (r *JobRepository) QueryJobs( return nil, err } - log.Debugf("REPOSITORY/QUERY > SQL query: `%s`, args: %#v", sql, args) + log.Debugf("SQL query: `%s`, args: %#v", sql, args) rows, err := query.RunWith(r.stmtCache).Query() if err != nil { return nil, err @@ -209,7 +209,7 @@ var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])") func toSnakeCase(str string) string { for _, c := range str { if c == '\'' || c == '\\' { - panic("REPOSITORY/QUERY > toSnakeCase() attack vector!") + log.Panic("toSnakeCase() attack vector!") } } diff --git a/internal/repository/user.go b/internal/repository/user.go index 80adfc8..b96641a 100644 --- a/internal/repository/user.go +++ b/internal/repository/user.go @@ -42,12 +42,12 @@ func GetUserCfgRepo() *UserCfgRepo { FOREIGN KEY (username) REFERENCES user (username) ON DELETE CASCADE ON UPDATE NO ACTION);`) if err != nil { - log.Fatalf("REPOSITORY/USER > db.DB.exec() error: %v", err) + log.Fatalf("db.DB.exec() error: %v", err) } lookupConfigStmt, err := db.DB.Preparex(`SELECT confkey, value FROM configuration WHERE configuration.username = ?`) if err != nil { - log.Fatalf("REPOSITORY/USER > db.DB.Preparex() error: %v", err) + log.Fatalf("db.DB.Preparex() error: %v", err) } userCfgRepoInstance = &UserCfgRepo{ diff --git a/internal/routerConfig/routes.go 
b/internal/routerConfig/routes.go index fa03b26..9424df7 100644 --- a/internal/routerConfig/routes.go +++ b/internal/routerConfig/routes.go @@ -61,12 +61,12 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType { State: []schema.JobState{schema.JobStateRunning}, }}, nil, nil) if err != nil { - log.Errorf("ROUTERCONFIG/ROUTES > failed to count jobs: %s", err.Error()) + log.Errorf("failed to count jobs: %s", err.Error()) runningJobs = map[string]int{} } totalJobs, err := jobRepo.CountGroupedJobs(r.Context(), model.AggregateCluster, nil, nil, nil) if err != nil { - log.Errorf("ROUTERCONFIG/ROUTES > failed to count jobs: %s", err.Error()) + log.Errorf("failed to count jobs: %s", err.Error()) totalJobs = map[string]int{} } from := time.Now().Add(-24 * time.Hour) @@ -75,7 +75,7 @@ func setupHomeRoute(i InfoType, r *http.Request) InfoType { Duration: &schema.IntRange{From: 0, To: graph.ShortJobDuration}, }}, nil, nil) if err != nil { - log.Errorf("ROUTERCONFIG/ROUTES > failed to count jobs: %s", err.Error()) + log.Errorf("failed to count jobs: %s", err.Error()) recentShortJobs = map[string]int{} } @@ -150,7 +150,7 @@ func setupTaglistRoute(i InfoType, r *http.Request) InfoType { tags, counts, err := jobRepo.CountTags(username) tagMap := make(map[string][]map[string]interface{}) if err != nil { - log.Errorf("ROUTERCONFIG/ROUTES > GetTags failed: %s", err.Error()) + log.Errorf("GetTags failed: %s", err.Error()) i["tagmap"] = tagMap return i } diff --git a/internal/runtimeEnv/setup.go b/internal/runtimeEnv/setup.go index 7ac7138..a4d37be 100644 --- a/internal/runtimeEnv/setup.go +++ b/internal/runtimeEnv/setup.go @@ -65,7 +65,7 @@ func LoadEnv(file string) error { case '"': sb.WriteRune('"') default: - return fmt.Errorf("RUNTIME/SETUP > unsupprorted escape sequence in quoted string: backslash %#v", runes[i]) + return fmt.Errorf("RUNTIME/SETUP > unsupported escape sequence in quoted string: backslash %#v", runes[i]) } continue } diff --git a/pkg/archive/fsBackend.go b/pkg/archive/fsBackend.go index 2bdbfce..608573d 100644 --- a/pkg/archive/fsBackend.go +++ b/pkg/archive/fsBackend.go @@ -46,7 +46,7 @@ func loadJobMeta(filename string) (*schema.JobMeta, error) { f, err := os.Open(filename) if err != nil { - log.Errorf("ARCHIVE/FSBACKEND > loadJobMeta() > open file error: %v", err) + log.Errorf("loadJobMeta() > open file error: %v", err) return &schema.JobMeta{}, err } defer f.Close() @@ -58,19 +58,19 @@ func (fsa *FsArchive) Init(rawConfig json.RawMessage) error { var config FsArchiveConfig if err := json.Unmarshal(rawConfig, &config); err != nil { - log.Errorf("ARCHIVE/FSBACKEND > Init() > Unmarshal error: %v", err) + log.Errorf("Init() > Unmarshal error: %v", err) return err } if config.Path == "" { err := fmt.Errorf("ARCHIVE/FSBACKEND > Init() : empty config.Path") - log.Errorf("ARCHIVE/FSBACKEND > Init() > config.Path error: %v", err) + log.Errorf("Init() > config.Path error: %v", err) return err } fsa.path = config.Path entries, err := os.ReadDir(fsa.path) if err != nil { - log.Errorf("ARCHIVE/FSBACKEND > Init() > ReadDir() error: %v", err) + log.Errorf("Init() > ReadDir() error: %v", err) return err } @@ -86,7 +86,7 @@ func (fsa *FsArchive) LoadJobData(job *schema.Job) (schema.JobData, error) { filename := getPath(job, fsa.path, "data.json") f, err := os.Open(filename) if err != nil { - log.Errorf("ARCHIVE/FSBACKEND > LoadJobData() > open file error: %v", err) + log.Errorf("LoadJobData() > open file error: %v", err) return nil, err } defer f.Close() @@ -104,7 +104,7 @@ func (fsa 
 	b, err := os.ReadFile(filepath.Join(fsa.path, name, "cluster.json"))
 	if err != nil {
-		log.Errorf("ARCHIVE/FSBACKEND > LoadClusterCfg() > open file error: %v", err)
+		log.Errorf("LoadClusterCfg() > open file error: %v", err)
 		return &schema.Cluster{}, err
 	}
 	if config.Keys.Validate {
@@ -121,13 +121,13 @@ func (fsa *FsArchive) Iter() <-chan *schema.JobMeta {
 	go func() {
 		clustersDir, err := os.ReadDir(fsa.path)
 		if err != nil {
-			log.Fatalf("ARCHIVE/FSBACKEND > Reading clusters failed @ cluster dirs: %s", err.Error())
+			log.Fatalf("Reading clusters failed @ cluster dirs: %s", err.Error())
 		}
 		for _, clusterDir := range clustersDir {
 			lvl1Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name()))
 			if err != nil {
-				log.Fatalf("ARCHIVE/FSBACKEND > Reading jobs failed @ lvl1 dirs: %s", err.Error())
+				log.Fatalf("Reading jobs failed @ lvl1 dirs: %s", err.Error())
 			}
 			for _, lvl1Dir := range lvl1Dirs {
@@ -138,21 +138,21 @@ func (fsa *FsArchive) Iter() <-chan *schema.JobMeta {
 				lvl2Dirs, err := os.ReadDir(filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name()))
 				if err != nil {
-					log.Fatalf("ARCHIVE/FSBACKEND > Reading jobs failed @ lvl2 dirs: %s", err.Error())
+					log.Fatalf("Reading jobs failed @ lvl2 dirs: %s", err.Error())
 				}
 				for _, lvl2Dir := range lvl2Dirs {
 					dirpath := filepath.Join(fsa.path, clusterDir.Name(), lvl1Dir.Name(), lvl2Dir.Name())
 					startTimeDirs, err := os.ReadDir(dirpath)
 					if err != nil {
-						log.Fatalf("ARCHIVE/FSBACKEND > Reading jobs failed @ starttime dirs: %s", err.Error())
+						log.Fatalf("Reading jobs failed @ starttime dirs: %s", err.Error())
 					}
 					for _, startTimeDir := range startTimeDirs {
 						if startTimeDir.IsDir() {
 							job, err := loadJobMeta(filepath.Join(dirpath, startTimeDir.Name(), "meta.json"))
 							if err != nil {
-								log.Errorf("ARCHIVE/FSBACKEND > error in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
+								log.Errorf("error in %s: %s", filepath.Join(dirpath, startTimeDir.Name()), err.Error())
 							} else {
 								ch <- job
 							}
diff --git a/pkg/archive/nodelist.go b/pkg/archive/nodelist.go
index 4daecff..c22d99d 100644
--- a/pkg/archive/nodelist.go
+++ b/pkg/archive/nodelist.go
@@ -64,7 +64,7 @@ type NLExprIntRange struct {
 func (nle NLExprIntRange) consume(input string) (next string, ok bool) {
 	if !nle.zeroPadded || nle.digits < 1 {
-		log.Error("ARCHIVE/NODELIST > only zero-padded ranges are allowed")
+		log.Error("only zero-padded ranges are allowed")
 		return "", false
 	}
diff --git a/pkg/log/log.go b/pkg/log/log.go
index 0fb8bf7..e7cabb0 100644
--- a/pkg/log/log.go
+++ b/pkg/log/log.go
@@ -9,122 +9,274 @@ import (
 	"io"
 	"log"
 	"os"
+	"time"
 )
 // Provides a simple way of logging with different levels.
-// Time/Data are not logged on purpose because systemd adds
-// them for us.
+// Time/Date are not logged by default because systemd adds
+// them for us (can be enabled with the '--logdate' flag).
 //
 // Uses these prefixes: https://www.freedesktop.org/software/systemd/man/sd-daemon.html
+var logDateTime bool
+var logLevel string
+
 var (
 	DebugWriter io.Writer = os.Stderr
+	NoteWriter  io.Writer = os.Stderr
 	InfoWriter  io.Writer = os.Stderr
 	WarnWriter  io.Writer = os.Stderr
 	ErrWriter   io.Writer = os.Stderr
+	CritWriter  io.Writer = os.Stderr
 )
 var (
-	DebugPrefix string = "<7>[DEBUG] "
-	InfoPrefix  string = "<6>[INFO] "
-	WarnPrefix  string = "<4>[WARNING] "
-	ErrPrefix   string = "<3>[ERROR] "
+	DebugPrefix string = "<7>[DEBUG] "
+	InfoPrefix  string = "<6>[INFO] "
+	NotePrefix  string = "<5>[NOTICE] "
+	WarnPrefix  string = "<4>[WARNING] "
+	ErrPrefix   string = "<3>[ERROR] "
+	CritPrefix  string = "<2>[CRITICAL] "
 )
 var (
+	// No Time/Date
 	DebugLog *log.Logger = log.New(DebugWriter, DebugPrefix, 0)
-	InfoLog  *log.Logger = log.New(InfoWriter, InfoPrefix, 0)
-	WarnLog  *log.Logger = log.New(WarnWriter, WarnPrefix, 0)
-	ErrLog   *log.Logger = log.New(ErrWriter, ErrPrefix, 0)
+	InfoLog  *log.Logger = log.New(InfoWriter, InfoPrefix, 0)
+	NoteLog  *log.Logger = log.New(NoteWriter, NotePrefix, log.Lshortfile)
+	WarnLog  *log.Logger = log.New(WarnWriter, WarnPrefix, log.Lshortfile)
+	ErrLog   *log.Logger = log.New(ErrWriter, ErrPrefix, log.Llongfile)
+	CritLog  *log.Logger = log.New(CritWriter, CritPrefix, log.Llongfile)
+	// Log Time/Date
+	DebugTimeLog *log.Logger = log.New(DebugWriter, DebugPrefix, log.LstdFlags)
+	InfoTimeLog  *log.Logger = log.New(InfoWriter, InfoPrefix, log.LstdFlags)
+	NoteTimeLog  *log.Logger = log.New(NoteWriter, NotePrefix, log.LstdFlags|log.Lshortfile)
+	WarnTimeLog  *log.Logger = log.New(WarnWriter, WarnPrefix, log.LstdFlags|log.Lshortfile)
+	ErrTimeLog   *log.Logger = log.New(ErrWriter, ErrPrefix, log.LstdFlags|log.Llongfile)
+	CritTimeLog  *log.Logger = log.New(CritWriter, CritPrefix, log.LstdFlags|log.Llongfile)
 )
-func init() {
-	if lvl, ok := os.LookupEnv("LOGLEVEL"); ok {
-		switch lvl {
+/* CONFIG */
+
+func SetLogLevel(lvl string) {
+	// fmt.Printf("pkg/log: Set LOGLEVEL -> %s\n", lvl)
+	switch lvl {
+	case "crit":
+		ErrWriter = io.Discard
+		fallthrough
 	case "err", "fatal":
 		WarnWriter = io.Discard
 		fallthrough
 	case "warn":
-		InfoWriter = io.Discard
+		NoteWriter = io.Discard
 		fallthrough
+	case "notice":
+		InfoWriter = io.Discard
+		fallthrough
 	case "info":
 		DebugWriter = io.Discard
+		break
 	case "debug":
 		// Nothing to do...
+		break
 	default:
-		Warnf("environment variable LOGLEVEL has invalid value %#v", lvl)
-	}
+		fmt.Printf("pkg/log: Flag 'loglevel' has invalid value %#v\npkg/log: Will use default loglevel 'debug'\n", lvl)
+		SetLogLevel("debug")
 	}
 }
-func Debug(v ...interface{}) {
-	if DebugWriter != io.Discard {
-		DebugLog.Print(v...)
-	}
+func SetLogDateTime(logdate bool) {
+	//fmt.Printf("pkg/log: Set DATEBOOL -> %v\n", logdate)
+	logDateTime = logdate
 }
-func Info(v ...interface{}) {
-	if InfoWriter != io.Discard {
-		InfoLog.Print(v...)
-	}
+/* PRINT */
+
+// Private helper
+func printStr(v ...interface{}) string {
+	return fmt.Sprint(v...)
 }
 func Print(v ...interface{}) {
 	Info(v...)
 }
+func Debug(v ...interface{}) {
+	if DebugWriter != io.Discard {
+		out := printStr(v...)
+		if logDateTime {
+			DebugTimeLog.Output(2, out)
+		} else {
+			DebugLog.Output(2, out)
+		}
+	}
+}
+
+func Info(v ...interface{}) {
+	if InfoWriter != io.Discard {
+		out := printStr(v...)
+		if logDateTime {
+			InfoTimeLog.Output(2, out)
+		} else {
+			InfoLog.Output(2, out)
+		}
+	}
+}
+
+func Note(v ...interface{}) {
+	if NoteWriter != io.Discard {
+		out := printStr(v...)
+		if logDateTime {
+			NoteTimeLog.Output(2, out)
+		} else {
+			NoteLog.Output(2, out)
+		}
+	}
+}
+
 func Warn(v ...interface{}) {
 	if WarnWriter != io.Discard {
-		WarnLog.Print(v...)
+		out := printStr(v...)
+		if logDateTime {
+			WarnTimeLog.Output(2, out)
+		} else {
+			WarnLog.Output(2, out)
+		}
 	}
 }
 func Error(v ...interface{}) {
 	if ErrWriter != io.Discard {
-		ErrLog.Print(v...)
+		out := printStr(v...)
+		if logDateTime {
+			ErrTimeLog.Output(2, out)
+		} else {
+			ErrLog.Output(2, out)
+		}
 	}
 }
+// Writes error log and panics; a recover() further up the stack can keep the application alive (unlike Fatal)
+func Panic(v ...interface{}) {
+	Error(v...)
+	panic("Panic triggered ...")
+}
+
+// Writes error log, stops application
 func Fatal(v ...interface{}) {
 	Error(v...)
 	os.Exit(1)
 }
-func Debugf(format string, v ...interface{}) {
-	if DebugWriter != io.Discard {
-		DebugLog.Printf(format, v...)
+func Crit(v ...interface{}) {
+	if CritWriter != io.Discard {
+		out := printStr(v...)
+		if logDateTime {
+			CritTimeLog.Output(2, out)
+		} else {
+			CritLog.Output(2, out)
+		}
 	}
 }
-func Infof(format string, v ...interface{}) {
-	if InfoWriter != io.Discard {
-		InfoLog.Printf(format, v...)
-	}
+/* PRINT FORMAT */
+
+// Private helper
+func printfStr(format string, v ...interface{}) string {
+	return fmt.Sprintf(format, v...)
 }
 func Printf(format string, v ...interface{}) {
 	Infof(format, v...)
 }
-func Finfof(w io.Writer, format string, v ...interface{}) {
-	if w != io.Discard {
-		fmt.Fprintf(InfoWriter, InfoPrefix+format+"\n", v...)
+func Debugf(format string, v ...interface{}) {
+	if DebugWriter != io.Discard {
+		out := printfStr(format, v...)
+		if logDateTime {
+			DebugTimeLog.Output(2, out)
+		} else {
+			DebugLog.Output(2, out)
+		}
+	}
+}
+
+func Infof(format string, v ...interface{}) {
+	if InfoWriter != io.Discard {
+		out := printfStr(format, v...)
+		if logDateTime {
+			InfoTimeLog.Output(2, out)
+		} else {
+			InfoLog.Output(2, out)
+		}
+	}
+}
+
+func Notef(format string, v ...interface{}) {
+	if NoteWriter != io.Discard {
+		out := printfStr(format, v...)
+		if logDateTime {
+			NoteTimeLog.Output(2, out)
+		} else {
+			NoteLog.Output(2, out)
+		}
 	}
 }
 func Warnf(format string, v ...interface{}) {
 	if WarnWriter != io.Discard {
-		WarnLog.Printf(format, v...)
+		out := printfStr(format, v...)
+		if logDateTime {
+			WarnTimeLog.Output(2, out)
+		} else {
+			WarnLog.Output(2, out)
+		}
 	}
 }
 func Errorf(format string, v ...interface{}) {
 	if ErrWriter != io.Discard {
-		ErrLog.Printf(format, v...)
+		out := printfStr(format, v...)
+		if logDateTime {
+			ErrTimeLog.Output(2, out)
+		} else {
+			ErrLog.Output(2, out)
+		}
 	}
 }
+// Writes error log and panics; a recover() further up the stack can keep the application alive (unlike Fatal)
+func Panicf(format string, v ...interface{}) {
+	Errorf(format, v...)
+	panic("Panic triggered ...")
+}
+
+// Writes error log, stops application
 func Fatalf(format string, v ...interface{}) {
 	Errorf(format, v...)
 	os.Exit(1)
 }
+
+func Critf(format string, v ...interface{}) {
+	if CritWriter != io.Discard {
+		out := printfStr(format, v...)
+		if logDateTime {
+			CritTimeLog.Output(2, out)
+		} else {
+			CritLog.Output(2, out)
+		}
+	}
+}
+
+/* SPECIAL */
+
+func Finfof(w io.Writer, format string, v ...interface{}) {
+	if w != io.Discard {
+		if logDateTime {
+			currentTime := time.Now()
+			fmt.Fprintf(w, currentTime.String()+" "+InfoPrefix+format+"\n", v...)
+		} else {
+			fmt.Fprintf(w, InfoPrefix+format+"\n", v...)
+		}
+	}
 }
diff --git a/pkg/schema/validate.go b/pkg/schema/validate.go
index 52730c0..b3eee6e 100644
--- a/pkg/schema/validate.go
+++ b/pkg/schema/validate.go
@@ -54,7 +54,7 @@ func Validate(k Kind, r io.Reader) (err error) {
 	var v interface{}
 	if err := json.NewDecoder(r).Decode(&v); err != nil {
-		log.Errorf("SCHEMA/VALIDATE > Failed to decode %v", err)
+		log.Errorf("Failed to decode %v", err)
 		return err
 	}
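
Reviewer note: a minimal usage sketch of the reworked pkg/log API from this patch, assuming the import path github.com/ClusterCockpit/cc-backend/pkg/log; the messages themselves are made up for illustration.

package main

import "github.com/ClusterCockpit/cc-backend/pkg/log"

func main() {
	// Keep 'warn' and above; the 'debug', 'info' and 'notice' writers are set to io.Discard.
	log.SetLogLevel("warn")
	// Leave timestamps off; systemd-journald adds them anyway.
	log.SetLogDateTime(false)

	log.Debugf("SQL query took %d ms", 3)      // suppressed at level 'warn'
	log.Note("LDAP sync scheduled")            // suppressed at level 'warn'
	log.Warnf("job %d has no metadata", 42)    // printed with prefix "<4>[WARNING] "
	log.Errorf("archiving failed: %v", "oops") // printed with prefix "<3>[ERROR] "
	log.Crit("database connection lost")       // printed with prefix "<2>[CRITICAL] "
}

The Output(2, ...) calls in the wrappers use calldepth 2 so that the Lshortfile/Llongfile flags report the caller of e.g. log.Warnf rather than a location inside pkg/log itself.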
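
A second, hypothetical sketch of the difference between the new Panic/Panicf helpers and Fatal/Fatalf: Panicf logs through Errorf and then panics, so a deferred recover (for example in an HTTP middleware like the one below, which is not part of this patch) can turn the failure into an error response, while Fatalf calls os.Exit(1) and cannot be intercepted.

package main

import (
	"net/http"

	"github.com/ClusterCockpit/cc-backend/pkg/log"
)

// recoverMiddleware converts a panic raised via log.Panicf into a 500 response;
// a log.Fatalf in the handler would still terminate the whole process.
func recoverMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		defer func() {
			if rec := recover(); rec != nil {
				log.Errorf("recovered from panic: %v", rec)
				http.Error(rw, "internal server error", http.StatusInternalServerError)
			}
		}()
		next.ServeHTTP(rw, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/boom", func(rw http.ResponseWriter, r *http.Request) {
		log.Panicf("unexpected state in %s", r.URL.Path) // logged via Errorf, then panics; recovered above
	})
	log.Fatal(http.ListenAndServe(":8080", recoverMiddleware(mux)))
}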