package cleanupmanager

import (
	"context"
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	"log/slog"
	"slices"
	"time"

	"github.com/google/uuid"
	"github.com/jackc/pgx/v5"
	"github.com/riverqueue/river"
	"github.com/target/goalert/config"
	"github.com/target/goalert/gadb"
	"github.com/target/goalert/schedule"
)

type SchedDataArgs struct {
	ScheduleID uuid.UUID
}

func (SchedDataArgs) Kind() string { return "cleanup-manager-sched-data" }

type SchedDataLFW struct{}

func (SchedDataLFW) Kind() string { return "cleanup-manager-sched-data-lfw" }

// LookForWorkScheduleData will automatically look for schedules that need their JSON data cleaned up and insert them into the queue.
func (db *DB) LookForWorkScheduleData(ctx context.Context, j *river.Job[SchedDataLFW]) error {
	cfg := config.FromContext(ctx)
	if cfg.Maintenance.ScheduleCleanupDays <= 0 {
		return nil
	}

	var outOfDate []uuid.UUID
	err := db.lock.WithTxShared(ctx, func(ctx context.Context, tx *sql.Tx) error {
		var err error
		// Grab schedules that haven't been cleaned up in the last 30 days.
		outOfDate, err = gadb.New(tx).CleanupMgrScheduleNeedsCleanup(ctx, 30)
		return err
	})
	if errors.Is(err, sql.ErrNoRows) {
		return nil
	}
	if err != nil {
		return err
	}
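
	// Queue one cleanup job per out-of-date schedule; unique-by-args keeps
	// duplicate jobs for the same schedule from being inserted.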
	var params []river.InsertManyParams
	for _, id := range outOfDate {
		params = append(params, river.InsertManyParams{
			Args: SchedDataArgs{ScheduleID: id},
			InsertOpts: &river.InsertOpts{
				Queue:      QueueName,
				Priority:   PriorityTempSched,
				UniqueOpts: river.UniqueOpts{ByArgs: true},
			},
		})
	}
	if len(params) == 0 {
		return nil
	}

	_, err = river.ClientFromContext[pgx.Tx](ctx).InsertMany(ctx, params)
	if err != nil {
		return fmt.Errorf("insert many: %w", err)
	}

	return nil
}

// CleanupScheduleData will automatically clean up schedule data:
// - Remove temporary-schedule shifts for users that no longer exist.
// - Remove temporary schedules that have entirely passed.
func (db *DB) CleanupScheduleData(ctx context.Context, j *river.Job[SchedDataArgs]) error {
	cfg := config.FromContext(ctx)
	if cfg.Maintenance.ScheduleCleanupDays <= 0 {
		return nil
	}

	log := db.logger.With(slog.String("schedule_id", j.Args.ScheduleID.String()))
	err := db.lock.WithTxShared(ctx, func(ctx context.Context, tx *sql.Tx) (err error) {
		// Fetch the JSON data for the schedule this job is for.
		rawData, err := gadb.New(tx).CleanupMgrScheduleData(ctx, j.Args.ScheduleID)
		if errors.Is(err, sql.ErrNoRows) {
			return nil
		}
		if err != nil {
			return fmt.Errorf("get schedule data: %w", err)
		}
		gdb := gadb.New(tx)

		var data schedule.Data
		err = json.Unmarshal(rawData, &data)
		if err != nil {
			return fmt.Errorf("unmarshal schedule data: %w", err)
		}

		// To remove shifts for users that no longer exist, collect the set of
		// user IDs from the schedule data and verify which ones still exist.
		users := collectUsers(data)
		var validUsers []uuid.UUID
		if len(users) > 0 {
			validUsers, err = gdb.CleanupMgrVerifyUsers(ctx, users)
			if err != nil {
				return fmt.Errorf("lookup valid users: %w", err)
			}
		}
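
		// Compare against the database's current time, not the app server's clock.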
		now, err := gdb.Now(ctx)
		if err != nil {
			return fmt.Errorf("get current time: %w", err)
		}
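
		// Prune ended temporary schedules and invalid shifts; if nothing
		// changed, there is no need to rewrite the data.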
		changed := cleanupData(&data, validUsers, now)
		if !changed {
			return gdb.CleanupMgrScheduleDataSkip(ctx, j.Args.ScheduleID)
		}

		rawData, err = json.Marshal(data)
		if err != nil {
			return fmt.Errorf("marshal schedule data: %w", err)
		}

		log.InfoContext(ctx, "Updated schedule data.")

		return gdb.CleanupMgrUpdateScheduleData(ctx,
			gadb.CleanupMgrUpdateScheduleDataParams{
				ScheduleID: j.Args.ScheduleID,
				Data:       rawData,
			})
	})
	if err != nil {
		return err
	}

	return nil
}

// cleanupData will clean up the schedule data, removing temporary schedules that have ended and shifts for users that no longer exist.
func cleanupData(data *schedule.Data, validUsers []uuid.UUID, now time.Time) (changed bool) {
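	// Filter in place: the loops below reuse the existing backing arrays via
	// the s[:0] idiom instead of allocating new slices.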
	newTempSched := data.V1.TemporarySchedules[:0]
	for _, temp := range data.V1.TemporarySchedules {
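		// Drop temporary schedules that have already ended.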
		if temp.End.Before(now) {
			changed = true
			continue
		}
		cleanShifts := temp.Shifts[:0]
		for _, shift := range temp.Shifts {
			id, err := uuid.Parse(shift.UserID)
			if err != nil {
				changed = true
				// invalid user id/shift, delete it
				continue
			}
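			// Drop shifts belonging to users that no longer exist.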
			if !slices.Contains(validUsers, id) {
				changed = true
				continue
			}
			cleanShifts = append(cleanShifts, shift)
		}
		temp.Shifts = cleanShifts
		newTempSched = append(newTempSched, temp)
	}
	data.V1.TemporarySchedules = newTempSched

	return changed
}

// collectUsers will collect all user ids from the schedule data.
func collectUsers(data schedule.Data) (users []uuid.UUID) {
	for _, sched := range data.V1.TemporarySchedules {
		for _, shift := range sched.Shifts {
			id, err := uuid.Parse(shift.UserID)
			if err != nil {
				// invalid id, skip it
				continue
			}
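			// De-duplicate, so each user ID appears only once.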
			if slices.Contains(users, id) {
				continue
			}
			users = append(users, id)
		}
	}

	return users
}