#include <string.h>
#include <stdio.h>
#include <stdlib.h>   // atoi(), strtoul(), setenv()
#include <time.h>
#include <sys/time.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "esp_log.h"
#include "esp_sntp.h"
#include "nvs_flash.h"
#include "nvs.h"
#include "scheduler.h"

static const char *TAG = "SCHEDULER";

// NVS namespace
#define SCHEDULER_NVS_NAMESPACE "scheduler"

// Scheduler state
typedef struct {
    bool initialized;
    bool time_synchronized;
    time_t last_sync_time;
    bool holiday_mode;

    // Schedules storage
    schedule_config_t schedules[SCHEDULER_MAX_PUMPS][SCHEDULER_MAX_SCHEDULES_PER_PUMP];

    // Task handle
    TaskHandle_t scheduler_task;
    SemaphoreHandle_t mutex;

    // Callbacks
    schedule_trigger_callback_t trigger_callback;
    schedule_status_callback_t status_callback;
} scheduler_state_t;

static scheduler_state_t s_scheduler = {0};

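/*
 * schedule_config_t is defined in scheduler.h; the fields this module relies
 * on (as referenced below) are: type, enabled, interval_minutes, hour,
 * minute, days_mask, duration_ms, speed_percent, last_run, next_run.
 */
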
// Forward declarations
static void scheduler_task(void *pvParameters);
static esp_err_t save_schedule_to_nvs(uint8_t pump_id, uint8_t schedule_id);
static esp_err_t load_schedule_from_nvs(uint8_t pump_id, uint8_t schedule_id);
static esp_err_t save_global_settings(void);
static esp_err_t load_global_settings(void);
static void check_and_execute_schedules(void);
static bool should_run_now(const schedule_config_t *config, time_t current_time);

// NTP sync callback
static void time_sync_notification_cb(struct timeval *tv)
{
    ESP_LOGI(TAG, "Time synchronized via NTP");
    s_scheduler.time_synchronized = true;
    s_scheduler.last_sync_time = tv->tv_sec;
}

esp_err_t scheduler_init(void)
{
    if (s_scheduler.initialized) {
        return ESP_OK;
    }

    ESP_LOGI(TAG, "Initializing scheduler");

    // Create mutex
    s_scheduler.mutex = xSemaphoreCreateMutex();
    if (s_scheduler.mutex == NULL) {
        ESP_LOGE(TAG, "Failed to create mutex");
        return ESP_ERR_NO_MEM;
    }

    // Initialize schedules array
    memset(s_scheduler.schedules, 0, sizeof(s_scheduler.schedules));

    // Load schedules from NVS (pump IDs are 1-based at the API boundary)
    for (int pump = 0; pump < SCHEDULER_MAX_PUMPS; pump++) {
        for (int sched = 0; sched < SCHEDULER_MAX_SCHEDULES_PER_PUMP; sched++) {
            load_schedule_from_nvs(pump + 1, sched);
        }
    }

    // Load global settings
    load_global_settings();

    // Initialize SNTP for time synchronization
    ESP_LOGI(TAG, "Initializing SNTP");
    esp_sntp_setoperatingmode(SNTP_OPMODE_POLL);
    esp_sntp_setservername(0, "pool.ntp.org");
    esp_sntp_setservername(1, "time.nist.gov");
    esp_sntp_setservername(2, "time.google.com");
    sntp_set_time_sync_notification_cb(time_sync_notification_cb);
    esp_sntp_init();

    // Set timezone (adjust as needed). POSIX TZ string: Mountain Time,
    // UTC-7, DST from the second Sunday of March to the first Sunday of November.
    setenv("TZ", "MST7MDT,M3.2.0,M11.1.0", 1); // Mountain Time (Denver)
    tzset();

    // Create scheduler task
    if (xTaskCreate(scheduler_task, "scheduler", 4096, NULL, 5, &s_scheduler.scheduler_task) != pdPASS) {
        ESP_LOGE(TAG, "Failed to create scheduler task");
        vSemaphoreDelete(s_scheduler.mutex);
        return ESP_ERR_NO_MEM;
    }

    s_scheduler.initialized = true;
    ESP_LOGI(TAG, "Scheduler initialized successfully");

    return ESP_OK;
}

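/*
 * Typical startup sequence (illustrative sketch, not part of this module):
 * error handling is elided, on_schedule_trigger is a hypothetical
 * application-supplied callback, and its parameter types are assumptions
 * inferred from how the fields are used in this file (the authoritative
 * typedef is schedule_trigger_callback_t in scheduler.h).
 *
 *   static void on_schedule_trigger(uint8_t pump_id, uint8_t schedule_id,
 *                                   uint32_t duration_ms, uint8_t speed_percent)
 *   {
 *       // Start pump pump_id at speed_percent for duration_ms.
 *   }
 *
 *   void app_startup(void)
 *   {
 *       nvs_flash_init();                           // NVS must be ready before scheduler_init()
 *       scheduler_register_trigger_callback(on_schedule_trigger);
 *       scheduler_init();
 *
 *       schedule_config_t cfg = {
 *           .type = SCHEDULE_TYPE_TIME_OF_DAY,      // daily at a fixed time
 *           .enabled = true,
 *           .hour = 7,
 *           .minute = 30,
 *           .duration_ms = 15000,                   // run for 15 s
 *           .speed_percent = 80,
 *       };
 *       scheduler_add_schedule(1, 0, &cfg);         // pump 1, schedule slot 0
 *   }
 */
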
esp_err_t scheduler_deinit(void)
{
    if (!s_scheduler.initialized) {
        return ESP_OK;
    }

    // Stop SNTP
    esp_sntp_stop();

    // Delete task
    if (s_scheduler.scheduler_task) {
        vTaskDelete(s_scheduler.scheduler_task);
    }

    // Delete mutex
    if (s_scheduler.mutex) {
        vSemaphoreDelete(s_scheduler.mutex);
    }

    s_scheduler.initialized = false;
    return ESP_OK;
}

esp_err_t scheduler_add_schedule(uint8_t pump_id, uint8_t schedule_id,
                                 const schedule_config_t *config)
{
    if (!s_scheduler.initialized) {
        return ESP_ERR_INVALID_STATE;
    }

    if (!config) {
        return ESP_ERR_INVALID_ARG;
    }

    if (pump_id < 1 || pump_id > SCHEDULER_MAX_PUMPS ||
        schedule_id >= SCHEDULER_MAX_SCHEDULES_PER_PUMP) {
        return ESP_ERR_INVALID_ARG;
    }

    if (config->type >= SCHEDULE_TYPE_MAX) {
        return ESP_ERR_INVALID_ARG;
    }

    xSemaphoreTake(s_scheduler.mutex, portMAX_DELAY);

    // Copy configuration
    memcpy(&s_scheduler.schedules[pump_id - 1][schedule_id], config, sizeof(schedule_config_t));

    // Calculate next run time
    if (config->enabled && s_scheduler.time_synchronized) {
        time_t now = scheduler_get_current_time();
        s_scheduler.schedules[pump_id - 1][schedule_id].next_run =
            scheduler_calculate_next_run(config, now);
    }

    // Save to NVS
    esp_err_t ret = save_schedule_to_nvs(pump_id, schedule_id);

    xSemaphoreGive(s_scheduler.mutex);

    if (ret == ESP_OK) {
        ESP_LOGI(TAG, "Added schedule %d for pump %d", schedule_id, pump_id);
    }

    return ret;
}

esp_err_t scheduler_get_schedule(uint8_t pump_id, uint8_t schedule_id,
                                 schedule_config_t *config)
{
    if (!s_scheduler.initialized) {
        return ESP_ERR_INVALID_STATE;
    }

    if (!config) {
        return ESP_ERR_INVALID_ARG;
    }

    if (pump_id < 1 || pump_id > SCHEDULER_MAX_PUMPS ||
        schedule_id >= SCHEDULER_MAX_SCHEDULES_PER_PUMP) {
        return ESP_ERR_INVALID_ARG;
    }

    xSemaphoreTake(s_scheduler.mutex, portMAX_DELAY);
    memcpy(config, &s_scheduler.schedules[pump_id - 1][schedule_id], sizeof(schedule_config_t));
    xSemaphoreGive(s_scheduler.mutex);

    return ESP_OK;
}

esp_err_t scheduler_remove_schedule(uint8_t pump_id, uint8_t schedule_id)
{
    if (!s_scheduler.initialized) {
        return ESP_ERR_INVALID_STATE;
    }

    if (pump_id < 1 || pump_id > SCHEDULER_MAX_PUMPS ||
        schedule_id >= SCHEDULER_MAX_SCHEDULES_PER_PUMP) {
        return ESP_ERR_INVALID_ARG;
    }

    xSemaphoreTake(s_scheduler.mutex, portMAX_DELAY);

    // Clear schedule
    memset(&s_scheduler.schedules[pump_id - 1][schedule_id], 0, sizeof(schedule_config_t));

    // Remove from NVS
    nvs_handle_t nvs_handle;
    esp_err_t ret = nvs_open(SCHEDULER_NVS_NAMESPACE, NVS_READWRITE, &nvs_handle);
    if (ret == ESP_OK) {
        char key[32];
        snprintf(key, sizeof(key), "sched_%d_%d", pump_id, schedule_id);
        nvs_erase_key(nvs_handle, key);
        nvs_commit(nvs_handle);
        nvs_close(nvs_handle);
    }

    xSemaphoreGive(s_scheduler.mutex);

    ESP_LOGI(TAG, "Removed schedule %d for pump %d", schedule_id, pump_id);
    return ret;
}

esp_err_t scheduler_enable_schedule(uint8_t pump_id, uint8_t schedule_id, bool enable)
{
    if (!s_scheduler.initialized) {
        return ESP_ERR_INVALID_STATE;
    }

    if (pump_id < 1 || pump_id > SCHEDULER_MAX_PUMPS ||
        schedule_id >= SCHEDULER_MAX_SCHEDULES_PER_PUMP) {
        return ESP_ERR_INVALID_ARG;
    }

    xSemaphoreTake(s_scheduler.mutex, portMAX_DELAY);

    s_scheduler.schedules[pump_id - 1][schedule_id].enabled = enable;

    // Recalculate next run time if enabling
    if (enable && s_scheduler.time_synchronized) {
        time_t now = scheduler_get_current_time();
        s_scheduler.schedules[pump_id - 1][schedule_id].next_run =
            scheduler_calculate_next_run(&s_scheduler.schedules[pump_id - 1][schedule_id], now);
    }

    esp_err_t ret = save_schedule_to_nvs(pump_id, schedule_id);

    xSemaphoreGive(s_scheduler.mutex);

    ESP_LOGI(TAG, "%s schedule %d for pump %d", enable ? "Enabled" : "Disabled",
             schedule_id, pump_id);
    return ret;
}

esp_err_t scheduler_set_time(time_t current_time)
{
    if (!s_scheduler.initialized) {
        return ESP_ERR_INVALID_STATE;
    }

    struct timeval tv = {
        .tv_sec = current_time,
        .tv_usec = 0
    };

    settimeofday(&tv, NULL);

    s_scheduler.time_synchronized = true;
    s_scheduler.last_sync_time = current_time;

    // Recalculate all next run times
    xSemaphoreTake(s_scheduler.mutex, portMAX_DELAY);

    for (int pump = 0; pump < SCHEDULER_MAX_PUMPS; pump++) {
        for (int sched = 0; sched < SCHEDULER_MAX_SCHEDULES_PER_PUMP; sched++) {
            if (s_scheduler.schedules[pump][sched].enabled) {
                s_scheduler.schedules[pump][sched].next_run =
                    scheduler_calculate_next_run(&s_scheduler.schedules[pump][sched], current_time);
            }
        }
    }

    xSemaphoreGive(s_scheduler.mutex);

    // time_t may be 64-bit, so cast for a portable format specifier
    ESP_LOGI(TAG, "Time set manually to %lld", (long long)current_time);
    return ESP_OK;
}

esp_err_t scheduler_sync_time_ntp(void)
{
    if (esp_sntp_get_sync_status() == SNTP_SYNC_STATUS_IN_PROGRESS) {
        return ESP_ERR_NOT_FINISHED;
    }

    // Trigger sync
    esp_sntp_restart();

    return ESP_OK;
}

bool scheduler_is_time_synchronized(void)
{
    return s_scheduler.time_synchronized;
}

time_t scheduler_get_current_time(void)
{
    time_t now;
    time(&now);
    return now;
}

esp_err_t scheduler_set_holiday_mode(bool enabled)
{
    if (!s_scheduler.initialized) {
        return ESP_ERR_INVALID_STATE;
    }

    xSemaphoreTake(s_scheduler.mutex, portMAX_DELAY);
    s_scheduler.holiday_mode = enabled;
    esp_err_t ret = save_global_settings();
    xSemaphoreGive(s_scheduler.mutex);

    ESP_LOGI(TAG, "Holiday mode %s", enabled ? "enabled" : "disabled");
    return ret;
}

bool scheduler_get_holiday_mode(void)
{
    return s_scheduler.holiday_mode;
}

esp_err_t scheduler_get_status(scheduler_status_t *status)
{
    if (!s_scheduler.initialized) {
        return ESP_ERR_INVALID_STATE;
    }

    if (!status) {
        return ESP_ERR_INVALID_ARG;
    }

    xSemaphoreTake(s_scheduler.mutex, portMAX_DELAY);

    status->holiday_mode = s_scheduler.holiday_mode;
    status->time_synchronized = s_scheduler.time_synchronized;
    status->last_sync_time = s_scheduler.last_sync_time;

    // Count active schedules
    status->active_schedules = 0;
    for (int pump = 0; pump < SCHEDULER_MAX_PUMPS; pump++) {
        for (int sched = 0; sched < SCHEDULER_MAX_SCHEDULES_PER_PUMP; sched++) {
            if (s_scheduler.schedules[pump][sched].enabled &&
                s_scheduler.schedules[pump][sched].type != SCHEDULE_TYPE_DISABLED) {
                status->active_schedules++;
            }
        }
    }

    xSemaphoreGive(s_scheduler.mutex);

    return ESP_OK;
}

time_t scheduler_calculate_next_run(const schedule_config_t *config, time_t from_time)
{
    if (!config || config->type == SCHEDULE_TYPE_DISABLED || !config->enabled) {
        return 0;
    }

    struct tm timeinfo;
    localtime_r(&from_time, &timeinfo);

    switch (config->type) {
        case SCHEDULE_TYPE_INTERVAL:
            // Simple interval from last run or from now
            if (config->last_run > 0) {
                return config->last_run + (config->interval_minutes * 60);
            } else {
                return from_time + (config->interval_minutes * 60);
            }

        case SCHEDULE_TYPE_TIME_OF_DAY:
        {
            // Daily at specific time
            struct tm next_time = timeinfo;
            next_time.tm_hour = config->hour;
            next_time.tm_min = config->minute;
            next_time.tm_sec = 0;
            next_time.tm_isdst = -1; // let mktime() resolve DST for the target time

            time_t next_run = mktime(&next_time);

            // If time has passed today, schedule for tomorrow
            if (next_run <= from_time) {
                next_time.tm_mday++;
                next_time.tm_isdst = -1;
                next_run = mktime(&next_time);
            }

            return next_run;
        }

        case SCHEDULE_TYPE_DAYS_TIME:
        {
            // Specific days at specific time
            struct tm next_time = timeinfo;
            next_time.tm_hour = config->hour;
            next_time.tm_min = config->minute;
            next_time.tm_sec = 0;

            // Find next matching day
            for (int days_ahead = 0; days_ahead < 8; days_ahead++) {
                struct tm check_time = next_time;
                check_time.tm_mday += days_ahead;
                check_time.tm_isdst = -1; // let mktime() resolve DST for the target time
                time_t check_timestamp = mktime(&check_time);
                localtime_r(&check_timestamp, &check_time);

                // Check if this day matches our mask
                uint8_t day_bit = (1 << check_time.tm_wday);
                if ((config->days_mask & day_bit) && check_timestamp > from_time) {
                    return check_timestamp;
                }
            }

            return 0; // No matching day found (shouldn't happen with a valid mask)
        }

        default:
            return 0;
    }
}

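/*
 * days_mask bit layout (matches struct tm's tm_wday, see the day_bit
 * computation above): bit 0 = Sunday, bit 1 = Monday, ... bit 6 = Saturday.
 * Example: Mon/Wed/Fri = (1 << 1) | (1 << 3) | (1 << 5) = 0x2A.
 */
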
// Task that checks and executes schedules
static void scheduler_task(void *pvParameters)
{
    ESP_LOGI(TAG, "Scheduler task started");

    while (1) {
        // Wait 30 seconds between checks
        vTaskDelay(pdMS_TO_TICKS(30000));

        if (!s_scheduler.time_synchronized) {
            ESP_LOGD(TAG, "Waiting for time synchronization...");
            continue;
        }

        if (s_scheduler.holiday_mode) {
            ESP_LOGD(TAG, "Holiday mode active, skipping schedules");
            continue;
        }

        check_and_execute_schedules();
    }
}

static void check_and_execute_schedules(void)
{
    time_t now = scheduler_get_current_time();

    xSemaphoreTake(s_scheduler.mutex, portMAX_DELAY);

    for (int pump = 0; pump < SCHEDULER_MAX_PUMPS; pump++) {
        for (int sched = 0; sched < SCHEDULER_MAX_SCHEDULES_PER_PUMP; sched++) {
            schedule_config_t *schedule = &s_scheduler.schedules[pump][sched];

            if (!schedule->enabled || schedule->type == SCHEDULE_TYPE_DISABLED) {
                continue;
            }

            // Check if it's time to run
            if (should_run_now(schedule, now)) {
                ESP_LOGI(TAG, "Triggering schedule %d for pump %d", sched, pump + 1);

                // Update last run time
                schedule->last_run = now;

                // Calculate next run time
                schedule->next_run = scheduler_calculate_next_run(schedule, now);

                // Save updated schedule
                save_schedule_to_nvs(pump + 1, sched);

                // Call trigger callback. Note: it runs with the scheduler
                // mutex held, so it must not call back into scheduler APIs
                // that take the mutex (e.g. scheduler_add_schedule()).
                if (s_scheduler.trigger_callback) {
                    s_scheduler.trigger_callback(pump + 1, sched,
                                                 schedule->duration_ms,
                                                 schedule->speed_percent);
                }
            }
        }
    }

    xSemaphoreGive(s_scheduler.mutex);
}

static bool should_run_now(const schedule_config_t *config, time_t current_time)
{
    if (!config || !config->enabled || config->type == SCHEDULE_TYPE_DISABLED) {
        return false;
    }

    // Don't run if we've run in the last minute (prevent double triggers)
    if (config->last_run > 0 && (current_time - config->last_run) < 60) {
        return false;
    }

    switch (config->type) {
        case SCHEDULE_TYPE_INTERVAL:
            // Check if interval has elapsed
            if (config->last_run == 0) {
                // First run
                return true;
            }
            return (current_time - config->last_run) >= (config->interval_minutes * 60);

        case SCHEDULE_TYPE_TIME_OF_DAY:
        case SCHEDULE_TYPE_DAYS_TIME:
            // Check if we're within a minute of the scheduled time
            if (config->next_run > 0 &&
                current_time >= config->next_run &&
                current_time < (config->next_run + 60)) {
                return true;
            }
            break;

        case SCHEDULE_TYPE_DISABLED:
        case SCHEDULE_TYPE_MAX:
        default:
            // Should never reach here due to initial check, but needed for compiler
            break;
    }

    return false;
}

// JSON serialization
esp_err_t scheduler_schedule_to_json(uint8_t pump_id, uint8_t schedule_id,
                                     char *buffer, size_t buffer_size)
{
    if (!buffer || buffer_size == 0) {
        return ESP_ERR_INVALID_ARG;
    }

    schedule_config_t config;
    esp_err_t ret = scheduler_get_schedule(pump_id, schedule_id, &config);
    if (ret != ESP_OK) {
        return ret;
    }

    // Build JSON manually without the cJSON library. snprintf() returns the
    // length that *would* have been written, so check for truncation after
    // each append; otherwise buffer_size - written could underflow and the
    // next append would write past the end of the buffer.
    int written = snprintf(buffer, buffer_size,
        "{\"pump_id\":%d,\"schedule_id\":%d,\"type\":\"%s\",\"enabled\":%s,",
        pump_id, schedule_id,
        scheduler_get_type_string(config.type),
        config.enabled ? "true" : "false");
    if (written < 0 || (size_t)written >= buffer_size) {
        return ESP_ERR_INVALID_SIZE;
    }

    // Add type-specific fields
    switch (config.type) {
        case SCHEDULE_TYPE_INTERVAL:
            written += snprintf(buffer + written, buffer_size - written,
                "\"interval_minutes\":%lu,", (unsigned long)config.interval_minutes);
            break;

        case SCHEDULE_TYPE_TIME_OF_DAY:
            written += snprintf(buffer + written, buffer_size - written,
                "\"hour\":%d,\"minute\":%d,", config.hour, config.minute);
            break;

        case SCHEDULE_TYPE_DAYS_TIME:
        {
            char days_str[64];
            scheduler_get_days_string(config.days_mask, days_str, sizeof(days_str));
            written += snprintf(buffer + written, buffer_size - written,
                "\"hour\":%d,\"minute\":%d,\"days_mask\":%d,\"days\":\"%s\",",
                config.hour, config.minute, config.days_mask, days_str);
            break;
        }

        case SCHEDULE_TYPE_DISABLED:
        case SCHEDULE_TYPE_MAX:
        default:
            // No additional fields for disabled type
            break;
    }
    if ((size_t)written >= buffer_size) {
        return ESP_ERR_INVALID_SIZE;
    }

    // Add common fields
    written += snprintf(buffer + written, buffer_size - written,
        "\"duration_ms\":%lu,\"speed_percent\":%d",
        (unsigned long)config.duration_ms, config.speed_percent);
    if ((size_t)written >= buffer_size) {
        return ESP_ERR_INVALID_SIZE;
    }

    // Add runtime info if available
    if (config.last_run > 0) {
        written += snprintf(buffer + written, buffer_size - written,
            ",\"last_run\":%lld", (long long)config.last_run);
        if ((size_t)written >= buffer_size) {
            return ESP_ERR_INVALID_SIZE;
        }
    }
    if (config.next_run > 0) {
        struct tm timeinfo;
        localtime_r(&config.next_run, &timeinfo);
        char time_str[64];
        strftime(time_str, sizeof(time_str), "%Y-%m-%d %H:%M:%S", &timeinfo);
        written += snprintf(buffer + written, buffer_size - written,
            ",\"next_run\":%lld,\"next_run_str\":\"%s\"",
            (long long)config.next_run, time_str);
        if ((size_t)written >= buffer_size) {
            return ESP_ERR_INVALID_SIZE;
        }
    }

    // Close JSON
    written += snprintf(buffer + written, buffer_size - written, "}");

    return ((size_t)written < buffer_size) ? ESP_OK : ESP_ERR_INVALID_SIZE;
}

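/*
 * Example output for a days_time schedule (wrapped here for readability; the
 * function emits a single line, and the values are illustrative):
 *
 *   {"pump_id":1,"schedule_id":0,"type":"days_time","enabled":true,
 *    "hour":7,"minute":30,"days_mask":42,"days":"Mon,Wed,Fri",
 *    "duration_ms":15000,"speed_percent":80,
 *    "next_run":1700058600,"next_run_str":"2023-11-15 07:30:00"}
 */
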
esp_err_t scheduler_json_to_schedule(const char *json, uint8_t pump_id, uint8_t schedule_id)
{
    if (!json) {
        return ESP_ERR_INVALID_ARG;
    }

    schedule_config_t config = {0};

    // Simple JSON parsing without cJSON:
    // look for key patterns in the JSON string.
    const char *p;

    // Parse type
    p = strstr(json, "\"type\":");
    if (p) {
        p += 7; // Skip "type":
        while (*p == ' ' || *p == '"') p++;
        if (strncmp(p, "disabled", 8) == 0) {
            config.type = SCHEDULE_TYPE_DISABLED;
        } else if (strncmp(p, "interval", 8) == 0) {
            config.type = SCHEDULE_TYPE_INTERVAL;
        } else if (strncmp(p, "time_of_day", 11) == 0) {
            config.type = SCHEDULE_TYPE_TIME_OF_DAY;
        } else if (strncmp(p, "days_time", 9) == 0) {
            config.type = SCHEDULE_TYPE_DAYS_TIME;
        }
    }

    // Parse enabled
    p = strstr(json, "\"enabled\":");
    if (p) {
        p += 10;
        while (*p == ' ') p++;
        config.enabled = (strncmp(p, "true", 4) == 0);
    }

    // Parse interval_minutes for interval type
    if (config.type == SCHEDULE_TYPE_INTERVAL) {
        p = strstr(json, "\"interval_minutes\":");
        if (p) {
            p += 19;
            config.interval_minutes = strtoul(p, NULL, 10); // unsigned 32-bit; atoi() could overflow
        }
    }

    // Parse hour and minute for time-based types
    if (config.type == SCHEDULE_TYPE_TIME_OF_DAY || config.type == SCHEDULE_TYPE_DAYS_TIME) {
        p = strstr(json, "\"hour\":");
        if (p) {
            p += 7;
            config.hour = atoi(p);
        }

        p = strstr(json, "\"minute\":");
        if (p) {
            p += 9;
            config.minute = atoi(p);
        }

        // Parse days_mask for days_time type
        if (config.type == SCHEDULE_TYPE_DAYS_TIME) {
            p = strstr(json, "\"days_mask\":");
            if (p) {
                p += 12;
                config.days_mask = atoi(p);
            }
        }
    }

    // Parse duration_ms
    p = strstr(json, "\"duration_ms\":");
    if (p) {
        p += 14;
        config.duration_ms = strtoul(p, NULL, 10); // unsigned 32-bit; atoi() could overflow
    }

    // Parse speed_percent
    p = strstr(json, "\"speed_percent\":");
    if (p) {
        p += 16;
        config.speed_percent = atoi(p);
    }

    // Add the schedule
    return scheduler_add_schedule(pump_id, schedule_id, &config);
}

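/*
 * Example input accepted by the parser above. Keys may appear in any order
 * (lookups use strstr()) and unrecognized keys are ignored:
 *
 *   {"type":"interval","enabled":true,"interval_minutes":120,
 *    "duration_ms":10000,"speed_percent":100}
 */
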
// NVS persistence
static esp_err_t save_schedule_to_nvs(uint8_t pump_id, uint8_t schedule_id)
{
    nvs_handle_t nvs_handle;
    esp_err_t ret = nvs_open(SCHEDULER_NVS_NAMESPACE, NVS_READWRITE, &nvs_handle);
    if (ret != ESP_OK) {
        return ret;
    }

    char key[32];
    snprintf(key, sizeof(key), "sched_%d_%d", pump_id, schedule_id);

    // Don't save runtime fields (caller holds the scheduler mutex)
    schedule_config_t config = s_scheduler.schedules[pump_id - 1][schedule_id];
    config.last_run = 0;
    config.next_run = 0;

    ret = nvs_set_blob(nvs_handle, key, &config, sizeof(schedule_config_t));

    if (ret == ESP_OK) {
        nvs_commit(nvs_handle);
    }

    nvs_close(nvs_handle);
    return ret;
}

static esp_err_t load_schedule_from_nvs(uint8_t pump_id, uint8_t schedule_id)
{
    nvs_handle_t nvs_handle;
    esp_err_t ret = nvs_open(SCHEDULER_NVS_NAMESPACE, NVS_READONLY, &nvs_handle);
    if (ret != ESP_OK) {
        return ret;
    }

    char key[32];
    snprintf(key, sizeof(key), "sched_%d_%d", pump_id, schedule_id);

    size_t length = sizeof(schedule_config_t);
    ret = nvs_get_blob(nvs_handle, key, &s_scheduler.schedules[pump_id - 1][schedule_id], &length);

    nvs_close(nvs_handle);

    if (ret == ESP_OK) {
        ESP_LOGI(TAG, "Loaded schedule %d for pump %d from NVS", schedule_id, pump_id);
    }

    return ret;
}

static esp_err_t save_global_settings(void)
{
    nvs_handle_t nvs_handle;
    esp_err_t ret = nvs_open(SCHEDULER_NVS_NAMESPACE, NVS_READWRITE, &nvs_handle);
    if (ret != ESP_OK) {
        return ret;
    }

    ret = nvs_set_u8(nvs_handle, "holiday_mode", s_scheduler.holiday_mode ? 1 : 0);

    if (ret == ESP_OK) {
        nvs_commit(nvs_handle);
    }

    nvs_close(nvs_handle);
    return ret;
}

static esp_err_t load_global_settings(void)
{
    nvs_handle_t nvs_handle;
    esp_err_t ret = nvs_open(SCHEDULER_NVS_NAMESPACE, NVS_READONLY, &nvs_handle);
    if (ret != ESP_OK) {
        return ret;
    }

    uint8_t holiday_mode = 0;
    ret = nvs_get_u8(nvs_handle, "holiday_mode", &holiday_mode);
    if (ret == ESP_OK) {
        s_scheduler.holiday_mode = (holiday_mode != 0);
    }

    nvs_close(nvs_handle);
    return ret;
}

// Utility functions
const char* scheduler_get_type_string(schedule_type_t type)
{
    switch (type) {
        case SCHEDULE_TYPE_DISABLED:    return "disabled";
        case SCHEDULE_TYPE_INTERVAL:    return "interval";
        case SCHEDULE_TYPE_TIME_OF_DAY: return "time_of_day";
        case SCHEDULE_TYPE_DAYS_TIME:   return "days_time";
        default:                        return "unknown";
    }
}

const char* scheduler_get_days_string(uint8_t days_mask, char *buffer, size_t size)
{
    if (!buffer || size == 0) {
        return "";
    }

    buffer[0] = '\0';

    if (days_mask == SCHEDULE_DAY_ALL) {
        strlcpy(buffer, "Daily", size);
        return buffer;
    }

    if (days_mask == SCHEDULE_DAY_WEEKDAYS) {
        strlcpy(buffer, "Weekdays", size);
        return buffer;
    }

    if (days_mask == SCHEDULE_DAY_WEEKEND) {
        strlcpy(buffer, "Weekends", size);
        return buffer;
    }

    // Build custom day string
    const char *days[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"};
    bool first = true;

    for (int i = 0; i < 7; i++) {
        if (days_mask & (1 << i)) {
            if (!first) {
                strlcat(buffer, ",", size);
            }
            strlcat(buffer, days[i], size);
            first = false;
        }
    }

    return buffer;
}

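/*
 * Example results: SCHEDULE_DAY_ALL -> "Daily", SCHEDULE_DAY_WEEKDAYS ->
 * "Weekdays", SCHEDULE_DAY_WEEKEND -> "Weekends",
 * 0x2A (Mon|Wed|Fri) -> "Mon,Wed,Fri".
 */
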
// Callbacks
void scheduler_register_trigger_callback(schedule_trigger_callback_t callback)
{
    s_scheduler.trigger_callback = callback;
}

void scheduler_register_status_callback(schedule_status_callback_t callback)
{
    s_scheduler.status_callback = callback;
}

// Manual trigger for testing
esp_err_t scheduler_trigger_schedule(uint8_t pump_id, uint8_t schedule_id)
{
    if (!s_scheduler.initialized) {
        return ESP_ERR_INVALID_STATE;
    }

    if (pump_id < 1 || pump_id > SCHEDULER_MAX_PUMPS ||
        schedule_id >= SCHEDULER_MAX_SCHEDULES_PER_PUMP) {
        return ESP_ERR_INVALID_ARG;
    }

    xSemaphoreTake(s_scheduler.mutex, portMAX_DELAY);

    schedule_config_t *schedule = &s_scheduler.schedules[pump_id - 1][schedule_id];

    if (schedule->type == SCHEDULE_TYPE_DISABLED || !schedule->enabled) {
        xSemaphoreGive(s_scheduler.mutex);
        return ESP_ERR_INVALID_STATE;
    }

    ESP_LOGI(TAG, "Manual trigger of schedule %d for pump %d", schedule_id, pump_id);

    // Call trigger callback (runs with the scheduler mutex held; see
    // check_and_execute_schedules() for the same caveat)
    if (s_scheduler.trigger_callback) {
        s_scheduler.trigger_callback(pump_id, schedule_id,
                                     schedule->duration_ms,
                                     schedule->speed_percent);
    }

    xSemaphoreGive(s_scheduler.mutex);

    return ESP_OK;
}