Compare commits

7 Commits

Author | SHA1 | Date |
---|---|---|
 | a230ad986d | 1 year ago |
 | 1c968ff407 | 1 year ago |
 | 9a41842870 | 1 year ago |
 | c0686b9aa0 | 1 year ago |
 | 3bf3882359 | 1 year ago |
 | 0bc835ecc5 | 1 year ago |
 | 7b77918996 | 1 year ago |
@@ -0,0 +1,27 @@
#!/bin/bash

# rsync -ru --progress --exclude={'res','out.txt','out.dat','err.dat'} ./tests/* emil@c220g2-011017.wisc.cloudlab.us:/users/emil/sledge-server/tests/
# rsync -ru --progress --exclude={'res','out.txt','out.dat','err.dat'} ./tests/* emil@c220g2-011016.wisc.cloudlab.us:/users/emil/sledge-client/tests/

# rsync -ru --progress --exclude={'thirdparty','res','err.dat','out*','*.log'} ./tests ./runtime emil@c220g2-011314.wisc.cloudlab.us:/users/emil/sledge-server/
# rsync -ru --progress --exclude={'res','err.dat','out*','*.log'} ./tests emil@c220g2-011323.wisc.cloudlab.us:/users/emil/sledge-client/

rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log','input*'} ./tests ./runtime emil@128.105.145.72:/users/emil/sledge-server/
rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log','mt-juan/input-cnn','mt-emil/input-cnn'} ./tests ./runtime emil@128.105.145.71:/users/emil/sledge-client/
# rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log','mt-juan/input-cnn','mt-emil/input-cnn'} ./tests ./runtime emil@128.105.145.70:/users/emil/sledge-client/
rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log','input*'} ./tests ./runtime emil@128.105.145.70:/users/emil/sledge-server/
# rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log'} ./tests ./runtime emil@128.105.145.132:/users/emil/sledge-client/

# If on a network where only port 443 is allowed, use this (after forwarding port 443 to SSH on the server):
# rsync -ru -e 'ssh -p 443' --progress --exclude={'res','out.txt','out.dat','err.dat'} ./tests/* emil@server:/users/emil/sledge-server/tests/
# rsync -ru -e 'ssh -p 443' --progress --exclude={'res','out.txt','out.dat','err.dat'} ./tests/* emil@client:/users/emil/sledge-client/tests/


# lab-dell (don't forget to provide the private key in the config file inside the .ssh folder)
# rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log'} ./tests ./runtime lab@161.253.75.227:/home/lab/sledge-emil/

# CMU (don't forget to provide the private key in the config file inside the .ssh folder)
# rsync -ru --progress --exclude={'thirdparty','res','err.dat','out*','*.log'} ./tests ./runtime gwu@arena0.andrew.cmu.edu:/home/gwu/sledge/

# esma
# rsync -ru --progress --exclude={'thirdparty','res-*','err.dat','out*','*.log'} ./tests emil@161.253.75.224:/home/emil/sledge-client/
@@ -0,0 +1,75 @@
#pragma once

#include <stdlib.h>
#include "tenant.h"
#include "message.h"

#define DBF_USE_LINKEDLIST
// static const bool USING_AGGREGATED_GLOBAL_DBF = true;

/* Returns pointer back if successful, null otherwise */
// extern void *global_dbf;
extern void **global_virt_worker_dbfs;
extern void *global_worker_dbf;

struct demand_node {
	struct ps_list list;
	uint64_t abs_deadline;
	uint64_t demand;
	// uint64_t demand_sum;
	// struct sandbox_metadata *sandbox_meta;
	struct tenant *tenant;
};

typedef enum dbf_update_mode
{
	DBF_CHECK_AND_ADD_DEMAND,         /* normal mode for adding new sandbox demands */
	DBF_FORCE_ADD_NEW_SANDBOX_DEMAND, /* work-conservation mode */
	DBF_FORCE_ADD_MANUAL_DEMAND,      /* work-conservation mode */
	DBF_REDUCE_EXISTING_DEMAND,       /* normal mode for reducing existing sandbox demands */
	// DBF_CHECK_EXISTING_SANDBOX_EXTRA_DEMAND, /* special case when a sandbox goes over its expected exec */
	DBF_DELETE_EXISTING_DEMAND        /* normal mode for removing existing sandbox demand */
} dbf_update_mode_t;

typedef int (*dbf_get_worker_idx_fn_t)(void *);
typedef uint64_t (*dbf_get_time_of_oversupply_fn_t)(void *);
typedef void (*dbf_print_fn_t)(void *, uint64_t);
typedef bool (*dbf_try_update_demand_fn_t)(void *, uint64_t, uint64_t, uint64_t, uint64_t, dbf_update_mode_t, void *, struct sandbox_metadata *sandbox_meta);
typedef uint64_t (*dbf_get_demand_overgone_its_supply_at_fn_t)(void *, uint64_t, uint64_t, uint64_t);
typedef void (*dbf_free_fn_t)(void *);

struct dbf_config {
	dbf_get_worker_idx_fn_t get_worker_idx_fn;
	// dbf_get_max_relative_dl_fn_t get_max_relative_dl_fn;
	dbf_get_time_of_oversupply_fn_t get_time_of_oversupply_fn;
	dbf_print_fn_t print_fn;
	// dbf_grow_fn_t grow_fn;
	dbf_try_update_demand_fn_t try_update_demand_fn;
	dbf_get_demand_overgone_its_supply_at_fn_t get_demand_overgone_its_supply_at_fn;
	dbf_free_fn_t free_fn;
};

int dbf_get_worker_idx(void *);
// uint64_t dbf_get_max_relative_dl(void *);
uint64_t dbf_get_time_of_oversupply(void *);
void dbf_print(void *, uint64_t);
// void *dbf_grow(void *, uint64_t);
bool dbf_try_update_demand(void *, uint64_t, uint64_t, uint64_t, uint64_t, dbf_update_mode_t, void *, struct sandbox_metadata *sandbox_meta);
uint64_t dbf_get_demand_overgone_its_supply_at(void *, uint64_t, uint64_t, uint64_t);
void dbf_free(void *);

void dbf_plug_functions(struct dbf_config *config);

void *dbf_list_initialize(uint32_t, uint8_t, int, struct tenant *);
void *dbf_array_initialize(uint32_t, uint8_t, int, struct tenant *);
void *dbf_initialize(uint32_t num_of_workers, uint8_t reservation_percentile, int worker_idx, struct tenant *tenant);


bool
dbf_list_try_add_new_demand(void *dbf_raw, uint64_t start_time, uint64_t abs_deadline, uint64_t adjustment, struct sandbox_metadata *sm);

void
dbf_list_force_add_extra_slack(void *dbf_raw, struct sandbox_metadata *sm, uint64_t adjustment);

void
dbf_list_reduce_demand(struct sandbox_metadata *sm, uint64_t adjustment, bool delete_node);
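The header above exposes the demand-based admission bookkeeping behind a small polymorphic interface: a backend fills a struct dbf_config with its function pointers, registers it via dbf_plug_functions(), and callers go through the generic dbf_* wrappers. Note that under DBF_USE_LINKEDLIST the list backend only plugs a subset of the operations and is otherwise driven through the dbf_list_* functions directly. The snippet below is an illustrative sketch only (not part of the diff) of driving the array backend through the generic wrappers; it assumes the runtime globals such as runtime_quantum and runtime_max_deadline are already initialized, and the timing values are made up.

/* Sketch: admit one job with a given start time, relative deadline, and
 * estimated execution against a standalone array-backed dbf. */
static bool
dbf_admission_sketch(uint64_t now, uint64_t relative_deadline, uint64_t estimated_execution)
{
	/* One dbf covering one worker at 100% reservation, bound to no worker (-1) and no tenant. */
	void *dbf = dbf_array_initialize(1, 100, -1, NULL);

	const uint64_t abs_deadline = now + relative_deadline;
	bool admitted = dbf_try_update_demand(dbf, now, relative_deadline, abs_deadline,
	                                      estimated_execution, DBF_CHECK_AND_ADD_DEMAND,
	                                      NULL, NULL);
	if (!admitted) dbf_print(dbf, now);

	dbf_free(dbf);
	return admitted;
}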
@@ -0,0 +1,5 @@
#pragma once

#include "global_request_scheduler.h"

void global_request_scheduler_mtdbf_initialize();
@@ -0,0 +1,7 @@
#pragma once

#include "module.h"

void local_runqueue_mtdbf_initialize();

size_t queue_length();
@@ -0,0 +1,33 @@
#ifndef MESSAGE_H
#define MESSAGE_H

#include "runtime.h"

typedef enum
{
	MESSAGE_CFW_PULLED_NEW_SANDBOX,
	MESSAGE_CFW_REDUCE_DEMAND,
	MESSAGE_CFW_DELETE_SANDBOX, /* normal mode for deleting new sandbox demands */
	MESSAGE_CFW_EXTRA_DEMAND_REQUEST,
	MESSAGE_CFW_WRITEBACK_PREEMPTION,
	MESSAGE_CFW_WRITEBACK_OVERSHOOT,

	MESSAGE_CTW_SHED_CURRENT_JOB
} message_type_t;

struct message {
	uint64_t sandbox_id;
	uint64_t adjustment;
	uint64_t total_running_duration;
	uint64_t remaining_exec;
	uint64_t timestamp;
	struct sandbox *sandbox;
	struct sandbox_metadata *sandbox_meta;
	message_type_t message_type;
	int sender_worker_idx;
	uint8_t state;
	bool exceeded_estimation;
}; // PAGE_ALIGNED;


#endif /* MESSAGE_H */
@@ -0,0 +1,16 @@
#pragma once

#include <stdbool.h>
#include <stdint.h>

#define TRAFFIC_CONTROL
// #define LOG_TRAFFIC_CONTROL

typedef struct tenant tenant; // TODO: Why do we get a circular dependency here?
typedef struct sandbox_metadata sandbox_metadata;
typedef enum dbf_update_mode dbf_update_mode_t;

void traffic_control_initialize(void);
void traffic_control_log_decision(const int admissions_case_num, const bool admitted);
uint64_t traffic_control_decide(struct sandbox_metadata *sandbox_meta, uint64_t start_time, uint64_t estimated_execution, int *denial_code, int *worker_id_v);
uint64_t traffic_control_shed_work(struct tenant *tenant_to_exclude, uint64_t time_of_oversupply, int *worker_id_virt_just_shed, bool weak_shed);
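To make the contract of traffic_control_decide() concrete, here is an illustrative call-site sketch (not part of the diff), based on how the implementation later in this diff fills the out-parameters: the return value is the amount of work admitted (0 means rejected), *denial_code receives the admission case on success (1 for guaranteed, 2 for work-conserving) or a 42xx denial code on rejection, and *worker_id_v receives the index of the virtual global dbf that absorbed the demand. The helper name and the debuglog call are assumptions made for the sketch.

static bool
try_admit_sketch(struct sandbox_metadata *sandbox_meta, uint64_t start_time, uint64_t estimated_execution)
{
	int denial_code    = 0;
	int worker_id_virt = -1;

	uint64_t work_admitted = traffic_control_decide(sandbox_meta, start_time, estimated_execution,
	                                                &denial_code, &worker_id_virt);
	if (work_admitted == 0) {
		/* e.g. 4290/4291 on a first attempt, 4295/4296 after exceeding the estimate */
		debuglog("admission denied, code %d", denial_code);
		return false;
	}

	/* worker_id_virt tells the caller which virtual worker dbf the demand was booked on */
	return true;
}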
@@ -0,0 +1,335 @@
#include <string.h>
#include <assert.h>
#include "tenant.h"
#include "runtime.h"
#include "arch/getcycles.h"
#include "math.h"
#include "message.h"
#include "panic.h"
#include "dbf.h"

struct tenant;

struct dbf_array {
	struct tenant *tenant;
	int worker_idx;
	// uint32_t idx_oversupply;
	uint64_t max_relative_deadline;
	uint64_t base_supply; /* supply amount for time 1 */
	uint64_t time_of_oversupply;
	uint64_t max_absolute_deadline;

	uint32_t capacity;
	uint64_t demands[];
};

static inline int
dbf_array_get_worker_idx(void *dbf_raw)
{
	assert(dbf_raw);
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
	return dbf->worker_idx;
}

/*static inline uint64_t
dbf_array_get_max_relative_dl(void *dbf_raw)
{
	assert(dbf_raw);
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
	return dbf->max_relative_deadline;
}*/

static inline uint64_t
dbf_array_get_time_of_oversupply(void *dbf_raw)
{
	assert(dbf_raw);
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
	return dbf->time_of_oversupply;
}

static void
dbf_array_print(void *dbf_raw, uint64_t start_time)
{
	assert(dbf_raw != NULL);
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;

	printf("DBF INFO:\n\
\t WorkerIDX: \t%d\n\
\t Capacity: \t%u\n\
\t Max Rel DL: \t%lu\n\
\t Max Abs DL (ms): \t%lu\n\
\t Basic Supply: \t%lu\n\n",
	       dbf->worker_idx, dbf->capacity, dbf->max_relative_deadline, dbf->max_absolute_deadline/runtime_quantum, dbf->base_supply);

	for (int i = 0; i < dbf->capacity; i++) {
		if (dbf->demands[i] > 0) printf("demands[%d] = %lu\n", i, dbf->demands[i]);
	}
}

// static void *
// dbf_array_grow(void *dbf_raw, uint64_t new_max_relative_deadline)
// {
// 	assert(dbf_raw != NULL);
// 	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;

// 	uint32_t new_capacity = new_max_relative_deadline / runtime_quantum /* * 2 */; // NOT adding 1 for final leftovers

// 	struct dbf_array *new_dbf = realloc(dbf, sizeof(struct dbf_array) + sizeof(uint64_t) * new_capacity);
// 	if (new_dbf == NULL) panic("Failed to grow dbf\n");

// 	memset(new_dbf->demands, 0, new_capacity * sizeof(uint64_t));

// 	new_dbf->capacity = new_capacity;
// 	new_dbf->max_relative_deadline = new_max_relative_deadline;

// 	return new_dbf;
// }

/*
static bool
dbf_array_check_supply_quick(struct dbf_array *dbf_raw, uint64_t start_time, uint64_t abs_deadline, uint64_t adjustment)
{
	assert(dbf_raw != NULL);
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
	// assert(start_time < abs_deadline);
	if (start_time >= abs_deadline) return true;

	const uint32_t live_deadline_len = ceil((abs_deadline - start_time) / (double)runtime_quantum);
	const uint32_t abs_deadline_idx = (abs_deadline / runtime_quantum) % dbf->capacity;
	const uint64_t max_supply_at_deadline = live_deadline_len * dbf->base_supply;

	return (dbf->demands[abs_deadline_idx] + adjustment <= max_supply_at_deadline);
}
*/

static bool
dbf_array_try_update_demand(void *dbf_raw, uint64_t start_time, uint64_t route_relative_deadline,
                            uint64_t abs_deadline, uint64_t adjustment, dbf_update_mode_t dbf_update_mode,
                            void *new_message_raw, struct sandbox_metadata *sandbox_meta)
{
	assert(dbf_raw != NULL);
	assert(start_time < abs_deadline);
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;
	struct message *new_message = (struct message *)new_message_raw;

	if (abs_deadline > dbf->max_absolute_deadline) dbf->max_absolute_deadline = abs_deadline;

	if (adjustment == 0) goto done;

	// const uint32_t max_relative_deadline_len = dbf->max_relative_deadline / runtime_quantum;
	const uint32_t live_deadline_len = round((abs_deadline - start_time) / (double)runtime_quantum);
	// const uint32_t live_deadline_len = (abs_deadline - start_time) / runtime_quantum;
	// const uint32_t live_deadline_len = abs_deadline/runtime_quantum - start_time/runtime_quantum;
	const uint32_t abs_deadline_idx = (abs_deadline / runtime_quantum) % dbf->capacity;
	// const uint32_t start_time_idx = (start_time / runtime_quantum) % dbf->capacity;

	// if (start_time_idx == abs_deadline_idx) goto done;

	// assert(live_deadline_len <= max_relative_deadline_len);
	assert(live_deadline_len <= dbf->capacity);

	bool demand_is_below_supply = true;

	for (uint32_t i = abs_deadline_idx, iter = 0; i < abs_deadline_idx + dbf->capacity /*iter< (start_time_idx-abs_deadline_idx+dbf->capacity)%dbf->capacity*/; i++, iter++) {
		uint32_t circular_i = i % dbf->capacity;

		const uint64_t max_supply_at_time_i = (live_deadline_len + iter) * dbf->base_supply;
		const uint64_t prev_demand = dbf->demands[circular_i];

		switch (dbf_update_mode) {
		case DBF_CHECK_AND_ADD_DEMAND:
			dbf->demands[circular_i] += adjustment;

			if (dbf->demands[circular_i] > max_supply_at_time_i) {
				/* Undo DBF adding if over supply detected */
				for (uint32_t j = abs_deadline_idx; j <= i; j++) {
					dbf->demands[j % dbf->capacity] -= adjustment;
				}
				dbf->time_of_oversupply = iter;
				goto err_demand_over_supply;
			}
			break;
		/*case DBF_CHECK_EXISTING_SANDBOX_EXTRA_DEMAND:
			if (dbf->demands[circular_i] + adjustment > max_supply_at_time_i) {
				dbf->time_of_oversupply = iter;
				goto err_demand_over_supply;
			}
			break;*/
		case DBF_FORCE_ADD_NEW_SANDBOX_DEMAND:
			/* [Work Conservation Scenario] Only applicable for tenant and global dbf! */
			assert(dbf->worker_idx < 0);

			dbf->demands[circular_i] += adjustment;
			assert(prev_demand < dbf->demands[circular_i]);

			if (demand_is_below_supply && dbf->demands[circular_i] > max_supply_at_time_i) {
				dbf->time_of_oversupply = iter;
				demand_is_below_supply = false;
			}
			break;
		case DBF_REDUCE_EXISTING_DEMAND:
			dbf->demands[circular_i] -= adjustment;
			if (prev_demand < dbf->demands[circular_i]) {
				printf("DBF_REDUCE_EXISTING_DEMAND\n");
				printf("Worker ID: %d\n", dbf->worker_idx);
				// printf("Tenant Reservation: %u\n", new_message->reserv);
				printf("Sandbox ID: %lu\n", new_message->sandbox_id);
				// printf("Sandbox Response Code: %u\n", new_message->sandbox_response_code);
				printf("Basic supply: %lu\n", dbf->base_supply);
				printf("Cap=%u\n", dbf->capacity);
				printf("Abs_dest_idx=%u\n", abs_deadline_idx);
				printf("live_deadline_len=%u\n", live_deadline_len);
				printf("i=%u, cir_i = %u, iter = %u\n", i, circular_i, iter);
				printf("max_supply_at_time_i = %lu\n\n", max_supply_at_time_i);
				printf("Prev_demand[%u]=%lu\n\n", circular_i, prev_demand);
				printf("demand[%u]=%lu\n\n", circular_i, dbf->demands[circular_i]);
				// printf("sandbox_state=%u, if_case=%d\n", new_message->state, new_message->if_case);
				printf("exceeded_estimation=%d\n", new_message->exceeded_estimation);
				printf("Adjustment=%lu\n", adjustment);
				// printf("last_exec_duration=%lu, prev_rem_exec=%ld, rem_exec=%ld\n",
				//        new_message->last_exec_dur, new_message->prev_rem_exec,
				//        new_message->remaining_execution);

				dbf_print(dbf, start_time);
				panic("Integer underflow -> Tried reducing demand, but it actually went over supply!");
			}
			break;
		}
	}

done:
	return demand_is_below_supply;
err_demand_over_supply:
	demand_is_below_supply = false;
	goto done;
}

static uint64_t
dbf_array_get_demand_overgone_its_supply_at(void *dbf_raw, uint64_t start_time, uint64_t abs_deadline, uint64_t time_of_oversupply)
{
	assert(dbf_raw != NULL);
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;

	time_of_oversupply = abs_deadline + time_of_oversupply*runtime_quantum;

	if (time_of_oversupply > dbf->max_absolute_deadline) {
		printf("abs: %lu, time_of_oversupply: %lu, dbf_abs: %lu\n", abs_deadline, time_of_oversupply, dbf->max_absolute_deadline);
		time_of_oversupply = dbf->max_absolute_deadline;
	}

	assert(start_time < time_of_oversupply);

	// const uint32_t live_deadline_len = (time_of_oversupply - start_time) / runtime_quantum;
	// const uint32_t live_deadline_len = time_of_oversupply/runtime_quantum - start_time/runtime_quantum;
	const uint32_t live_deadline_len = round((time_of_oversupply - start_time) / (double)runtime_quantum);
	const uint32_t abs_deadline_idx = (time_of_oversupply / runtime_quantum) % dbf->capacity;

	uint64_t demand_overgone = 0;
	uint32_t circular_i = (time_of_oversupply/runtime_quantum) % dbf->capacity;

	const uint64_t max_supply_at_time_i = live_deadline_len * dbf->base_supply;
	const uint64_t curr_demand_at_time_i = dbf->demands[circular_i];

	if (curr_demand_at_time_i > max_supply_at_time_i) {
		demand_overgone = curr_demand_at_time_i - max_supply_at_time_i;
	}

	return demand_overgone;
}

/*
static uint64_t
dbf_array_get_demand_overgone_its_supply_at(void *dbf_raw, uint64_t start_time, uint64_t abs_deadline, uint64_t time_of_oversupply)
{
	assert(dbf_raw != NULL);
	assert(start_time < abs_deadline);
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;

	const uint32_t live_deadline_len = ceil((abs_deadline - start_time) / (double)runtime_quantum);
	const uint32_t abs_deadline_idx = (abs_deadline / runtime_quantum) % dbf->capacity;

	uint64_t demand_overgone = 0;
	uint32_t circular_i = (abs_deadline_idx + time_of_oversupply) % dbf->capacity;

	const uint64_t max_supply_at_time_i = (live_deadline_len + time_of_oversupply) * dbf->base_supply;
	const uint64_t curr_demand_at_time_i = dbf->demands[circular_i];

	if (curr_demand_at_time_i > max_supply_at_time_i) {
		demand_overgone = curr_demand_at_time_i - max_supply_at_time_i;
	}

	return demand_overgone;
}

static uint64_t
dbf_array_get_demand_overgone_its_supply_at__BAK(void *dbf_raw, uint64_t start_time, uint64_t abs_deadline)
{
	assert(dbf_raw != NULL);
	assert(start_time < abs_deadline);
	struct dbf_array *dbf = (struct dbf_array *)dbf_raw;

	const uint32_t live_deadline_len = ceil((abs_deadline - start_time) / (double)runtime_quantum);
	const uint32_t absolute_arrival_idx = start_time / runtime_quantum % dbf->capacity;

	uint64_t demand_overgone = 0;

	const uint32_t abs_deadline_idx = (abs_deadline / runtime_quantum) % dbf->capacity;

	// assert(live_deadline_len<=route_relative_deadline_len);

	for (uint32_t i = abs_deadline_idx, iter = 0; i < abs_deadline_idx + live_deadline_len; i++, iter++) {
		uint32_t circular_i = i % dbf->capacity;

		const uint64_t max_supply_at_time_i = (live_deadline_len + iter) * dbf->base_supply;
		const uint64_t curr_demand_at_time_i = dbf->demands[circular_i];

		if (curr_demand_at_time_i > max_supply_at_time_i) {
			if (curr_demand_at_time_i - max_supply_at_time_i > demand_overgone) {
				demand_overgone = curr_demand_at_time_i - max_supply_at_time_i;
			}
		}
	}

	return demand_overgone;
}
*/
static void
dbf_array_free(void *dbf)
{
	assert(dbf != NULL);

	free(dbf);
}


void *
dbf_array_initialize(uint32_t num_of_workers, uint8_t reservation_percentile, int worker_idx, struct tenant *tenant)
{
	struct dbf_config config = {
		.get_worker_idx_fn = dbf_array_get_worker_idx,
		// .get_max_relative_dl_fn = dbf_array_get_max_relative_dl,
		// .get_idx_oversuplly_fn = dbf_array_get_idx_oversuplly,
		.get_time_of_oversupply_fn = dbf_array_get_time_of_oversupply,
		.print_fn = dbf_array_print,
		// .print_supply_fn = dbf_array_print_suply,
		// .grow_fn = dbf_array_grow,
		.try_update_demand_fn = dbf_array_try_update_demand,
		.get_demand_overgone_its_supply_at_fn = dbf_array_get_demand_overgone_its_supply_at,
		.free_fn = dbf_array_free
	};

	dbf_plug_functions(&config);

	assert(runtime_max_deadline > 0);

	uint32_t capacity = runtime_max_deadline / runtime_quantum /* * 2 */; // NOT adding 1 for final leftovers
	struct dbf_array *dbf = (struct dbf_array *)calloc(1, sizeof(struct dbf_array) + sizeof(uint64_t) * capacity);

	dbf->capacity = capacity;
	dbf->max_relative_deadline = runtime_max_deadline;
	dbf->worker_idx = worker_idx;
	// uint32_t cpu_factor = (num_of_workers == 1) ? 1 : num_of_workers * RUNTIME_MAX_CPU_UTIL_PERCENTILE / 100;
	dbf->base_supply = runtime_quantum * num_of_workers * reservation_percentile * RUNTIME_MAX_CPU_UTIL_PERCENTILE / 10000;

	return dbf;
}
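For intuition, here is a small worked example of the array backend's supply math (illustrative only, not part of the diff; the numbers are made up):

/* With runtime_quantum = 5000 cycles, num_of_workers = 4, a 50% reservation and
 * RUNTIME_MAX_CPU_UTIL_PERCENTILE = 100, dbf_array_initialize() computes
 *
 *     base_supply = 5000 * 4 * 50 * 100 / 10000 = 10000 cycles per quantum slot.
 *
 * A job whose absolute deadline is D occupies slot (D / runtime_quantum) % capacity,
 * and dbf_array_try_update_demand() requires that the accumulated demand in the slot
 * reached `iter` steps after the deadline slot stays at or below
 * (live_deadline_len + iter) * base_supply, where live_deadline_len is the job's
 * deadline window measured in quanta. */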
@@ -0,0 +1,80 @@
#include <string.h>
#include <assert.h>
#include "dbf.h"

static struct dbf_config dbf_conf;
// void *global_dbf_temp;

int
dbf_get_worker_idx(void *dbf)
{
	assert(dbf_conf.get_worker_idx_fn != NULL);
	return dbf_conf.get_worker_idx_fn(dbf);
}

// uint64_t
// dbf_get_max_relative_dl(void *dbf)
// {
// 	assert(dbf_conf.get_max_relative_dl_fn != NULL);
// 	return dbf_conf.get_max_relative_dl_fn(dbf);
// }

uint64_t
dbf_get_time_of_oversupply(void *dbf)
{
	assert(dbf_conf.get_time_of_oversupply_fn != NULL);
	return dbf_conf.get_time_of_oversupply_fn(dbf);
}

void
dbf_print(void *dbf, uint64_t start_time)
{
	assert(dbf_conf.print_fn != NULL);
	return dbf_conf.print_fn(dbf, start_time);
}

// void *
// dbf_grow(void *dbf, uint64_t new_max_relative_deadline)
// {
// 	assert(dbf_conf.grow_fn != NULL);
// 	return dbf_conf.grow_fn(dbf, new_max_relative_deadline);
// }

bool
dbf_try_update_demand(void *dbf, uint64_t start_time, uint64_t route_relative_deadline, uint64_t abs_deadline,
                      uint64_t adjustment, dbf_update_mode_t dbf_update_mode, void *new_message, struct sandbox_metadata *sandbox_meta)
{
	assert(dbf_conf.try_update_demand_fn != NULL);
	return dbf_conf.try_update_demand_fn(dbf, start_time, route_relative_deadline, abs_deadline, adjustment,
	                                     dbf_update_mode, new_message, sandbox_meta);
}

uint64_t
dbf_get_demand_overgone_its_supply_at(void *dbf, uint64_t start_time, uint64_t abs_deadline, uint64_t time_of_oversupply)
{
	assert(dbf_conf.get_demand_overgone_its_supply_at_fn != NULL);
	return dbf_conf.get_demand_overgone_its_supply_at_fn(dbf, start_time, abs_deadline, time_of_oversupply);
}

void
dbf_free(void *dbf)
{
	assert(dbf_conf.free_fn != NULL);
	return dbf_conf.free_fn(dbf);
}

void
dbf_plug_functions(struct dbf_config *config)
{
	memcpy(&dbf_conf, config, sizeof(struct dbf_config));
}

void *
dbf_initialize(uint32_t num_of_workers, uint8_t reservation_percentile, int worker_idx, struct tenant *tenant)
{
#ifdef DBF_USE_LINKEDLIST
	return dbf_list_initialize(num_of_workers, reservation_percentile, worker_idx, tenant);
#else
	return dbf_array_initialize(num_of_workers, reservation_percentile, worker_idx, tenant);
#endif
}
@@ -0,0 +1,199 @@
#include <assert.h>
#include "dbf.h"
#include "sandbox_types.h"

struct dbf_list {
	struct tenant *tenant;
	int worker_idx;
	uint64_t max_relative_deadline;
	double base_supply; /* supply amount for time 1 */
	uint64_t time_of_oversupply;
	uint64_t demand_total;

	struct ps_list_head demands_list;
};

static inline int
dbf_list_get_worker_idx(void *dbf_raw)
{
	assert(dbf_raw);
	struct dbf_list *dbf = (struct dbf_list *)dbf_raw;
	return dbf->worker_idx;
}

/*static inline uint64_t
dbf_list_get_max_relative_dl(void *dbf_raw)
{
	assert(dbf_raw);
	struct dbf_list *dbf = (struct dbf_list *)dbf_raw;
	return dbf->max_relative_deadline;
}*/

static inline uint64_t
dbf_list_get_time_of_oversupply(void *dbf_raw)
{
	assert(dbf_raw);
	struct dbf_list *dbf = (struct dbf_list *)dbf_raw;
	return dbf->time_of_oversupply;
}

static void
dbf_list_print(void *dbf_raw, uint64_t start_time)
{
	assert(dbf_raw != NULL);
	struct dbf_list *dbf = (struct dbf_list *)dbf_raw;

	printf("DBF INFO LL:\n\
\t WorkerIDX: \t%d\n\
\t Basic Supply: \t%lf\n\n", dbf->worker_idx, dbf->base_supply);

	struct demand_node *node = NULL;
	uint64_t demand_sum = 0;

	ps_list_foreach_d(&dbf->demands_list, node)
	{
		const uint32_t live_deadline_len = node->abs_deadline - start_time;
		const uint64_t max_supply_at_time_i = live_deadline_len * dbf->base_supply;
		demand_sum += node->demand;
		uint64_t over = 0;
		if (demand_sum >= max_supply_at_time_i) over = demand_sum - max_supply_at_time_i;
		printf("demand_at[%lu] = %lu, t=%s, demand_sum=%lu/supply=%lu, demand_over=%lu\n", node->abs_deadline, node->demand, node->tenant->name, demand_sum, max_supply_at_time_i, over);
	}
}

bool
dbf_list_try_add_new_demand(void *dbf_raw, uint64_t start_time, uint64_t abs_deadline, uint64_t adjustment, struct sandbox_metadata *sm)
{
	assert(dbf_raw != NULL);
	assert(start_time < abs_deadline);
	assert(sm);
	assert(sm->demand_node == NULL);
	assert(adjustment > 0);
	// if (adjustment == 0) return false;

	struct dbf_list *dbf = (struct dbf_list *)dbf_raw;
	struct demand_node *node = NULL;
	uint64_t past_deadline_demand = 0;
	uint64_t demand_sum = 0;

	ps_list_foreach_d(&dbf->demands_list, node)
	{
		if (node->abs_deadline <= start_time) past_deadline_demand = demand_sum;
		else if (node->abs_deadline >= abs_deadline) break;
		demand_sum += node->demand;
	}

	struct demand_node *node_spot = node;
	assert(abs_deadline != node->abs_deadline);
	assert(abs_deadline == sm->absolute_deadline);

	demand_sum += adjustment;
	const uint64_t live_deadline_len = abs_deadline - start_time;
	const uint64_t max_supply_at_time_i = live_deadline_len * dbf->base_supply; // + past_deadline_demand;
	if (demand_sum > max_supply_at_time_i) {
		dbf->time_of_oversupply = abs_deadline;
		goto err;
	}

	while (!ps_list_is_head_d(&dbf->demands_list, node)) {
		struct demand_node *tmp_next = ps_list_next_d(node);
		const uint64_t live_deadline_len = node->abs_deadline - start_time;
		const uint64_t max_supply_at_time_i = live_deadline_len * dbf->base_supply; // + past_deadline_demand;
		demand_sum += node->demand;
		if (demand_sum > max_supply_at_time_i) {
			dbf->time_of_oversupply = node->abs_deadline;
			goto err;
		}
		node = tmp_next;
	}

	struct demand_node *new_node = (struct demand_node *)malloc(sizeof(struct demand_node));
	ps_list_init_d(new_node);
	new_node->abs_deadline = abs_deadline;
	new_node->demand = adjustment;
	new_node->tenant = sm->tenant;
	// new_node->sandbox_meta = sm;
	sm->demand_node = new_node;
	assert(ps_list_singleton_d(new_node));
	ps_list_append_d(node_spot, new_node);
	dbf->demand_total = demand_sum + adjustment;
	return true;
err:
	return false;
}

void
dbf_list_force_add_extra_slack(void *dbf_raw, struct sandbox_metadata *sm, uint64_t adjustment)
{
	assert(dbf_raw != NULL);
	assert(sm);
	assert(sm->demand_node);
	assert(adjustment > 0);

	struct demand_node *node = sm->demand_node;
	assert(node->abs_deadline == sm->absolute_deadline);
	assert(node->demand >= adjustment);
	node->demand += adjustment;

	struct dbf_list *dbf = (struct dbf_list *)dbf_raw;
	dbf->demand_total += adjustment;
}

void
dbf_list_reduce_demand(struct sandbox_metadata *sm, uint64_t adjustment, bool delete_node)
{
	assert(sm);
	assert(sm->demand_node);
	assert(delete_node || adjustment > 0);

	struct demand_node *node = sm->demand_node;
	assert(node->abs_deadline == sm->absolute_deadline);
	assert(node->demand >= adjustment);
	node->demand -= adjustment;

	// assert(dbf->demand_total >= adjustment);
	// dbf->demand_total -= adjustment;

	if (delete_node) {
		assert(node->demand == 0);
		/* Clean up empty and repetitive nodes */
		ps_list_rem_d(node);
		free(node);
		node = NULL;
	}
}

static void
dbf_list_free(void *dbf)
{
	assert(dbf != NULL);

	free(dbf);
}

void *
dbf_list_initialize(uint32_t num_of_workers, uint8_t reservation_percentile, int worker_idx, struct tenant *tenant)
{
	struct dbf_config config = {
		// .try_update_demand_fn = dbf_list_try_add_new_demand,
		.get_worker_idx_fn = dbf_list_get_worker_idx,
		.get_time_of_oversupply_fn = dbf_list_get_time_of_oversupply,
		.print_fn = dbf_list_print,
		.free_fn = dbf_list_free
	};

	dbf_plug_functions(&config);

	assert(runtime_max_deadline > 0);

	struct dbf_list *dbf = (struct dbf_list *)calloc(1, sizeof(struct dbf_list));
	ps_list_head_init(&dbf->demands_list);

	dbf->max_relative_deadline = runtime_max_deadline;
	dbf->worker_idx = worker_idx;
	// uint32_t cpu_factor = (num_of_workers == 1) ? 1 : num_of_workers * RUNTIME_MAX_CPU_UTIL_PERCENTILE / 100;
	dbf->base_supply = /*runtime_quantum * */1.0*num_of_workers * reservation_percentile * RUNTIME_MAX_CPU_UTIL_PERCENTILE / 10000;
	dbf->tenant = tenant;

	return dbf;
}
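The list backend above keeps demand nodes sorted by absolute deadline and admits a new job only if, at the new deadline and at every later deadline already in the list, the cumulative demand stays within base_supply times the window since start_time. The following sketch (illustrative only, not part of the diff) restates that admission test over a plain sorted array; the struct and function names are hypothetical and the past-deadline bookkeeping of the real code is omitted.

#include <stdbool.h>
#include <stdint.h>

struct demand_point { uint64_t abs_deadline, demand; };

/* points must be sorted by abs_deadline */
static bool
list_admission_sketch(const struct demand_point *points, int n, double base_supply,
                      uint64_t start_time, uint64_t new_deadline, uint64_t new_demand)
{
	uint64_t demand_sum = 0;
	int i = 0;

	/* demand that must already be served before the new job's deadline */
	for (; i < n && points[i].abs_deadline < new_deadline; i++) demand_sum += points[i].demand;

	/* check at the new job's own deadline */
	demand_sum += new_demand;
	if (demand_sum > (uint64_t)((new_deadline - start_time) * base_supply)) return false;

	/* every later deadline must still have enough supply once the new demand is in */
	for (; i < n; i++) {
		demand_sum += points[i].demand;
		if (demand_sum > (uint64_t)((points[i].abs_deadline - start_time) * base_supply)) return false;
	}
	return true;
}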
@@ -0,0 +1,204 @@
#include <assert.h>
#include <errno.h>

#include "global_request_scheduler.h"
#include "listener_thread.h"
#include "panic.h"
#include "priority_queue.h"
#include "runtime.h"
#include "tenant_functions.h"
#include "sandbox_set_as_error.h"
#include "dbf.h"
#include "local_cleanup_queue.h"

struct priority_queue *global_request_scheduler_mtdbf;

lock_t global_lock;
// int max_global_runqueue_len = 0; //////////

/**
 * Pushes a sandbox request to the global runqueue
 * @param sandbox
 * @returns pointer to request if added. NULL otherwise
 */
static struct sandbox *
global_request_scheduler_mtdbf_add(struct sandbox *sandbox)
{
	assert(sandbox);
	assert(global_request_scheduler_mtdbf);
	assert(listener_thread_is_running());

	lock_node_t node = {};
	lock_lock(&global_lock, &node);

	int rc = priority_queue_enqueue_nolock(global_request_scheduler_mtdbf, sandbox);
	if (rc != 0) {
		assert(sandbox->response_code == 0);
		sandbox->response_code = 4293;
		sandbox = NULL; // TODO: FIX ME
		goto done;
	}

	sandbox->owned_worker_idx = -1;

	// if(priority_queue_length_nolock(global_request_scheduler_mtdbf) > max_global_runqueue_len) {
	// 	max_global_runqueue_len = priority_queue_length_nolock(global_request_scheduler_mtdbf);
	// 	printf("Global MAX Queue Length: %u\n", max_global_runqueue_len);
	// }
	// printf("GlobalLen: %d, Tenant: %s, Tenant-G: %d, Tenant-L: %d\n\n", priority_queue_length_nolock(global_request_scheduler_mtdbf), sandbox->tenant->name,
	//        priority_queue_length_nolock(sandbox->tenant->global_sandbox_metas), priority_queue_length_nolock(sandbox->tenant->local_sandbox_metas));

done:
	lock_unlock(&global_lock, &node);
	return sandbox;
}

/**
 * @param removed_sandbox pointer to set to the address of the removed sandbox request
 * @returns 0 if successful, -ENOENT if empty
 */
int
global_request_scheduler_mtdbf_remove(struct sandbox **removed_sandbox)
{
	/* This function won't be used with the MTDBF scheduler. Kept merely for the polymorphism. */
	return -1;
}

/**
 * @param removed_sandbox pointer to set to removed sandbox request
 * @param target_deadline the deadline that the request must be earlier than to dequeue
 * @returns 0 if successful, -ENOENT if empty or if request isn't earlier than target_deadline
 */
int
global_request_scheduler_mtdbf_remove_if_earlier(struct sandbox **removed_sandbox, uint64_t target_deadline)
{
	int rc = -ENOENT;

	const uint64_t now = __getcycles();
	struct sandbox *local = local_runqueue_get_next();

	uint64_t local_rem = local == NULL ? 0 : local->remaining_exec;

	lock_node_t node = {};
	lock_lock(&global_lock, &node);

	struct sandbox_metadata global_metadata = global_request_scheduler_peek_metadata();
	uint64_t global_deadline = global_metadata.absolute_deadline;

	if (USING_EARLIEST_START_FIRST) {
		if (global_deadline - global_metadata.remaining_exec >= target_deadline - local_rem) goto err_enoent;
	} else {
		if (global_deadline >= target_deadline) goto err_enoent;
	}
	// if (global_deadline == UINT64_MAX) goto err_enoent;

	/* Spot the sandbox to remove */
	struct sandbox *top_sandbox = NULL;
	rc = priority_queue_top_nolock(global_request_scheduler_mtdbf, (void **)&top_sandbox);
	assert(top_sandbox);
	assert(top_sandbox->absolute_deadline == global_deadline);
	assert(top_sandbox->remaining_exec == global_metadata.remaining_exec);
	assert(top_sandbox->state == SANDBOX_INITIALIZED || top_sandbox->state == SANDBOX_PREEMPTED);
	assert(top_sandbox->response_code == 0);

	if (top_sandbox->sandbox_meta->terminated) {
		assert(top_sandbox->sandbox_meta->error_code > 0);
		top_sandbox->response_code = top_sandbox->sandbox_meta->error_code;
	} else if (global_deadline < now + (!top_sandbox->exceeded_estimation ? top_sandbox->remaining_exec : 0)) {
		top_sandbox->response_code = top_sandbox->state == SANDBOX_INITIALIZED ? 4080 : 4082;
	} else if (USING_LOCAL_RUNQUEUE) {
		struct tenant *tenant = top_sandbox->tenant;
		struct route *route = top_sandbox->route;

		// assert(dbf_get_worker_idx(worker_dbf) == worker_thread_idx);
		// if (!dbf_try_update_demand(worker_dbf, now, route->relative_deadline,
		//                            global_deadline, top_sandbox->remaining_exec, DBF_CHECK_AND_ADD_DEMAND, NULL, NULL)) {
		// 	goto err_enoent;
		// }
	} else if (local) {
		assert(USING_WRITEBACK_FOR_PREEMPTION);
		assert(local->state == SANDBOX_INTERRUPTED);
		assert(local->writeback_preemption_in_progress == false);
		assert(local->owned_worker_idx >= 0);
		assert(local->pq_idx_in_runqueue >= 1);
		local->writeback_preemption_in_progress = true;
		local_runqueue_delete(local);
		// local->response_code = 5000;
		// interrupted_sandbox_exit();
	}

	top_sandbox->timestamp_of.dispatched = now; // remove the same op from scheduler validate and set_as_runable
	top_sandbox->owned_worker_idx = -2;
	// printf("Worker %i accepted a sandbox #%lu!\n", worker_thread_idx, top_sandbox->id);

	rc = priority_queue_dequeue_nolock(global_request_scheduler_mtdbf, (void **)removed_sandbox);
	assert(rc == 0);
	assert(*removed_sandbox == top_sandbox);

	assert(top_sandbox->state == SANDBOX_INITIALIZED || top_sandbox->state == SANDBOX_PREEMPTED);

	lock_unlock(&global_lock, &node);

done:
	return rc;
err_enoent:
	lock_unlock(&global_lock, &node);
	rc = -ENOENT;
	goto done;
}

/**
 * @param removed_sandbox pointer to set to removed sandbox request
 * @param target_deadline the deadline that the request must be earlier than to dequeue
 * @param mt_class the multi-tenancy class of the global request to compare the target deadline against
 * @returns 0 if successful, -ENOENT if empty or if request isn't earlier than target_deadline
 */
int
global_request_scheduler_mtdbf_remove_with_mt_class(struct sandbox **removed_sandbox, uint64_t target_deadline,
                                                    enum MULTI_TENANCY_CLASS target_mt_class)
{
	/* This function won't be used with the MTDBF scheduler. Kept merely for the polymorphism. */
	return -1;
}

/**
 * Peek at the priority of the highest priority task without having to take the lock
 * Because this is a min-heap PQ, the highest priority is the lowest 64-bit integer
 * This is used to store an absolute deadline
 * @returns value of highest priority value in queue or ULONG_MAX if empty
 */
static uint64_t
global_request_scheduler_mtdbf_peek(void)
{
	return priority_queue_peek(global_request_scheduler_mtdbf);
}


/**
 * Initializes the variant and registers against the polymorphic interface
 */
void
global_request_scheduler_mtdbf_initialize()
{
	global_request_scheduler_mtdbf = priority_queue_initialize_new(RUNTIME_RUNQUEUE_SIZE, false, USING_EARLIEST_START_FIRST ? sandbox_get_priority_global : sandbox_get_priority,
	                                                               global_request_scheduler_update_highest_priority,
	                                                               sandbox_update_pq_idx_in_runqueue);

	lock_init(&global_lock);

	struct global_request_scheduler_config config = {
		.add_fn = global_request_scheduler_mtdbf_add,
		.remove_fn = global_request_scheduler_mtdbf_remove,
		.remove_if_earlier_fn = global_request_scheduler_mtdbf_remove_if_earlier,
		.peek_fn = global_request_scheduler_mtdbf_peek
	};

	global_request_scheduler_initialize(&config);
}

void
global_request_scheduler_mtdbf_free()
{
	priority_queue_free(global_request_scheduler_mtdbf);
}
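One detail worth calling out in global_request_scheduler_mtdbf_remove_if_earlier() above: under USING_EARLIEST_START_FIRST the pull test compares the two candidates by absolute deadline minus remaining execution (their latest start times) rather than by raw deadline. A tiny illustrative restatement (not part of the diff, names are hypothetical):

static inline bool
global_head_wins_sketch(bool earliest_start_first, uint64_t global_deadline, uint64_t global_remaining,
                        uint64_t local_deadline, uint64_t local_remaining)
{
	if (earliest_start_first)
		return global_deadline - global_remaining < local_deadline - local_remaining;
	return global_deadline < local_deadline;
}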
@@ -0,0 +1,124 @@
#include <stdint.h>
#include <threads.h>

#include "arch/context.h"
#include "current_sandbox.h"
#include "debuglog.h"
#include "global_request_scheduler.h"
#include "local_runqueue.h"
#include "local_runqueue_mtdbf.h"
#include "panic.h"
#include "priority_queue.h"
#include "sandbox_functions.h"
#include "runtime.h"
#include "dbf.h"

thread_local struct priority_queue *local_runqueue_mtdbf;
// thread_local struct priority_queue *local_default_queue;

thread_local static int max_local_runqueue_len = 0; //////////

/**
 * Checks if the run queue is empty
 * @returns true if empty. false otherwise
 */
bool
local_runqueue_mtdbf_is_empty()
{
	return priority_queue_length_nolock(local_runqueue_mtdbf) == 0;
}

/**
 * Adds a sandbox to the run queue
 * @param sandbox the sandbox to add
 */
void
local_runqueue_mtdbf_add(struct sandbox *sandbox)
{
	assert(sandbox != NULL);

	int rc = priority_queue_enqueue_nolock(local_runqueue_mtdbf, sandbox);
	if (unlikely(rc == -ENOSPC)) {
		struct priority_queue *temp = priority_queue_grow_nolock(local_runqueue_mtdbf);
		if (unlikely(temp == NULL)) panic("Failed to grow local runqueue\n");
		local_runqueue_mtdbf = temp;
		rc = priority_queue_enqueue_nolock(local_runqueue_mtdbf, sandbox);
		if (unlikely(rc == -ENOSPC)) panic("Thread Runqueue is full!\n");
	}

	// if (sandbox->global_queue_type == 2) {
	// 	rc = priority_queue_enqueue_nolock(local_default_queue, sandbox);
	// 	assert(rc == 0);
	// }

	sandbox->owned_worker_idx = worker_thread_idx;

	if (priority_queue_length_nolock(local_runqueue_mtdbf) > max_local_runqueue_len) {
		max_local_runqueue_len = priority_queue_length_nolock(local_runqueue_mtdbf);
		debuglog("Local MAX Queue Length: %u", max_local_runqueue_len);
	}
}

/**
 * Deletes a sandbox from the runqueue
 * @param sandbox to delete
 */
static void
local_runqueue_mtdbf_delete(struct sandbox *sandbox)
{
	assert(sandbox != NULL);

	priority_queue_delete_by_idx_nolock(local_runqueue_mtdbf, sandbox, sandbox->pq_idx_in_runqueue);
	sandbox->owned_worker_idx = -2;
	// if (sandbox->pq_idx_in_default_queue >= 1) {
	// 	assert(sandbox->global_queue_type == 2 );
	// 	priority_queue_delete_by_idx_nolock(local_default_queue, sandbox, sandbox->pq_idx_in_default_queue);
	// }
}

/**
 * This function determines the next sandbox to run.
 * This is the head of the runqueue
 *
 * Execute the sandbox at the head of the thread local runqueue
 * @return the sandbox to execute or NULL if none are available
 */
struct sandbox *
local_runqueue_mtdbf_get_next()
{
	/* Get the deadline of the sandbox at the head of the local request queue */
	struct sandbox *next = NULL;
	int rc = priority_queue_top_nolock(local_runqueue_mtdbf, (void **)&next);

	if (rc == -ENOENT) return NULL;

	return next;
}

// static inline void
// sandbox_update_pq_idx_in_default_queue(void *element, size_t idx)
// {
// 	assert(element);
// 	struct sandbox *sandbox = (struct sandbox *)element;
// 	sandbox->pq_idx_in_default_queue = idx;
// }

/**
 * Registers the PS variant with the polymorphic interface
 */
void
local_runqueue_mtdbf_initialize()
{
	/* Initialize local state */
	local_runqueue_mtdbf = priority_queue_initialize_new(RUNTIME_RUNQUEUE_SIZE, false, sandbox_get_priority_global, NULL,
	                                                     sandbox_update_pq_idx_in_runqueue);

	/* Register Function Pointers for Abstract Scheduling API */
	struct local_runqueue_config config = { .add_fn = local_runqueue_mtdbf_add,
	                                        .is_empty_fn = local_runqueue_mtdbf_is_empty,
	                                        .delete_fn = local_runqueue_mtdbf_delete,
	                                        .get_next_fn = local_runqueue_mtdbf_get_next };

	local_runqueue_initialize(&config);
}
@@ -1,3 +1,172 @@
#include "scheduler.h"

enum SCHEDULER scheduler = SCHEDULER_EDF;

void
sandbox_process_scheduler_updates(struct sandbox *sandbox)
{
	if (scheduler == SCHEDULER_MTDS && tenant_is_paid(sandbox->tenant)) {
		atomic_fetch_sub(&sandbox->tenant->remaining_budget, sandbox->last_running_state_duration);
		sandbox->last_running_state_duration = 0;
		return;
	}

#ifdef TRAFFIC_CONTROL
	assert(sandbox->sandbox_meta);
	assert(sandbox == sandbox->sandbox_meta->sandbox_shadow);
	assert(sandbox->id == sandbox->sandbox_meta->id);

	struct comm_with_worker *cfw = &comm_from_workers[worker_thread_idx];
	assert(cfw);

	const uint64_t now = __getcycles();

	struct message new_message = {
		.sandbox = sandbox,
		.sandbox_id = sandbox->id,
		.sandbox_meta = sandbox->sandbox_meta,
		.state = sandbox->state,
		.sender_worker_idx = worker_thread_idx,
		.exceeded_estimation = sandbox->exceeded_estimation,
		.total_running_duration = 0,
		.timestamp = now
	};

	if (sandbox->state == SANDBOX_RETURNED || sandbox->state == SANDBOX_ERROR) {
		uint64_t adjustment = sandbox->last_running_state_duration;
		if (sandbox->remaining_exec < adjustment) adjustment = sandbox->remaining_exec;
		// const uint64_t adjustment = sandbox->remaining_exec;

		if (USING_LOCAL_RUNQUEUE && adjustment > 0 && sandbox->response_code == 0) {
			// dbf_try_update_demand(worker_dbf, sandbox->timestamp_of.dispatched,
			//                       sandbox->route->relative_deadline, sandbox->absolute_deadline, sandbox->remaining_exec,
			//                       DBF_DELETE_EXISTING_DEMAND, NULL, NULL);
		}

		// sandbox->remaining_exec = 0;

		new_message.message_type = MESSAGE_CFW_DELETE_SANDBOX;
		new_message.adjustment = adjustment;
		// new_message.remaining_exec = 0;
		new_message.remaining_exec = sandbox->remaining_exec;
		new_message.total_running_duration = sandbox->duration_of_state[SANDBOX_RUNNING_USER] + sandbox->duration_of_state[SANDBOX_RUNNING_SYS];

		if (!ck_ring_enqueue_spsc_message(&cfw->worker_ring, cfw->worker_ring_buffer, &new_message)) {
			panic("Ring buffer was full and the enqueue operation failed!");
		}

		return;
	}

	/* If the sandbox is not in a terminal state (handled above), the only state it can be in here is INTERRUPTED */
	assert(sandbox->state == SANDBOX_INTERRUPTED);
	assert(sandbox == current_sandbox_get());
	assert(sandbox->response_code == 0);
	assert(sandbox->remaining_exec > 0);
	assert(!sandbox->exceeded_estimation || sandbox->remaining_exec == runtime_quantum);

	if (sandbox->sandbox_meta->terminated) {
		assert(sandbox->sandbox_meta->error_code > 0);
		// dbf_try_update_demand(worker_dbf, sandbox->timestamp_of.dispatched,
		//                       sandbox->route->relative_deadline, sandbox->absolute_deadline, sandbox->remaining_exec,
		//                       DBF_DELETE_EXISTING_DEMAND, NULL, NULL);
		sandbox->response_code = sandbox->sandbox_meta->error_code;
		interrupted_sandbox_exit();
		return;
	}

	if (sandbox->absolute_deadline < now + (!sandbox->exceeded_estimation ? sandbox->remaining_exec : 0)) {
		// dbf_try_update_demand(worker_dbf, sandbox->timestamp_of.dispatched,
		//                       sandbox->route->relative_deadline, sandbox->absolute_deadline, sandbox->remaining_exec,
		//                       DBF_DELETE_EXISTING_DEMAND, NULL, NULL);
		sandbox->response_code = 4081;
		interrupted_sandbox_exit();
		return;
	}

	dbf_update_mode_t dbf_reduce_mode = DBF_REDUCE_EXISTING_DEMAND;
	uint64_t adjustment = sandbox->last_running_state_duration;
	if (sandbox->remaining_exec < sandbox->last_running_state_duration || sandbox->exceeded_estimation) {
		/* To avoid less-than-quantum updates, manually set the adjustment to the quantum */
		adjustment = sandbox->remaining_exec;
		dbf_reduce_mode = DBF_DELETE_EXISTING_DEMAND;
	}

	sandbox->last_running_state_duration = 0;
	sandbox->remaining_exec -= adjustment;

	new_message.adjustment = adjustment;
	new_message.message_type = MESSAGE_CFW_REDUCE_DEMAND;
	new_message.remaining_exec = sandbox->remaining_exec;

	if (USING_LOCAL_RUNQUEUE /* && !sandbox->exceeded_estimation */) {
		// dbf_try_update_demand(worker_dbf, sandbox->timestamp_of.dispatched,
		//                       sandbox->route->relative_deadline, sandbox->absolute_deadline, adjustment,
		//                       dbf_reduce_mode, NULL, NULL);
	}

	if (!ck_ring_enqueue_spsc_message(&cfw->worker_ring, cfw->worker_ring_buffer, &new_message)) {
		panic("Ring buffer was full and the enqueue operation failed!");
	}

	if (sandbox->remaining_exec == 0) {
		/* OVERSHOOT case! */
		// printf("Went over estimation - sandbox_id=%lu of %s!\n", sandbox->id, sandbox->tenant->name);
		if (sandbox->exceeded_estimation == false) sandbox->tenant->num_of_overshooted_sandboxes++;
		sandbox->exceeded_estimation = true;
		sandbox->num_of_overshoots++;
		if (sandbox->num_of_overshoots > sandbox->tenant->max_overshoot_of_same_sandbox) {
			sandbox->tenant->max_overshoot_of_same_sandbox = sandbox->num_of_overshoots;
		}

		const uint64_t extra_demand = runtime_quantum;

		if (USING_LOCAL_RUNQUEUE && USING_TRY_LOCAL_EXTRA
		    /*&& dbf_try_update_demand(worker_dbf, now, sandbox->route->relative_deadline,
		         sandbox->absolute_deadline, extra_demand, DBF_CHECK_AND_ADD_DEMAND, &new_message, NULL)*/
		    || (!USING_LOCAL_RUNQUEUE && USING_TRY_LOCAL_EXTRA)) {
			/* Worker DBF has supply left */
			// printf("Worker %d granted extra for sandbox %lu!\n", worker_thread_idx, sandbox->id);

			sandbox->remaining_exec = extra_demand;

			new_message.adjustment = extra_demand;
			new_message.exceeded_estimation = true;
			new_message.message_type = MESSAGE_CFW_EXTRA_DEMAND_REQUEST;
			new_message.remaining_exec = extra_demand;

			if (!ck_ring_enqueue_spsc_message(&cfw->worker_ring, cfw->worker_ring_buffer, &new_message)) {
				panic("Ring buffer was full and the enqueue operation failed!");
			}

			return;
		} else if (USING_WRITEBACK_FOR_OVERSHOOT) {
			/* Write back */
			// printf("No supply left in worker #%d. So, writeback sandbox=%lu of %s\n", worker_thread_idx, sandbox->id, sandbox->tenant->name);

			sandbox->remaining_exec = 0;
			sandbox->writeback_overshoot_in_progress = true;
			local_runqueue_delete(sandbox); // TODO: This needs to go in preemp_sandbox state change!
			return;
		} else {
			/* Kill work */
			// printf("No supply left in worker #%d. So, kill sandbox=%lu of %s\n", worker_thread_idx, sandbox->id, sandbox->tenant->name);

			assert(sandbox->response_code == 0);
			sandbox->response_code = 4093;
			interrupted_sandbox_exit();
			return;
		}
	}

#else

	if (sandbox->remaining_exec > sandbox->last_running_state_duration) {
		sandbox->remaining_exec -= sandbox->last_running_state_duration;
	} else {
		sandbox->remaining_exec = 0;
	}
	sandbox->last_running_state_duration = 0;

#endif
}
@ -0,0 +1,254 @@
|
||||
#include <unistd.h>
|
||||
|
||||
#include "traffic_control.h"
|
||||
#include "debuglog.h"
|
||||
#include "global_request_scheduler_mtdbf.h"
|
||||
#include "tenant_functions.h"
|
||||
#include "sandbox_set_as_error.h"
|
||||
#include "dbf.h"
|
||||
|
||||
#ifdef TRAFFIC_CONTROL
|
||||
// void *global_dbf;
|
||||
void **global_virt_worker_dbfs;
|
||||
void *global_worker_dbf; // temp ///////////
|
||||
|
||||
extern struct priority_queue *global_request_scheduler_mtdbf;//, *global_default;
|
||||
extern lock_t global_lock;
|
||||
|
||||
void
|
||||
traffic_control_initialize()
|
||||
{
|
||||
assert(runtime_max_deadline > 0);
|
||||
|
||||
const int N_VIRT_WORKERS_DBF = USING_AGGREGATED_GLOBAL_DBF ? 1 : runtime_worker_threads_count;
|
||||
global_virt_worker_dbfs = malloc(N_VIRT_WORKERS_DBF * sizeof(void*));
|
||||
for (int i = 0; i < N_VIRT_WORKERS_DBF; i++) {
|
||||
global_virt_worker_dbfs[i] = dbf_initialize(runtime_worker_threads_count/N_VIRT_WORKERS_DBF, 100, -1, NULL);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void
|
||||
traffic_control_log_decision(const int num, const bool admitted)
|
||||
{
|
||||
#ifdef LOG_TRAFFIC_CONTROL
|
||||
debuglog("Admission case #: %d, Admitted? %s\n", num, admitted ? "yes" : "no");
|
||||
#endif /* LOG_TRAFFIC_CONTROL */
|
||||
}
|
||||
int ind = 0;
|
||||
int
|
||||
global_virt_worker_dbfs_try_update_demand(uint64_t start_time, uint64_t adjustment, uint64_t *time_oversupply_p, struct sandbox_metadata *sm)
|
||||
{
|
||||
bool global_can_admit = false;
|
||||
uint64_t time_oversupply = 0;
|
||||
const uint64_t absolute_deadline = sm->absolute_deadline;
|
||||
const int N_VIRT_WORKERS_DBF = USING_AGGREGATED_GLOBAL_DBF ? 1 : runtime_worker_threads_count;
|
||||
|
||||
/* Hack the start time to make sure demand less than the quantum is also served */
|
||||
if((absolute_deadline - start_time) * N_VIRT_WORKERS_DBF < runtime_quantum) start_time = absolute_deadline - runtime_quantum;
|
||||
|
||||
for (int i = ind; i < (N_VIRT_WORKERS_DBF) + ind; i++) {
|
||||
assert(global_virt_worker_dbfs);
|
||||
void *global_dbf = global_virt_worker_dbfs[i%N_VIRT_WORKERS_DBF];
|
||||
global_can_admit = dbf_list_try_add_new_demand(global_dbf, start_time, absolute_deadline, adjustment, sm);
|
||||
if (global_can_admit) {
|
||||
ind = (i+1)%N_VIRT_WORKERS_DBF;
|
||||
return i%N_VIRT_WORKERS_DBF;
|
||||
}
|
||||
|
||||
if (time_oversupply < dbf_get_time_of_oversupply(global_dbf)) time_oversupply = dbf_get_time_of_oversupply(global_dbf);
|
||||
}
|
||||
|
||||
*time_oversupply_p = time_oversupply;
|
||||
return -1;
|
||||
}
|
||||
|
||||

/*
 * Decide whether a sandbox's estimated execution can be admitted. Returns the amount of
 * work admitted (0 if denied), reports the decision via *ret_code, and reports the
 * virtual worker DBF that absorbed the demand via *worker_id_virtual.
 */
uint64_t
traffic_control_decide(struct sandbox_metadata *sandbox_meta, const uint64_t start_time, const uint64_t estimated_execution, int *ret_code, int *worker_id_virtual)
{
	/* Nominal non-zero value in case traffic control is disabled */
	uint64_t work_admitted = estimated_execution;

	int rc = 0;
	int worker_id_v = -1;

	assert(sandbox_meta);
	struct tenant *tenant = sandbox_meta->tenant;
	const uint64_t absolute_deadline = sandbox_meta->absolute_deadline;

	uint64_t time_global_oversupply = 0;
	worker_id_v = global_virt_worker_dbfs_try_update_demand(start_time, estimated_execution, &time_global_oversupply, sandbox_meta);
	bool global_can_admit = worker_id_v >= 0;

	// bool tenant_can_admit = tenant_try_add_job(tenant, start_time, estimated_execution, TRS_CHECK_GUARANTEED, sandbox_meta);
	bool tenant_can_admit = tenant_can_admit_guaranteed(tenant, start_time, estimated_execution);

	if (tenant_can_admit && global_can_admit) {
		/* Case #1: Both the tenant and the overall system are under-utilized, so just admit. */
		tenant_can_admit = tenant_try_add_job_as_guaranteed(tenant, start_time, estimated_execution, sandbox_meta);
		assert(tenant_can_admit);
		traffic_control_log_decision(1, true);
		rc = 1;
	} else if (!tenant_can_admit && global_can_admit) {
		/* Case #2: The tenant is over-utilized, but the system is under-utilized, so admit for work conservation. */
		if (USING_WORK_CONSERVATION == false) {
			traffic_control_log_decision(2, false);
			dbf_try_update_demand(global_virt_worker_dbfs[worker_id_v], start_time, 0, absolute_deadline,
			                      estimated_execution, DBF_DELETE_EXISTING_DEMAND, NULL, sandbox_meta);
			goto any_work_not_admitted;
		}

		traffic_control_log_decision(2, true);
		rc = 2;
	} else if (tenant_can_admit && !global_can_admit) {
		/* Case #3: The tenant is under-utilized, but the system is over-utilized, so shed work and then admit. */
		assert(time_global_oversupply >= absolute_deadline);

		int worker_id_virt_just_shed;
		while (!global_can_admit) {
			assert(worker_id_v < 0);

			worker_id_virt_just_shed = -1;
			uint64_t cleared_demand = traffic_control_shed_work(tenant, time_global_oversupply, &worker_id_virt_just_shed, false);
			if (cleared_demand == 0) {
				/* No "bad" tenant requests are left in the global queue, so we have to deny the guaranteed tenant job. */
				traffic_control_log_decision(3, false);
				goto guaranteed_work_not_admitted;
			}

			assert(worker_id_virt_just_shed >= 0);
			void *global_dbf = global_virt_worker_dbfs[worker_id_virt_just_shed];
			global_can_admit = dbf_list_try_add_new_demand(global_dbf, start_time, absolute_deadline, estimated_execution, sandbox_meta);
			time_global_oversupply = dbf_get_time_of_oversupply(global_dbf);
		}

		worker_id_v = worker_id_virt_just_shed;
		tenant_can_admit = tenant_try_add_job_as_guaranteed(tenant, start_time, estimated_execution, sandbox_meta);
		assert(tenant_can_admit);
		traffic_control_log_decision(3, true);
		rc = 1;
	} else if (!tenant_can_admit && !global_can_admit) {
		/* Case #4: Both the tenant and the system are over-utilized. Try to shed work from another
		   tenant; deny if nothing can be shed. */

		int worker_id_virt_just_shed;
		while (!global_can_admit) {
			assert(worker_id_v < 0);

			worker_id_virt_just_shed = -1;
			uint64_t cleared_demand = traffic_control_shed_work(tenant, time_global_oversupply, &worker_id_virt_just_shed, true);
			if (cleared_demand == 0) {
				/* No "bad" tenant requests are left in the global queue, so we have to deny this new job. */
				traffic_control_log_decision(4, false);
				goto any_work_not_admitted;
			}

			assert(worker_id_virt_just_shed >= 0);
			void *global_dbf = global_virt_worker_dbfs[worker_id_virt_just_shed];
			global_can_admit = dbf_list_try_add_new_demand(global_dbf, start_time, absolute_deadline, estimated_execution, sandbox_meta);
			time_global_oversupply = dbf_get_time_of_oversupply(global_dbf);
		}

		assert(global_can_admit);
		worker_id_v = worker_id_virt_just_shed;
		rc = 2;
	}

done:
	*ret_code = rc;
	*worker_id_virtual = worker_id_v;
	return work_admitted;
any_work_not_admitted:
	work_admitted = 0;
	rc = sandbox_meta->exceeded_estimation ? 4295 : 4290;
	goto done;
guaranteed_work_not_admitted:
	work_admitted = 0;
	rc = sandbox_meta->exceeded_estimation ? 4296 : 4291;
	goto done;
}
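
/*
 * Shed one job from the tenant contributing the most oversupply at time_of_oversupply,
 * excluding tenant_to_exclude. The victim is either removed from the global queue
 * (error code 4090) or, if already dispatched, its worker is signaled to shed it
 * (error code 4091). Returns the demand cleared by the victim and reports the virtual
 * worker DBF it was charged to via *worker_id_virt_just_shed; returns 0 if no suitable
 * victim exists.
 */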
uint64_t
traffic_control_shed_work(struct tenant *tenant_to_exclude, uint64_t time_of_oversupply, int *worker_id_virt_just_shed, bool weak_shed)
{
	uint64_t cleared_demand = 0;
	*worker_id_virt_just_shed = -1;
	struct sandbox_metadata *sandbox_meta = NULL;

	struct tenant *tenant_to_punish = tenant_database_find_tenant_most_oversupply(tenant_to_exclude, time_of_oversupply, weak_shed, &sandbox_meta);
	if (tenant_to_punish == NULL) {
		assert(sandbox_meta == NULL);
		goto done;
	}
	if (tenant_to_punish == tenant_to_exclude) {
		/* TODO: Should we be able to shed work from the excluded tenant itself? */
		goto done;
	}

	assert(sandbox_meta);
	assert(sandbox_meta->tenant == tenant_to_punish);
	assert(sandbox_meta->absolute_deadline <= time_of_oversupply);
	assert(sandbox_meta->terminated == false);
	assert(sandbox_meta->error_code == 0);

	if (sandbox_meta->state == SANDBOX_INITIALIZED) {
		/* The victim job is still waiting in the global queue */
		assert(sandbox_meta->tenant_queue == tenant_to_punish->global_sandbox_metas);
		sandbox_meta->error_code = 4090;
		assert(sandbox_meta->owned_worker_idx == -2);
	} else {
		/* The victim job has already been dispatched, so ask its worker to shed it */
		assert(sandbox_meta->tenant_queue == tenant_to_punish->local_sandbox_metas);
		sandbox_meta->error_code = 4091;

		struct message new_message = { 0 };
		if (sandbox_meta->owned_worker_idx >= 0 && sandbox_refs[sandbox_meta->id % RUNTIME_MAX_ALIVE_SANDBOXES]) {
			assert(comm_to_workers);
			struct comm_with_worker *ctw = &comm_to_workers[sandbox_meta->owned_worker_idx];
			assert(ctw);
			assert(ctw->worker_idx == sandbox_meta->owned_worker_idx);
			assert(ck_ring_size(&ctw->worker_ring) < LISTENER_THREAD_RING_SIZE);

			new_message.sandbox_meta = sandbox_meta;
			new_message.sandbox      = sandbox_meta->sandbox_shadow;
			new_message.sandbox_id   = sandbox_meta->id;
			new_message.message_type = MESSAGE_CTW_SHED_CURRENT_JOB;

			if (!ck_ring_enqueue_spsc_message(&ctw->worker_ring, ctw->worker_ring_buffer, &new_message)) {
				panic("Ring buffer was full and the enqueue failed!");
			}
			pthread_kill(runtime_worker_threads[sandbox_meta->owned_worker_idx], SIGALRM);
		}
	}

	struct sandbox_metadata *sm_to_remove = NULL;
	int rc = priority_queue_dequeue_nolock(sandbox_meta->tenant_queue, (void **)&sm_to_remove);
	assert(rc == 0);
	assert(sandbox_meta == sm_to_remove);

	assert(sandbox_meta->trs_job_node == NULL);
	assert(sandbox_meta->remaining_exec > 0);
	assert(sandbox_meta->global_queue_type == 2);
	assert(sandbox_meta->worker_id_virt >= 0);

	void *global_dbf = global_virt_worker_dbfs[sandbox_meta->worker_id_virt];
	dbf_list_reduce_demand(sandbox_meta, sandbox_meta->remaining_exec + sandbox_meta->extra_slack, true);
	sandbox_meta->demand_node = NULL;

	cleared_demand = sandbox_meta->remaining_exec;
	// sandbox_meta->remaining_exec = 0;
	// sandbox_meta->extra_slack = 0;
	*worker_id_virt_just_shed = sandbox_meta->worker_id_virt;
	sandbox_meta->terminated = true;
done:
	return cleared_demand;
}

#endif /* TRAFFIC_CONTROL */
@ -0,0 +1,4 @@
SLEDGE_SCHEDULER=MTDBF
SLEDGE_DISABLE_PREEMPTION=false
SLEDGE_SANDBOX_PERF_LOG=perf.log
SLEDGE_SPINLOOP_PAUSE_ENABLED=false
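
These four variables mirror the settings used by the test Makefile below. A minimal sketch of loading them before starting the runtime (the file name .env and the ../../runtime/bin path are assumptions patterned on that Makefile, not taken from this change):

#!/bin/bash
# Sketch: export the variables from the file above, then launch sledgert.
# Assumes the file is saved as .env and sledgert lives in ../../runtime/bin (hypothetical paths).
set -a           # auto-export every variable assigned while sourcing
source ./.env
set +a
LD_LIBRARY_PATH=../../runtime/bin ../../runtime/bin/sledgert spec.json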
@ -0,0 +1 @@
out.png
@ -0,0 +1,102 @@
SLEDGE_BINARY_DIR=../../runtime/bin
# Pass arguments to change the target host, e.g.: make client-lpd HOST=10.10.1.4
# (see the usage example after this Makefile)
HOST?=localhost
# HOST=arena0.andrew.cmu.edu
# HOST=c220g2-011017.wisc.cloudlab.us
PORT0=10000
PORT1=15000
PORT2=20000
PORT3=25000
PORT4=30000
PORT5=35000
PORT6=40000
HEY_OPTS=-disable-compression -disable-keepalive -disable-redirects

default: run

clean:
	rm -rf res/*

run:
	SLEDGE_SIGALRM_HANDLER=TRIAGED SLEDGE_SCHEDULER=MTDBF SLEDGE_SPINLOOP_PAUSE_ENABLED=true SLEDGE_HTTP_SESSION_PERF_LOG=http_perf.log SLEDGE_SANDBOX_PERF_LOG=perf.log LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} ${SLEDGE_BINARY_DIR}/sledgert spec.json

debug:
	SLEDGE_SCHEDULER=MTDBF SLEDGE_SPINLOOP_PAUSE_ENABLED=false SLEDGE_NWORKERS=18 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} gdb ${SLEDGE_BINARY_DIR}/sledgert \
		--eval-command="handle SIGUSR1 noprint nostop" \
		--eval-command="handle SIGPIPE noprint nostop" \
		--eval-command="set pagination off" \
		--eval-command="set print pretty" \
		--eval-command="run spec.json"

valgrind:
	SLEDGE_DISABLE_PREEMPTION=true SLEDGE_NWORKERS=1 LD_LIBRARY_PATH=${SLEDGE_BINARY_DIR} valgrind --leak-check=full --max-stackframe=11150456 --run-libc-freeres=no --run-cxx-freeres=no ${SLEDGE_BINARY_DIR}/sledgert spec.json


client-cnn:
	curl -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@input-cnn/faces01.jpg" "${HOST}:${PORT0}/cnn"

client-cifar10:
	curl -H 'Expect: ' -H "Content-Type: image/bmp" --data-binary "@input-cifar10/airplane1.bmp" "${HOST}:${PORT1}/cifar10"

client-gocr:
	curl -H 'Expect: ' -H "Content-Type: application/octet-stream" --data-binary "@input-gocr/5x8.pnm" "${HOST}:${PORT2}/gocr"

client-lpd:
	# curl -H 'Expect: ' -H "Content-Type: image/png" --data-binary "@input-lpd-png/Cars0.png" "${HOST}:${PORT3}/lpd"
	curl -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@input-lpd-jpg/Cars0.jpg" "${HOST}:${PORT3}/lpd"

client-resize:
	curl -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@input-resize/picsum_512x512_01.jpg" "${HOST}:${PORT4}/resize" --output "out-resize.jpg"

client-ekf:
	curl -H 'Expect: ' -H "Content-Type: application/octet-stream" --data-binary "@input-ekf/iter00.dat" "${HOST}:${PORT5}/ekf" --output "out-ekf-iter00.dat"

client-fib-curl:
	curl -i -H 'Expect: ' -H "Content-Type: text/plain" "${HOST}:${PORT6}/fib?30"

########################################## Choose a random file to send with curl: ##########################################
client-cnn-random:
	@dir="input-cnn"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
	curl -s -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT0}/cnn"

client-cifar10-random:
	@dir="input-cifar10"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
	curl -s -H 'Expect: ' -H "Content-Type: image/bmp" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT1}/cifar10"

client-gocr-random:
	@dir="input-gocr"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
	curl -s -H 'Expect: ' -H "Content-Type: application/octet-stream" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT2}/gocr"

client-lpd-random:
	# @dir="input-lpd-png"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
	# curl -s -H 'Expect: ' -H "Content-Type: image/png" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT3}/lpd"
	@dir="input-lpd-jpg"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
	curl -s -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT3}/lpd"

client-resize-random:
	@dir="input-resize"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
	curl -s -H 'Expect: ' -H "Content-Type: image/jpeg" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT4}/resize" --output "out-resize-$$random_file"

client-ekf-random:
	@dir="input-ekf"; random_file="$$(ls $$dir | shuf -n 1)"; echo "Random file: $$random_file"; \
	curl -s -H 'Expect: ' -H "Content-Type: application/octet-stream" --data-binary "@$$dir/$$random_file" "${HOST}:${PORT5}/ekf" --output "out-ekf-$$random_file"
#############################################################################################################################

client-fib-once:
	echo 30 | http ${HOST}:${PORT6}/fib
	# http ${HOST}:${PORT6}/fib?30

client-fib-loadtest:
	loadtest -n 10 -c 10 -P 30 "http://${HOST}:${PORT6}/fib"

client-fib-hey:
	hey ${HEY_OPTS} -z 10s -c 72 -t 0 -o csv -m POST -d "30\n" "http://${HOST}:${PORT6}/fib"

client-fib-wrk:
	wrk -t 1 -c 1 -d 5s -R 1 "http://${HOST}:${PORT6}/fib?30"


client-admin:
	echo 5 | http ${HOST}:55555/admin

client-terminator:
	echo 5 | http ${HOST}:55555/terminator
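
The client targets can be pointed at a remote runtime by overriding HOST on the command line, as noted at the top of the Makefile; the address below comes from that comment and is purely illustrative:

# Illustrative host; substitute the machine actually running sledgert.
make client-lpd HOST=10.10.1.4
make client-fib-hey HOST=10.10.1.4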
@ -0,0 +1,13 @@
#!/bin/bash
# Generate OCR test inputs at 72/108/144 DPI: render random dictionary words to PNG
# with pango-view, then convert each image to PNM with pngtopnm.

VARYING=(72 108 144)

for var in "${VARYING[@]}"; do
	mkdir -p "$var"dpi
	mkdir -p "$var"dpi-orig
	for ((i=5; i<=15; i++)); do
		shuf -n10 /usr/share/dict/american-english > "$var"dpi-orig/"$i"words.txt
		pango-view --dpi="$var" --font=mono -qo "$var"dpi-orig/"$var"dpi_"$i"words.png "$var"dpi-orig/"$i"words.txt
		pngtopnm "$var"dpi-orig/"$var"dpi_"$i"words.png > "$var"dpi/"$var"dpi_"$i"words.pnm
	done
done
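
A generated sample can then be posted to the OCR route the same way as the Makefile's client-gocr target; a minimal sketch, assuming the runtime is reachable on localhost and the gocr route is on port 20000 as configured in the Makefile above:

# 72dpi/72dpi_5words.pnm is one of the files produced by the loop above (var=72, i=5).
curl -H 'Expect: ' -H "Content-Type: application/octet-stream" \
     --data-binary "@72dpi/72dpi_5words.pnm" "localhost:20000/gocr"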
(binary image files added: 23 images, ~3.1 KiB each)