// kernel/sched/scheduler.rs
use core::{ffi::c_void, sync::atomic::AtomicBool};

use super::task::{Task, TaskId};
use crate::{
    mem::{self, array::IndexMap, heap::BinaryHeap, queue::Queue},
    sched::{
        task::TaskDescriptor,
        thread::{RunState, ThreadMap, ThreadUId, Timing},
    },
    sync::spinlock::SpinLocked,
    utils,
};
16
/// Global scheduler instance, guarded by a spinlock so it can be accessed
/// from interrupt context (see `sched_enter`).
pub static SCHEDULER: SpinLocked<Scheduler> = SpinLocked::new(Scheduler::new());
/// Whether scheduling is active; read with acquire / written with release
/// ordering via `enabled` / `set_enabled`.
static SCHEDULER_ENABLED: AtomicBool = AtomicBool::new(false);
20
/// Kernel thread scheduler state.
pub struct Scheduler {
    /// Thread currently running on the CPU, if any.
    current: Option<ThreadUId>,
    /// Tick budget granted to the current thread (its `exec_time`).
    current_interval: usize,
    /// User tasks, keyed by slot index; capacity 8.
    user_tasks: IndexMap<usize, Task, 8>,
    /// All threads known to the scheduler; capacity 8.
    threads: ThreadMap<8>,
    /// Ready queue of `(key, thread)` pairs; the key is the thread's period on
    /// first insertion and its exec time afterwards. Ordering (min/max) is
    /// defined by `crate::mem::heap` — not visible here.
    queue: BinaryHeap<(usize, ThreadUId), 32>,
    /// Delayed threads as `(thread, remaining ticks)`; the front entry is
    /// counted down each tick by `fire_thread_if_necessary`.
    callbacks: Queue<(ThreadUId, usize), 32>,
    /// Ticks elapsed since the last interval reset in `tick`.
    time: usize,
}
39
40impl Scheduler {
41 pub const fn new() -> Self {
43 Self {
44 current: None,
45 current_interval: 0,
46 user_tasks: IndexMap::new(),
47 threads: ThreadMap::new(),
48 queue: BinaryHeap::new(),
49 callbacks: Queue::new(),
50 time: 0,
51 }
52 }
53
54 pub fn create_task(&mut self, desc: TaskDescriptor) -> Result<TaskId, utils::KernelError> {
55 let size = mem::align_up(desc.mem_size);
56 let idx = self
57 .user_tasks
58 .find_empty()
59 .ok_or(utils::KernelError::OutOfMemory)?;
60 let task_id = TaskId::new_user(idx);
61
62 let task = Task::new(size, task_id)?;
63 self.user_tasks.insert(&idx, task)?;
64 Ok(task_id)
65 }
66
67 pub fn create_thread(
68 &mut self,
69 entry: extern "C" fn(),
70 fin: Option<extern "C" fn() -> !>,
71 timing: Timing,
72 task_id: TaskId,
73 ) -> Result<ThreadUId, utils::KernelError> {
74 let task_idx: usize = task_id.into();
75
76 if let Some(task) = self.user_tasks.get_mut(&task_idx) {
77 let desc = task.create_thread(entry, fin, timing)?;
78 let id = self.threads.create(desc)?;
79 self.queue.push((timing.period, id))?;
80 Ok(id)
81 } else {
82 Err(utils::KernelError::InvalidArgument)
83 }
84 }
85
86 fn update_current_ctx(&mut self, ctx: *mut c_void) {
90 if let Some(id) = self.current {
91 if let Some(thread) = self.threads.get_mut(&id) {
92 thread
93 .update_sp(ctx)
94 .expect("Failed to update thread context");
95 }
96 }
97 }
98
99 fn select_new_thread(&mut self) -> Option<*mut c_void> {
105 if let Some(id) = self.queue.pop().map(|(_, id)| id) {
106 if let Some(id) = self.current {
108 if let Some(thread) = self.threads.get_mut(&id) {
109 thread.update_run_state(RunState::Ready);
110 let delay = self.callbacks.back().map(|(_, delay)| *delay).unwrap_or(0);
112 if thread.timing().period > (self.time + delay) {
114 let _ = self
116 .callbacks
117 .push_back((id, thread.timing().period - (self.time + delay)));
118 } else {
119 let _ = self.queue.push((thread.timing().exec_time, id));
121 }
122 }
123 }
124
125 if let Some(thread) = self.threads.get_mut(&id) {
126 thread.update_run_state(RunState::Runs);
127
128 self.current_interval = thread.timing().exec_time;
130 self.current = Some(id);
131
132 return Some(thread.sp());
134 }
135 }
136
137 None
138 }
139
140 fn fire_thread_if_necessary(&mut self) -> bool {
144 let mut found = false;
145 while let Some((id, cnt)) = self.callbacks.front().cloned() {
146 if cnt - 1 == 0 {
148 self.callbacks.pop_front();
149 if let Some(thread) = self.threads.get_mut(&id) {
150 thread.update_run_state(RunState::Ready);
151
152 let _ = self.queue.push((thread.timing().exec_time, id));
153 found = true;
154 }
155 } else {
156 let _ = self.callbacks.insert(0, (id, cnt - 1));
158 break;
159 }
160 }
161
162 found
163 }
164
165 pub fn tick(&mut self) -> bool {
167 self.time += 1;
168
169 if self.fire_thread_if_necessary() {
171 return true;
172 }
173
174 if self.time >= self.current_interval {
176 self.time = 0;
177 return true;
178 }
179
180 false
181 }
182}
183
184pub fn enabled() -> bool {
185 SCHEDULER_ENABLED.load(core::sync::atomic::Ordering::Acquire)
186}
187
188pub fn set_enabled(enabled: bool) {
189 SCHEDULER_ENABLED.store(enabled, core::sync::atomic::Ordering::Release);
190}
191
192#[unsafe(no_mangle)]
195pub extern "C" fn sched_enter(ctx: *mut c_void) -> *mut c_void {
196 {
197 let mut scheduler = SCHEDULER.lock();
198 scheduler.update_current_ctx(ctx);
200
201 scheduler.select_new_thread().unwrap_or(ctx)
203 }
204}