Felix Kopp / Ardix · Commits

Commit 104578d0 (Verified)
Authored Aug 12, 2021 by Felix Kopp

    sched: add thread management syscalls

Parent: a370ef69
Changes: 22
arch/at91sam3x8e/entry.c

@@ -16,7 +16,7 @@
 extern uint16_t __syscall_return_point;
 #endif

-void arch_enter(struct exc_context *context)
+void enter_syscall(struct exc_context *context)
 {
 	enum syscall number = sc_num(context);
 	long (*handler)(sysarg_t arg1, sysarg_t arg2, sysarg_t arg3,

@@ -47,6 +47,8 @@ void arch_enter(struct exc_context *context)
 		return;
 	}

+	current->tcb.exc_context = context;
+
 	/* TODO: not every syscall uses the max amount of parameters (duh) */
 	sc_ret = handler(sc_arg1(context), sc_arg2(context), sc_arg3(context),
 			 sc_arg4(context), sc_arg5(context), sc_arg6(context));

@@ -54,6 +56,12 @@ void arch_enter(struct exc_context *context)
 	sc_set_rval(context, sc_ret);
 }

+void enter_sched(struct exc_context *context)
+{
+	current->tcb.exc_context = context;
+	schedule();
+}
+
 /*
  * This file is part of Ardix.
  * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
...
arch/at91sam3x8e/handle_fault.c

@@ -106,8 +106,6 @@ static void print_regs(struct exc_context *context)
 	print_reg("xPSR", context->sp->psr);
 }

-#include <arch/debug.h>
-
 __naked __noreturn void handle_fault(struct exc_context *context, enum irqno irqno)
 {
 	uart_emergency_setup();

@@ -120,7 +118,6 @@ __naked __noreturn void handle_fault(struct exc_context *context, enum irqno irqno)
 	uart_write_sync("\nSystem halted, goodbye\n\n");

-	__breakpoint;
 	while (1);
 }
arch/at91sam3x8e/handle_pend_sv.S

@@ -4,14 +4,15 @@
 .text

-/* void schedule(void); */
-.extern schedule
+/* void enter_sched(struct exc_context *context); */
+.extern enter_sched

 /* void handle_pend_sv(void); */
 func_begin handle_pend_sv

 	prepare_entry

-	bl	schedule
+	mov	r0,	sp
+	bl	enter_sched

 	prepare_leave
 	bx	lr
arch/at91sam3x8e/handle_svc.S

@@ -4,15 +4,15 @@
 .text

-/* void arch_enter(struct exc_context *context); */
-.extern arch_enter
+/* void enter_syscall(struct exc_context *context); */
+.extern enter_syscall

 /* void handle_svc(void); */
 func_begin handle_svc

 	prepare_entry

 	mov	r0,	sp
-	bl	arch_enter	/* arch_enter(sp); */
+	bl	enter_syscall	/* enter_syscall(sp); */

 	prepare_leave
 	bx	lr
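Taken together, the two assembly stubs above now hand the exception stack pointer to C code for both kinds of kernel entry; a compact summary of the control flow (annotation only, not code from this commit):

	svc      -> handle_svc:     mov r0, sp; bl enter_syscall
	PendSV   -> handle_pend_sv: mov r0, sp; bl enter_sched

Both enter_syscall() and enter_sched() save that pointer in current->tcb.exc_context before doing anything else, which is what the new exc_context field of struct tcb (see arch/at91sam3x8e/include/arch/hardware.h below) exists for: a task created by exec() leaves kernel space over a different route than its parent and needs the saved frame to do so.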
arch/at91sam3x8e/include/arch/hardware.h

@@ -69,11 +69,17 @@ struct context {
 /**
  * @brief Task Control Block.
  * This is a low level structure used by `do_switch()` to do the actual context
- * switching,
+ * switching, and embedded into `struct task`.  We do this nesting because it
+ * makes it easier to access the TCB's fields from assembly, and it also makes
+ * us less dependent on a specific architecture.
  */
 struct tcb {
 	struct context context;
 	struct hw_context *hw_context;
+
+	/*
+	 * Needed for exec() because the child task leaves kernel space over a
+	 * different route than the parent one.
+	 */
+	struct exc_context *exc_context;
 };

 __always_inline sysarg_t sc_num(const struct exc_context *ctx)
...
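The nesting rationale in that comment can be made concrete: in the layout introduced by this commit, `tcb` is the first member of `struct task` (see include/ardix/task.h below) and `context` is the first member of `struct tcb`, so assembly can reach the saved context through `current` at small fixed offsets without knowing anything about the rest of `struct task`. A minimal sketch restating that assumption; the asserts are illustrative and not part of the Ardix sources:

#include <stddef.h>
#include <arch/hardware.h>
#include <ardix/task.h>

/* Illustration only: the offsets assembly relies on stay architecture-agnostic
 * because they are relative to the embedded struct tcb, not to struct task. */
_Static_assert(offsetof(struct task, tcb) == 0,
	       "current points directly at its TCB");
_Static_assert(offsetof(struct tcb, context) == 0,
	       "do_switch() finds the software context at offset 0");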
arch/at91sam3x8e/sched.c

@@ -14,6 +14,7 @@
 #include <errno.h>
 #include <string.h>
+#include <unistd.h>

 volatile unsigned long int tick = 0;

@@ -51,13 +52,23 @@ int arch_sched_init(unsigned int freq)
 	return 0;
 }

-void arch_task_init(struct task *task, void (*entry)(void))
+void task_init(struct task *task, int (*entry)(void))
 {
+	task->bottom = task->stack + CONFIG_STACK_SIZE;
+
 	/* TODO: Use separate stacks for kernel and program */
 	struct hw_context *hw_context = task->bottom - sizeof(*hw_context);
 	struct exc_context *exc_context = (void *)hw_context - sizeof(*exc_context);
 	memset(hw_context, 0, task->bottom - (void *)hw_context);

+	/*
+	 * The return value of entry(), which is the exit code, will be stored
+	 * in r0 as per the AAPCS.  Conveniently, this happens to be the same
+	 * register that is also used for passing the first argument to a
+	 * function, so by setting the initial link register to exit() we
+	 * effectively inject a call to that function after the task's main
+	 * routine returns.
+	 */
+	hw_context->lr = exit;
 	hw_context->pc = entry;
 	hw_context->psr = 0x01000000;	/* Thumb = 1, unprivileged */
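The block comment about the link register is the core trick of the new task_init(): task entry points now return int, and that return value silently becomes the argument of exit(). A small illustration with a hypothetical task body (not part of this commit):

/* Hypothetical task entry, purely for illustration. */
static int blink_main(void)
{
	/* ... task work ... */
	return 42;	/* AAPCS: the return value is placed in r0 */
}

/*
 * task_init() sets hw_context->pc to the entry routine and hw_context->lr to
 * exit, so when blink_main() returns, execution continues at exit() with r0
 * still holding 42 -- the same register used for the first argument -- which
 * behaves exactly like an explicit exit(42).
 */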
@@ -67,29 +78,15 @@ void arch_task_init(struct task *task, void (*entry)(void))
 	memset(&task->tcb, 0, sizeof(task->tcb));
 	task->tcb.context.sp = exc_context;
 	task->tcb.context.pc = _leave;
+	task->tcb.exc_context = exc_context;
 }

-__naked __noreturn void _idle(void)
+__naked int _idle(void)
 {
 	/* TODO: put the CPU to sleep */
 	while (1);
 }

-int arch_idle_task_init(struct task *task)
-{
-	void *stack = malloc(CONFIG_STACK_SIZE);
-	if (stack == NULL)
-		return -ENOMEM;
-
-	task->bottom = stack + CONFIG_STACK_SIZE;	/* full-descending stack */
-	arch_task_init(task, _idle);
-
-	task->sleep = 0;
-	task->last_tick = 0;
-	task->state = TASK_READY;
-	task->pid = -1;
-
-	return 0;
-}
-
 unsigned long int ms_to_ticks(unsigned long int ms)
 {
 	return (ms * (unsigned long int)tick_freq) / 1000lu /* 1 s = 1000 ms */;
...
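As a quick sanity check of the ms_to_ticks() formula above (the tick frequencies here are hypothetical): with tick_freq = 1000 Hz, ms_to_ticks(250) = (250 * 1000) / 1000 = 250 ticks; with tick_freq = 250 Hz, ms_to_ticks(3) = (3 * 250) / 1000 = 0, because the integer division rounds toward zero, as the doc comment in include/arch-generic/sched.h notes.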
include/arch-generic/sched.h

@@ -16,16 +16,16 @@ struct task; /* see include/ardix/sched.h */
 int arch_sched_init(unsigned int freq);

 /**
- * Initialize a new process.
- * This requires the process' `stack_base` field to be initialized as the
- * initial register values are written to the stack.
+ * @brief Initialize a new task.
  *
- * @param process: The process.
- * @param entry: The process entry point.
+ * @param task Task to initialize
+ * @param entry Task entry point
  */
-void arch_task_init(struct task *task, void (*entry)(void));
+void task_init(struct task *task, int (*entry)(void));

-int arch_idle_task_init(struct task *task);
+/** @brief Idle task entry point. */
+__naked int _idle(void);

 /**
  * @brief Convert milliseconds to system ticks, rounding to zero.
...
include/arch-generic/syscall.h

@@ -7,6 +7,9 @@
 #define ARCH_SYS_sleep		2
 #define ARCH_SYS_malloc		3
 #define ARCH_SYS_free		4
+#define ARCH_SYS_exec		5
+#define ARCH_SYS_exit		6
+#define ARCH_SYS_waitpid	7

 /*
  * This file is part of Ardix.
...
include/ardix/kevent.h

@@ -25,6 +25,8 @@ enum kevent_kind {
 	KEVENT_DEVICE,
 	/** @brief File has changed */
 	KEVENT_FILE,
+	/** @brief Task has exited */
+	KEVENT_TASK,

 	KEVENT_KIND_COUNT,
 };
include/ardix/sched.h

@@ -6,6 +6,8 @@
 #include <ardix/kent.h>
 #include <ardix/list.h>
 #include <ardix/mutex.h>
+#include <ardix/task.h>
 #include <ardix/types.h>

 #include <config.h>

@@ -14,40 +16,6 @@
 #warning "CONFIG_SCHED_MAXTASK is > 64, this could have a significant performance impact"
 #endif

-enum task_state {
-	/** Task is dead / doesn't exist */
-	TASK_DEAD,
-	/** Task is ready for execution or currently running. */
-	TASK_READY,
-	/** Task is waiting for its next time share. */
-	TASK_QUEUE,
-	/** Task is sleeping, `task::sleep` specifies for how many ticks. */
-	TASK_SLEEP,
-	/** Task is waiting for I/O to flush buffers. */
-	TASK_IOWAIT,
-	/** Task is waiting for a mutex to be unlocked. */
-	TASK_LOCKWAIT,
-};
-
-/** @brief Core structure holding information about a task. */
-struct task {
-	struct tcb tcb;
-	struct kent kent;
-	/**
-	 * @brief Points to the bottom of the stack.
-	 * In a full-descending stack, this is one word after the highest stack address.
-	 */
-	void *bottom;
-	/** @brief If state is `TASK_SLEEP`, the total amount of ticks to sleep */
-	unsigned long int sleep;
-	/** @brief Last execution in ticks */
-	unsigned long int last_tick;
-	enum task_state state;
-	pid_t pid;
-};
-
 /** @brief Current task (access from syscall context only) */
 extern struct task *volatile current;

@@ -64,7 +32,7 @@ int sched_init(void);
  * @brief Main scheduler routine.
  * This will iterate over the process table and choose a new task to be run,
  * which `current` is then updated to.  If the old task was in state
- * `TASK_READY`, it is set to `TASK_QUEUE`.
+ * `TASK_RUNNING`, it is set to `TASK_QUEUE`.
  */
 void schedule(void);

@@ -77,9 +45,10 @@ void schedule(void);
  * setup work.
  *
  * @param task Task to make a copy of
+ * @param err Where to store the error code (will be written 0 on success)
  * @returns The new (child) task copy, or `NULL` on failure
  */
-struct task *task_clone(struct task *task);
+struct task *task_clone(struct task *task, int *err);

 /**
  * @brief Sleep for an approximate amount of milliseconds.
...
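A hypothetical caller of the new two-argument task_clone(), only to illustrate the error-reporting convention described in the doc comment (the surrounding code and the errno-style meaning of err are assumptions):

	int err;
	struct task *child = task_clone(current, &err);
	if (child == NULL)
		return err;	/* task_clone() reported the failure through *err */
	/* on success, err has been written 0 and child is the new copy */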
include/ardix/syscall.h

@@ -15,6 +15,9 @@ enum syscall {
 	SYS_sleep	= ARCH_SYS_sleep,
 	SYS_malloc	= ARCH_SYS_malloc,
 	SYS_free	= ARCH_SYS_free,
+	SYS_exec	= ARCH_SYS_exec,
+	SYS_exit	= ARCH_SYS_exit,
+	SYS_waitpid	= ARCH_SYS_waitpid,

 	NSYSCALLS
 };

@@ -31,6 +34,11 @@ long sys_stub(void);
 long sys_read(int fd, void *buf, size_t len);
 long sys_write(int fd, const void *buf, size_t len);
 long sys_sleep(unsigned long millis);
 long sys_malloc(size_t size);
 void sys_free(void *ptr);
+long sys_exec(int (*entry)(void));
+void sys_exit(int code);
+long sys_waitpid(pid_t pid, int *stat_loc, int options);

 /*
  * This file is part of Ardix.
...
include/ardix/task.h
new file (0 → 100644)

/* See the end of this file for copyright, license, and warranty information. */

#pragma once

#include <arch/hardware.h>

#include <ardix/kent.h>
#include <ardix/kevent.h>
#include <ardix/malloc.h>
#include <ardix/sched.h>
#include <ardix/util.h>

enum task_state {
	/** Task is dead / doesn't exist */
	TASK_DEAD,
	/** Task is currently running. */
	TASK_RUNNING,
	/** Task is waiting for its next time share. */
	TASK_QUEUE,
	/** Task is sleeping, `task::sleep` specifies for how many ticks. */
	TASK_SLEEP,
	/** Task is waiting for I/O to flush buffers. */
	TASK_IOWAIT,
	/** Task is waiting for a mutex to be unlocked. */
	TASK_LOCKWAIT,
	/** Task is waiting for a child to exit (waitpid()). */
	TASK_WAITPID,
};

/** @brief Core structure holding information about a task. */
struct task {
	struct tcb tcb;
	struct kent kent;
	/**
	 * @brief Points to the bottom of the stack.
	 * In a full-descending stack, this is one word after the highest stack address.
	 */
	void *bottom;
	/** @brief Lowest address in the stack, as returned by malloc. */
	void *stack;
	/** @brief If state is `TASK_SLEEP`, the total amount of ticks to sleep */
	unsigned long int sleep;
	/** @brief Last execution in ticks */
	unsigned long int last_tick;
	/*
	 * if a child process exited before its parent called waitpid(),
	 * this is where the children are stored temporarily
	 */
	struct list_head pending_sigchld;
	struct mutex pending_sigchld_lock;
	enum task_state state;
	pid_t pid;
};

__always_inline void task_get(struct task *task)
{
	kent_get(&task->kent);
}

__always_inline void task_put(struct task *task)
{
	kent_put(&task->kent);
}

__always_inline struct task *task_parent(struct task *task)
{
	if (task->pid == 0)
		return NULL;
	else
		return container_of(task->kent.parent, struct task, kent);
}

struct task_kevent {
	struct kevent kevent;
	struct task *task;
	int status;
};

void task_kevent_create_and_dispatch(struct task *task, int status);

/*
 * This file is part of Ardix.
 * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
 *
 * Ardix is non-violent software: you may only use, redistribute,
 * and/or modify it under the terms of the CNPLv6+ as found in
 * the LICENSE file in the source code root directory or at
 * <https://git.pixie.town/thufie/CNPL>.
 *
 * Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
 * permitted by applicable law. See the CNPLv6+ for details.
 */
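task_kevent_create_and_dispatch() is declared here but implemented in the new kernel/task.c, which is not among the hunks shown. A heavily hedged sketch of how these pieces could plausibly interact when a task exits; the function below is a guess for illustration, not the commit's actual code:

/* Hypothetical sketch, NOT the real kernel/task.c implementation. */
void sys_exit_sketch(int status)
{
	/*
	 * Publish the exit as a KEVENT_TASK kevent carrying the task and its
	 * status so a parent blocked in TASK_WAITPID can collect it; per the
	 * comment on pending_sigchld above, a child that exits before its
	 * parent calls waitpid() would be parked on that list in the meantime.
	 */
	task_kevent_create_and_dispatch(current, status);

	current->state = TASK_DEAD;
	schedule();	/* a dead task is never selected again */
}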
include/sys/wait.h
new file (0 → 100644)

/* See the end of this file for copyright, license, and warranty information. */

#pragma once

#include <stdint.h>
#include <toolchain.h>

__shared pid_t waitpid(pid_t pid, int *stat_loc, int options);

/*
 * This file is part of Ardix.
 * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
 *
 * Ardix is non-violent software: you may only use, redistribute,
 * and/or modify it under the terms of the CNPLv6+ as found in
 * the LICENSE file in the source code root directory or at
 * <https://git.pixie.town/thufie/CNPL>.
 *
 * Ardix comes with ABSOLUTELY NO WARRANTY, to the extent
 * permitted by applicable law. See the CNPLv6+ for details.
 */
include/unistd.h

@@ -3,10 +3,21 @@
 #pragma once

 #include <stdint.h>
 #include <toolchain.h>

 __shared ssize_t read(int fildes, void *buf, size_t nbyte);
 __shared ssize_t write(int fildes, const void *buf, size_t nbyte);
 __shared ssize_t sleep(unsigned long int millis);

+/**
+ * @brief Create a new thread.
+ *
+ * Embedded systems typically don't have an MMU and thus no virtual memory,
+ * meaning it is impossible to implement a proper fork.  So, the `fork()` and
+ * `execve()` system calls have to be combined into one.
+ */
+__shared pid_t exec(int (*entry)(void));
+
+__shared __noreturn void exit(int status);
+
+__shared pid_t waitpid(pid_t pid, int *stat_loc, int options);

 /*
  * This file is part of Ardix.
...
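With these declarations, the full thread lifecycle is visible from user code. A minimal hypothetical sketch (worker(), its return value, and options = 0 are illustrative assumptions; the encoding of stat_loc is not specified in this commit):

static int worker(void)
{
	/* ... do some work ... */
	return 0;	/* returning is equivalent to exit(0), see task_init() */
}

static void run_worker(void)
{
	int status;
	pid_t pid = exec(worker);	/* combined fork()+execve(): spawn a thread running worker() */
	if (pid < 0)
		return;			/* e.g. -EAGAIN (no free pid) or -ENOMEM from sys_exec() */

	waitpid(pid, &status, 0);	/* wait for the child to exit */
}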
kernel/CMakeLists.txt

@@ -21,6 +21,7 @@ target_sources(ardix_kernel PRIVATE
 	sched.c
 	serial.c
 	syscall.c
+	task.c
 	userspace.c
 )
kernel/mutex.c

@@ -44,9 +44,7 @@ void mutex_unlock(struct mutex *mutex)
 	spin_unlock(&mutex->wait_queue_lock);

 	if (waiter != NULL) {
-		struct task *task = waiter->task;
-		current->state = TASK_QUEUE;
-		do_switch(current, task);
+		waiter->task->state = TASK_QUEUE;
 	} else {
 		_mutex_unlock(&mutex->lock);
 	}
kernel/sched.c

@@ -38,6 +38,7 @@
 #include <ardix/kevent.h>
 #include <ardix/malloc.h>
 #include <ardix/sched.h>
+#include <ardix/task.h>
 #include <ardix/types.h>

 #include <errno.h>

@@ -48,6 +49,7 @@ extern uint32_t _sstack;
 extern uint32_t _estack;

 static struct task *tasks[CONFIG_SCHED_MAXTASK];
+static MUTEX(tasks_lock);
 struct task *volatile current;

 static struct task kernel_task;

@@ -56,7 +58,12 @@ static struct task idle_task;
 static void task_destroy(struct kent *kent)
 {
 	struct task *task = container_of(kent, struct task, kent);

+	mutex_lock(&tasks_lock);
 	tasks[task->pid] = NULL;
+	mutex_unlock(&tasks_lock);
+
+	kfree(task->stack);
 	kfree(task);
 }

@@ -72,8 +79,12 @@ int sched_init(void)
 	memset(&kernel_task.tcb, 0, sizeof(kernel_task.tcb));
 	kernel_task.bottom = &_estack;
+	kernel_task.stack = kernel_task.bottom - CONFIG_STACK_SIZE;
 	kernel_task.pid = 0;
-	kernel_task.state = TASK_READY;
+	kernel_task.state = TASK_RUNNING;
+	list_init(&kernel_task.pending_sigchld);
+	mutex_init(&kernel_task.pending_sigchld_lock);
+
 	tasks[0] = &kernel_task;
 	current = &kernel_task;

@@ -85,11 +96,17 @@ int sched_init(void)
 		if (err != 0)
 			goto out;

-	err = arch_sched_init(CONFIG_SCHED_FREQ);
-	if (err != 0)
+	idle_task.stack = kmalloc(CONFIG_STACK_SIZE);
+	if (idle_task.stack == NULL)
 		goto out;
+	idle_task.bottom = idle_task.stack + CONFIG_STACK_SIZE;
+	idle_task.pid = -1;
+	idle_task.state = TASK_QUEUE;
+	list_init(&idle_task.pending_sigchld);
+	mutex_init(&idle_task.pending_sigchld_lock);
+	task_init(&idle_task, _idle);

-	err = arch_idle_task_init(&idle_task);
+	err = arch_sched_init(CONFIG_SCHED_FREQ);
 	if (err != 0)
 		goto out;
@@ -113,11 +130,12 @@ static inline bool can_run(const struct task *task)
 	case TASK_SLEEP:
 		return tick - task->last_tick >= task->sleep;
 	case TASK_QUEUE:
-	case TASK_READY:
+	case TASK_RUNNING:
 		return true;
 	case TASK_DEAD:
 	case TASK_IOWAIT:
 	case TASK_LOCKWAIT:
+	case TASK_WAITPID:
 		return false;
 	}

@@ -135,7 +153,7 @@ void schedule(void)
 	kevents_process();

-	if (old->state == TASK_READY)
+	if (old->state == TASK_RUNNING)
 		old->state = TASK_QUEUE;

 	for (unsigned int i = 0; i < ARRAY_SIZE(tasks); i++) {

@@ -156,7 +174,7 @@ void schedule(void)
 	if (new == NULL)
 		new = &idle_task;

-	new->state = TASK_READY;
+	new->state = TASK_RUNNING;
 	new->last_tick = tick;
 	current = new;
@@ -177,9 +195,67 @@ long sys_sleep(unsigned long int millis)
 	current->sleep = ms_to_ticks(millis);
 	yield(TASK_SLEEP);
-	/* TODO: return actual milliseconds */
+	/*
+	 * TODO: actually, use fucking hardware timers which were specifically
+	 *       invented for this exact kind of feature because (1) the tick
+	 *       resolution is often less than 1 ms and (2) ticks aren't really
+	 *       supposed to be guaranteed to happen at regular intervals and
+	 *       (3) the scheduler doesn't even check whether there is a task
+	 *       whose sleep period just expired
+	 */
 	return 0;
 }

+long sys_exec(int (*entry)(void))
+{
+	pid_t pid;
+	struct task *child = NULL;
+
+	mutex_lock(&tasks_lock);
+
+	for (pid = 1; pid < CONFIG_SCHED_MAXTASK; pid++) {
+		if (tasks[pid] == NULL)
+			break;
+	}
+	if (pid == CONFIG_SCHED_MAXTASK) {
+		pid = -EAGAIN;
+		goto out;
+	}
+
+	child = kmalloc(sizeof(*child));
+	if (child == NULL) {
+		pid = -ENOMEM;
+		goto out;
+	}
+
+	child->pid = pid;
+	child->stack = kmalloc(CONFIG_STACK_SIZE);
+	if (child->stack == NULL) {
+		pid = -ENOMEM;
+		goto err_stack_malloc;
+	}
+
+	child->kent.parent = &current->kent;
+	child->kent.destroy = task_destroy;
+	kent_init(&child->kent);
+
+	child->bottom = child->stack + CONFIG_STACK_SIZE;
+	task_init(child, entry);
+	list_init(&child->pending_sigchld);
+	mutex_init(&child->pending_sigchld_lock);
+
+	child->state = TASK_QUEUE;
+	tasks[pid] = child;
+	goto out;
+
+err_stack_malloc:
+	kfree(child);
+out:
+	mutex_unlock(&tasks_lock);
+	return pid;
+}
+
 /*
  * This file is part of Ardix.
  * Copyright (c) 2020, 2021 Felix Kopp <owo@fef.moe>.
...
kernel/syscall.c

@@ -15,6 +15,9 @@ long (*const sys_table[NSYSCALLS])(sysarg_t arg1, sysarg_t arg2, sysarg_t arg3,
 	sys_table_entry(SYS_sleep, sys_sleep),
 	sys_table_entry(SYS_malloc, sys_malloc),
...