Ardix commit 10e8b00f (Verified)
Authored Aug 10, 2021 by Felix Kopp

sched: avoid memory allocation for kernel task

Parent: 4287759c
Changes: 1 file

kernel/sched.c @ 10e8b00f
/* See the end of this file for copyright, license, and warranty information. */
/**
* @file sched.c
* @brief Simple round-robin scheduler.
*
* Tasks are stored in a lookup table, `tasks`, which is indexed by pid.
* The global `current` variable points to the task that is currently running,
* which must only be accessed from scheduling context (i.e. from within a
* syscall or scheduling interrupt handler).
*
* When `schedule()` is called, it first processes the kevent queue, into which
* irq handlers broadcast changes in hardware state, such as a DMA buffer
* having been fully transmitted. Tasks register an event listener for the
* event they are waiting on before entering I/O wait, and clear their waiting
* flag in the listener callback.
*
* After all events are processed, `schedule()` iterates over the task table,
* starting at the entry after the task that was just running, and picks the
* first task it encounters that is eligible to be woken up (i.e. is in state
* `TASK_QUEUE`). The previously running task is therefore executed again only
* if no other task is ready to run. If no task at all is runnable, the idle
* task is selected.
*
* The final step is the in-kernel context switch to the next task, performed
* by `do_switch()`. This routine stores the current register state in the old
* task's TCB and loads the registers from the new one. Execution then resumes
* where the task being switched to previously called `do_switch()`, and
* eventually returns to userspace by returning from the exception handler.
*/
#include <arch-generic/do_switch.h>
#include <arch-generic/sched.h>
#include <arch-generic/watchdog.h>
...
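The event-listener handshake described in the header comment is not part of
this diff. As a rough sketch of the pattern, where every declaration below
(the kevent API, the callback convention, TASK_IOWAIT) is an illustrative
assumption rather than Ardix's real interface:

/* All declarations here are illustrative stand-ins, not Ardix's API. */
struct kevent;				/* event broadcast by an irq handler */
enum task_state { TASK_QUEUE, TASK_IOWAIT };
struct task { volatile enum task_state state; };

extern struct task *volatile current;
extern void yield(enum task_state state);
extern void kevent_listener_add(int kind,
				int (*cb)(struct kevent *, void *),
				void *extra);

/* listener callback: invoked from schedule() once the irq event arrives */
static int dma_tx_done_cb(struct kevent *event, void *extra)
{
	struct task *waiter = extra;

	waiter->state = TASK_QUEUE;	/* clear the waiting flag: runnable again */
	return 1;			/* assumed convention: remove this one-shot listener */
}

/* driver-side wait: register interest *before* sleeping so no wakeup is lost */
static void wait_for_dma_tx(void)
{
	kevent_listener_add(/* KEVENT_DMA_TX_DONE */ 0, dma_tx_done_cb, current);
	yield(TASK_IOWAIT);	/* schedule() runs other tasks until the event fires */
}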
@@ -17,15 +47,16 @@
 extern uint32_t _sstack;
 extern uint32_t _estack;
 
-static struct task *tasktab[CONFIG_SCHED_MAXTASK];
+static struct task *tasks[CONFIG_SCHED_MAXTASK];
 struct task *volatile current;
 
+static struct task kernel_task;
+static struct task idle_task;
+
 static void task_destroy(struct kent *kent)
 {
 	struct task *task = container_of(kent, struct task, kent);
 
-	tasktab[task->pid] = NULL;
+	tasks[task->pid] = NULL;
 	free(task);
 }
...
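task_destroy() above recovers the enclosing struct task from a pointer to its
embedded kent member via container_of. The macro itself is not part of this
diff; the conventional definition (Ardix's own version may differ in detail)
is:

#include <stddef.h>	/* offsetof */

/* Map a pointer to a struct member back to the struct containing it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))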
@@ -33,26 +64,22 @@ int sched_init(void)
 {
 	int err;
 
-	struct task *ktask = malloc(sizeof(*ktask));
-	if (ktask == NULL)
-		return -ENOMEM;
-
-	ktask->kent.parent = kent_root;
-	ktask->kent.destroy = task_destroy;
-	err = kent_init(&ktask->kent);
+	kernel_task.kent.parent = kent_root;
+	kernel_task.kent.destroy = task_destroy;
+	err = kent_init(&kernel_task.kent);
 	if (err != 0)
 		goto out;
 
-	memset(&ktask->tcb, 0, sizeof(ktask->tcb));
-	ktask->bottom = &_estack;
-	ktask->pid = 0;
-	ktask->state = TASK_READY;
+	memset(&kernel_task.tcb, 0, sizeof(kernel_task.tcb));
+	kernel_task.bottom = &_estack;
+	kernel_task.pid = 0;
+	kernel_task.state = TASK_READY;
 
-	tasktab[0] = ktask;
-	current = ktask;
+	tasks[0] = &kernel_task;
+	current = &kernel_task;
 
-	for (unsigned int i = 1; i < ARRAY_SIZE(tasktab); i++)
-		tasktab[i] = NULL;
+	for (unsigned int i = 1; i < ARRAY_SIZE(tasks); i++)
+		tasks[i] = NULL;
 
 	err = arch_watchdog_init();
 	if (err != 0)
...
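The core of the commit is in the hunk above: sched_init() no longer
heap-allocates the kernel task, so the -ENOMEM early return disappears. A
minimal stand-alone illustration of why (the demo_task type is made up for
the example; only the storage-duration argument carries over):

#include <stdlib.h>
#include <string.h>

struct demo_task { unsigned long tcb[16]; int pid; };

/* Static storage duration: reserved at link time, zero-initialized
 * before any code runs, and cannot fail at runtime. */
static struct demo_task static_task;

/* Heap allocation: adds a failure path the caller must handle. */
static struct demo_task *alloc_task(void)
{
	struct demo_task *t = malloc(sizeof(*t));
	if (t == NULL)
		return NULL;	/* the -ENOMEM case this commit removes */
	memset(t, 0, sizeof(*t));
	return t;
}

Since objects with static storage duration are zero-initialized before boot
code runs, the memset() of kernel_task.tcb in the new hunk is technically
redundant on a cold boot, though it keeps the initialization explicit.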
@@ -110,15 +137,16 @@ void schedule(void)
 	if (old->state == TASK_READY)
 		old->state = TASK_QUEUE;
 
-	for (unsigned int i = 0; i < ARRAY_SIZE(tasktab); i++) {
+	for (unsigned int i = 0; i < ARRAY_SIZE(tasks); i++) {
 		/*
 		 * increment nextpid before accessing the task table
 		 * because it is -1 if the idle task was running
 		 */
 		nextpid++;
-		nextpid %= ARRAY_SIZE(tasktab);
+		nextpid %= ARRAY_SIZE(tasks);
 
-		struct task *tmp = tasktab[nextpid];
+		struct task *tmp = tasks[nextpid];
 		if (tmp != NULL && can_run(tmp)) {
 			new = tmp;
 			break;
...
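The increment-before-modulo ordering that the in-code comment mentions
matters because of C's truncating division. Assuming nextpid is a signed
integer set to -1 while the idle task runs (its declaration is not shown in
this hunk), a small self-contained demonstration:

#include <stdio.h>

int main(void)
{
	int nextpid = -1;	/* sentinel: the idle task was running */
	const int n = 8;	/* stand-in for ARRAY_SIZE(tasks) */

	/* (-1) % n == -1 in C (truncation toward zero), so taking the
	 * modulo first would produce an invalid index.  Incrementing
	 * first turns the sentinel into valid slot 0. */
	nextpid++;
	nextpid %= n;
	printf("%d\n", nextpid);	/* prints 0 */
	return 0;
}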
@@ -140,50 +168,14 @@ void schedule(void)
 void yield(enum task_state state)
 {
-	struct task *task = current;
-
-	task->state = state;
+	current->state = state;
 	schedule();
 }
 
-struct task *sched_fork(struct task *parent)
-{
-	pid_t pid;
-	struct task *child = malloc(sizeof(*child));
-	if (child == NULL)
-		goto err_alloc;
-
-	for (pid = 0; pid < CONFIG_SCHED_MAXTASK; pid++) {
-		if (tasktab[pid] == NULL)
-			break;
-	}
-	if (pid == CONFIG_SCHED_MAXTASK)
-		goto err_maxtask;
-
-	child->kent.parent = &parent->kent;
-	child->kent.destroy = task_destroy;
-	if (kent_init(&child->kent) != 0)
-		goto err_kent;
-
-	child->pid = pid;
-
-	return child;
-
-err_kent:
-err_maxtask:
-	free(child);
-err_alloc:
-	return NULL;
-}
-
-void msleep(unsigned long int ms)
-{
-	current->sleep = ms_to_ticks(ms);
-	yield(TASK_SLEEP);
-}
-
 long sys_sleep(unsigned long int millis)
 {
-	msleep(millis);
+	current->sleep = ms_to_ticks(millis);
+	yield(TASK_SLEEP);
+
 	/* TODO: return actual milliseconds */
 	return 0;
 }
...
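With msleep() and sched_fork() removed, sys_sleep() now sets the tick count
and yields directly. ms_to_ticks() itself is not shown in the diff; a
plausible sketch (the CONFIG_SYSTICK_FREQ symbol is an assumption, and
Ardix's real conversion may differ) turns milliseconds into scheduler ticks,
rounding up so short sleeps do not truncate to zero:

#define CONFIG_SYSTICK_FREQ 1000UL	/* assumed tick rate in Hz */

static inline unsigned long ms_to_ticks(unsigned long ms)
{
	/* round up: a 1 ms sleep at low tick rates still waits >= 1 tick */
	return (ms * CONFIG_SYSTICK_FREQ + 999UL) / 1000UL;
}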