Cleanup and improve spin lock code

This commit is contained in:
Dale Weiler 2015-05-21 04:28:14 -04:00
parent 3820512357
commit 0dbd5ab089
22 changed files with 288 additions and 213 deletions

View File

@ -51,13 +51,13 @@ static inline void ring_buffer_increment_write(ring_buffer_t * ring_buffer) {
size_t ring_buffer_read(ring_buffer_t * ring_buffer, size_t size, uint8_t * buffer) {
size_t collected = 0;
while (collected == 0) {
spin_lock(&ring_buffer->lock);
spin_lock(ring_buffer->lock);
while (ring_buffer_unread(ring_buffer) > 0 && collected < size) {
buffer[collected] = ring_buffer->buffer[ring_buffer->read_ptr];
ring_buffer_increment_read(ring_buffer);
collected++;
}
spin_unlock(&ring_buffer->lock);
spin_unlock(ring_buffer->lock);
wakeup_queue(ring_buffer->wait_queue_writers);
if (collected == 0) {
if (sleep_on(ring_buffer->wait_queue_readers) && ring_buffer->internal_stop) {
@ -73,7 +73,7 @@ size_t ring_buffer_read(ring_buffer_t * ring_buffer, size_t size, uint8_t * buff
size_t ring_buffer_write(ring_buffer_t * ring_buffer, size_t size, uint8_t * buffer) {
size_t written = 0;
while (written < size) {
spin_lock(&ring_buffer->lock);
spin_lock(ring_buffer->lock);
while (ring_buffer_available(ring_buffer) > 0 && written < size) {
ring_buffer->buffer[ring_buffer->write_ptr] = buffer[written];
@ -81,7 +81,7 @@ size_t ring_buffer_write(ring_buffer_t * ring_buffer, size_t size, uint8_t * buf
written++;
}
spin_unlock(&ring_buffer->lock);
spin_unlock(ring_buffer->lock);
wakeup_queue(ring_buffer->wait_queue_readers);
if (written < size) {
if (sleep_on(ring_buffer->wait_queue_writers) && ring_buffer->internal_stop) {
@ -101,9 +101,10 @@ ring_buffer_t * ring_buffer_create(size_t size) {
out->buffer = malloc(size);
out->write_ptr = 0;
out->read_ptr = 0;
out->lock = 0;
out->size = size;
spin_init(out->lock);
out->internal_stop = 0;
out->wait_queue_readers = list_create();

View File

@ -97,13 +97,13 @@ uint32_t read_pipe(fs_node_t *node, uint32_t offset, uint32_t size, uint8_t *buf
size_t collected = 0;
while (collected == 0) {
spin_lock(&pipe->lock_read);
spin_lock(pipe->lock_read);
while (pipe_unread(pipe) > 0 && collected < size) {
buffer[collected] = pipe->buffer[pipe->read_ptr];
pipe_increment_read(pipe);
collected++;
}
spin_unlock(&pipe->lock_read);
spin_unlock(pipe->lock_read);
wakeup_queue(pipe->wait_queue_writers);
/* Deschedule and switch */
if (collected == 0) {
@ -141,7 +141,7 @@ uint32_t write_pipe(fs_node_t *node, uint32_t offset, uint32_t size, uint8_t *bu
size_t written = 0;
while (written < size) {
spin_lock(&pipe->lock_write);
spin_lock(pipe->lock_write);
#if 0
size_t available = 0;
@ -164,7 +164,7 @@ uint32_t write_pipe(fs_node_t *node, uint32_t offset, uint32_t size, uint8_t *bu
}
#endif
spin_unlock(&pipe->lock_write);
spin_unlock(pipe->lock_write);
wakeup_queue(pipe->wait_queue_readers);
if (written < size) {
sleep_on(pipe->wait_queue_writers);
@ -242,10 +242,11 @@ fs_node_t * make_pipe(size_t size) {
pipe->read_ptr = 0;
pipe->size = size;
pipe->refcount = 0;
pipe->lock_read = 0;
pipe->lock_write= 0;
pipe->dead = 0;
spin_init(pipe->lock_read);
spin_init(pipe->lock_write);
pipe->wait_queue_writers = list_create();
pipe->wait_queue_readers = list_create();

View File

@ -109,12 +109,13 @@ uint32_t write_fs(fs_node_t *node, uint32_t offset, uint32_t size, uint8_t *buff
}
}
volatile uint8_t tmp_refcount_lock = 0;
//volatile uint8_t tmp_refcount_lock = 0;
static spin_lock_t tmp_refcount_lock = { 0 };
void vfs_lock(fs_node_t * node) {
spin_lock(&tmp_refcount_lock);
spin_lock(tmp_refcount_lock);
node->refcount = -1;
spin_unlock(&tmp_refcount_lock);
spin_unlock(tmp_refcount_lock);
}
/**
@ -128,9 +129,9 @@ void open_fs(fs_node_t *node, unsigned int flags) {
if (!node) return;
if (node->refcount >= 0) {
spin_lock(&tmp_refcount_lock);
spin_lock(tmp_refcount_lock);
node->refcount++;
spin_unlock(&tmp_refcount_lock);
spin_unlock(tmp_refcount_lock);
}
if (node->open) {
@ -153,7 +154,7 @@ void close_fs(fs_node_t *node) {
if (node->refcount == -1) return;
spin_lock(&tmp_refcount_lock);
spin_lock(tmp_refcount_lock);
node->refcount--;
if (node->refcount == 0) {
debug_print(NOTICE, "Node refcount [%s] is now 0: %d", node->name, node->refcount);
@ -164,7 +165,7 @@ void close_fs(fs_node_t *node) {
free(node);
}
spin_unlock(&tmp_refcount_lock);
spin_unlock(tmp_refcount_lock);
}
/**
@ -356,9 +357,9 @@ fs_node_t *clone_fs(fs_node_t *source) {
if (!source) return NULL;
if (source->refcount >= 0) {
spin_lock(&tmp_refcount_lock);
spin_lock(tmp_refcount_lock);
source->refcount++;
spin_unlock(&tmp_refcount_lock);
spin_unlock(tmp_refcount_lock);
}
return source;
@ -521,7 +522,8 @@ int vfs_mount_type(char * type, char * arg, char * mountpoint) {
return 0;
}
volatile uint8_t tmp_vfs_lock = 0;
//volatile uint8_t tmp_vfs_lock = 0;
static spin_lock_t tmp_vfs_lock = { 0 };
/**
* vfs_mount - Mount a file system to the specified path.
*
@ -543,7 +545,7 @@ void * vfs_mount(char * path, fs_node_t * local_root) {
return NULL;
}
spin_lock(&tmp_vfs_lock);
spin_lock(tmp_vfs_lock);
local_root->refcount = -1;
@ -615,7 +617,7 @@ void * vfs_mount(char * path, fs_node_t * local_root) {
}
free(p);
spin_unlock(&tmp_vfs_lock);
spin_unlock(tmp_vfs_lock);
return ret_val;
}

View File

@ -3,7 +3,6 @@
#include <fs.h>
fs_node_t * tmpfs_create(char * name);
uint8_t volatile tmpfs_lock;
struct tmpfs_file {
char * name;

View File

@ -14,8 +14,10 @@ typedef struct _pipe_device {
size_t read_ptr;
size_t size;
size_t refcount;
uint8_t volatile lock_read;
uint8_t volatile lock_write;
//uint8_t volatile lock_read;
//uint8_t volatile lock_write;
volatile int lock_read[2];
volatile int lock_write[2];
list_t * wait_queue_readers;
list_t * wait_queue_writers;
int dead;

View File

@ -48,7 +48,7 @@ typedef struct image {
uintptr_t user_stack; /* User stack */
uintptr_t start;
uintptr_t shm_heap;
volatile uint8_t lock;
volatile int lock[2];
} image_t;
/* Resizable descriptor table */

View File

@ -6,7 +6,7 @@ typedef struct {
size_t write_ptr;
size_t read_ptr;
size_t size;
uint8_t volatile lock;
volatile int lock[2];
list_t * wait_queue_readers;
list_t * wait_queue_writers;
int internal_stop;

View File

@ -39,8 +39,11 @@ extern char * boot_arg_extra; /* Extra data to pass to init */
extern void *sbrk(uintptr_t increment);
extern void spin_lock(uint8_t volatile * lock);
extern void spin_unlock(uint8_t volatile * lock);
/* spin.c */
typedef volatile int spin_lock_t[2];
extern void spin_init(spin_lock_t lock);
extern void spin_lock(spin_lock_t lock);
extern void spin_unlock(spin_lock_t lock);
extern void return_to_userspace(void);

View File

@ -133,42 +133,42 @@ static void * __attribute__ ((malloc)) klcalloc(uintptr_t nmemb, uintptr_t size)
static void * __attribute__ ((malloc)) klvalloc(uintptr_t size);
static void klfree(void * ptr);
static uint8_t volatile mem_lock = 0;
static spin_lock_t mem_lock = { 0 };
void * __attribute__ ((malloc)) malloc(uintptr_t size) {
spin_lock(&mem_lock);
spin_lock(mem_lock);
void * ret = klmalloc(size);
spin_unlock(&mem_lock);
spin_unlock(mem_lock);
return ret;
}
void * __attribute__ ((malloc)) realloc(void * ptr, uintptr_t size) {
spin_lock(&mem_lock);
spin_lock(mem_lock);
void * ret = klrealloc(ptr, size);
spin_unlock(&mem_lock);
spin_unlock(mem_lock);
return ret;
}
void * __attribute__ ((malloc)) calloc(uintptr_t nmemb, uintptr_t size) {
spin_lock(&mem_lock);
spin_lock(mem_lock);
void * ret = klcalloc(nmemb, size);
spin_unlock(&mem_lock);
spin_unlock(mem_lock);
return ret;
}
void * __attribute__ ((malloc)) valloc(uintptr_t size) {
spin_lock(&mem_lock);
spin_lock(mem_lock);
void * ret = klvalloc(size);
spin_unlock(&mem_lock);
spin_unlock(mem_lock);
return ret;
}
void free(void * ptr) {
spin_lock(&mem_lock);
spin_lock(mem_lock);
if ((uintptr_t)ptr > placement_pointer) {
klfree(ptr);
}
spin_unlock(&mem_lock);
spin_unlock(mem_lock);
}

View File

@ -23,7 +23,8 @@ uintptr_t placement_pointer = (uintptr_t)&end;
uintptr_t heap_end = (uintptr_t)NULL;
uintptr_t kernel_heap_alloc_point = KERNEL_HEAP_INIT;
static volatile uint8_t frame_alloc_lock = 0;
//static volatile uint8_t frame_alloc_lock = 0;
static spin_lock_t frame_alloc_lock = { 0 };
uint32_t first_n_frames(int n);
void
@ -56,7 +57,7 @@ kmalloc_real(
clear_frame(map_to_physical(i));
}
/* XXX This is going to get touchy... */
spin_lock(&frame_alloc_lock);
spin_lock(frame_alloc_lock);
uint32_t index = first_n_frames((size + 0xFFF) / 0x1000);
if (index == 0xFFFFFFFF) {
return 0;
@ -66,7 +67,7 @@ kmalloc_real(
page_t * page = get_page((uintptr_t)address + (i * 0x1000),0,kernel_directory);
page->frame = index + i;
}
spin_unlock(&frame_alloc_lock);
spin_unlock(frame_alloc_lock);
}
*phys = map_to_physical((uintptr_t)address);
}
@ -220,12 +221,12 @@ alloc_frame(
page->user = (is_kernel == 1) ? 0 : 1;
return;
} else {
spin_lock(&frame_alloc_lock);
spin_lock(frame_alloc_lock);
uint32_t index = first_frame();
assert(index != (uint32_t)-1 && "Out of frames.");
set_frame(index * 0x1000);
page->frame = index;
spin_unlock(&frame_alloc_lock);
spin_unlock(frame_alloc_lock);
page->present = 1;
page->rw = (is_writeable == 1) ? 1 : 0;
page->user = (is_kernel == 1) ? 0 : 1;

View File

@ -15,7 +15,8 @@
#include <list.h>
static volatile uint8_t bsl; // big shm lock
//static volatile uint8_t bsl; // big shm lock
static spin_lock_t bsl; // big shm lock
tree_t * shm_tree = NULL;
@ -250,7 +251,7 @@ static size_t chunk_size (shm_chunk_t * chunk) {
void * shm_obtain (char * path, size_t * size) {
spin_lock(&bsl);
spin_lock(bsl);
process_t * proc = (process_t *)current_process;
if (proc->group != 0) {
@ -268,14 +269,14 @@ void * shm_obtain (char * path, size_t * size) {
if (size == 0) {
// The process doesn't want a chunk...?
spin_unlock(&bsl);
spin_unlock(bsl);
return NULL;
}
chunk = create_chunk(node, *size);
if (chunk == NULL) {
debug_print(ERROR, "Could not allocate a shm_chunk_t");
spin_unlock(&bsl);
spin_unlock(bsl);
return NULL;
}
@ -287,14 +288,14 @@ void * shm_obtain (char * path, size_t * size) {
void * vshm_start = map_in(chunk, proc);
*size = chunk_size(chunk);
spin_unlock(&bsl);
spin_unlock(bsl);
invalidate_page_tables();
return vshm_start;
}
int shm_release (char * path) {
spin_lock(&bsl);
spin_lock(bsl);
process_t * proc = (process_t *)current_process;
if (proc->group != 0) {
@ -304,7 +305,7 @@ int shm_release (char * path) {
/* First, find the right chunk */
shm_node_t * _node = get_node(path, 0);
if (!_node) {
spin_unlock(&bsl);
spin_unlock(bsl);
return 1;
}
shm_chunk_t * chunk = _node->chunk;
@ -319,7 +320,7 @@ int shm_release (char * path) {
}
}
if (node == NULL) {
spin_unlock(&bsl);
spin_unlock(bsl);
return 1;
}
@ -340,14 +341,14 @@ int shm_release (char * path) {
free(node);
free(mapping);
spin_unlock(&bsl);
spin_unlock(bsl);
return 0;
}
/* This function should only be called if the process's address space
* is about to be destroyed -- chunks will not be unmounted therefrom! */
void shm_release_all (process_t * proc) {
spin_lock(&bsl);
spin_lock(bsl);
node_t * node;
while ((node = list_pop(proc->shm_mappings)) != NULL) {
@ -362,7 +363,7 @@ void shm_release_all (process_t * proc) {
proc->shm_mappings->head = proc->shm_mappings->tail = NULL;
proc->shm_mappings->length = 0;
spin_unlock(&bsl);
spin_unlock(bsl);
}

57
kernel/spin.c Normal file
View File

@ -0,0 +1,57 @@
/* vim: tabstop=4 shiftwidth=4 noexpandtab
* This file is part of ToaruOS and is released under the terms
* of the NCSA / University of Illinois License - see LICENSE.md
* Copyright (C) 2015 Dale Weiler
*
* Spin locks with waiters
*
*/
#include <system.h>
/* Atomically exchange *x with v and return the previous value of *x.
 * x86 XCHG with a memory operand is implicitly locked, so no LOCK
 * prefix is needed; the "memory" clobber is a compiler barrier. */
static inline int arch_atomic_swap(volatile int * x, int v) {
asm("xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");
return v;
}
/* Store x into *p with a compiler barrier ("memory" clobber). This is
 * a plain MOVL, not a locked RMW -- a single aligned 32-bit store is
 * atomic on x86, which is all the unlock path needs. */
static inline void arch_atomic_store(volatile int * p, int x) {
asm("movl %1, %0" : "=m"(*p) : "r"(x) : "memory");
}
/* Atomically increment *x (LOCK INCL). Used for the waiter count. */
static inline void arch_atomic_inc(volatile int * x) {
asm("lock; incl %0" : "=m"(*x) : "m"(*x) : "memory");
}
/* Atomically decrement *x (LOCK DECL). Used for the waiter count. */
static inline void arch_atomic_dec(volatile int * x) {
asm("lock; decl %0" : "=m"(*x) : "m"(*x) : "memory");
}
/* Spin until *addr reads zero, yielding to the scheduler on each pass
 * (switch_task(1) -- presumably "reschedulable" yield; see task code).
 * If a waiter counter is supplied, it is atomically incremented before
 * the spin and decremented after, so the unlocker can see that someone
 * is blocked and yield to them (see spin_unlock below). */
void spin_wait(volatile int * addr, volatile int * waiters) {
if (waiters) {
arch_atomic_inc(waiters);
}
while (*addr) {
switch_task(1);
}
if (waiters) {
arch_atomic_dec(waiters);
}
}
/* Acquire the lock. Layout of spin_lock_t (volatile int[2]): lock[0]
 * is the lock word, lock[1] is the waiter count. Attempt to take the
 * lock with an atomic swap; on contention, register as a waiter and
 * yield until the lock word clears, then retry the swap. */
void spin_lock(spin_lock_t lock) {
while (arch_atomic_swap(lock, 1)) {
spin_wait(lock, lock+1);
}
}
/* Initialize a lock to the released state with no waiters: clear both
 * the lock word (lock[0]) and the waiter count (lock[1]). */
void spin_init(spin_lock_t lock) {
for (int i = 0; i < 2; i++) {
lock[i] = 0;
}
}
/* Release the lock if it is held, then yield once if any waiters are
 * registered so a blocked task gets a chance to run promptly.
 * NOTE(review): the lock[1] read is unsynchronized against waiters
 * incrementing/decrementing it; a stale value only adds or skips one
 * courtesy yield and does not affect lock correctness. Unlocking an
 * unheld lock is a silent no-op here. */
void spin_unlock(spin_lock_t lock) {
if (lock[0]) {
arch_atomic_store(lock, 0);
if (lock[1])
switch_task(1);
}
}

View File

@ -24,10 +24,15 @@ list_t * sleep_queue;
volatile process_t * current_process = NULL;
process_t * kernel_idle_task = NULL;
static uint8_t volatile tree_lock = 0;
static uint8_t volatile process_queue_lock = 0;
static uint8_t volatile wait_lock_tmp = 0;
static uint8_t volatile sleep_lock = 0;
//static uint8_t volatile tree_lock = 0;
//static uint8_t volatile process_queue_lock = 0;
//static uint8_t volatile wait_lock_tmp = 0;
//static uint8_t volatile sleep_lock = 0;
static spin_lock_t tree_lock = { 0 };
static spin_lock_t process_queue_lock = { 0 };
static spin_lock_t wait_lock_tmp = { 0 };
static spin_lock_t sleep_lock = { 0 };
/* Default process name string */
char * default_name = "[unnamed]";
@ -112,9 +117,9 @@ void make_process_ready(process_t * proc) {
/* XXX can't wake from timed sleep */
if (proc->timed_sleep_node) {
IRQ_OFF;
spin_lock(&sleep_lock);
spin_lock(sleep_lock);
list_delete(sleep_queue, proc->timed_sleep_node);
spin_unlock(&sleep_lock);
spin_unlock(sleep_lock);
IRQ_RES;
proc->sleep_node.owner = NULL;
free(proc->timed_sleep_node->value);
@ -122,14 +127,14 @@ void make_process_ready(process_t * proc) {
/* Else: I have no idea what happened. */
} else {
proc->sleep_interrupted = 1;
spin_lock(&wait_lock_tmp);
spin_lock(wait_lock_tmp);
list_delete((list_t*)proc->sleep_node.owner, &proc->sleep_node);
spin_unlock(&wait_lock_tmp);
spin_unlock(wait_lock_tmp);
}
}
spin_lock(&process_queue_lock);
spin_lock(process_queue_lock);
list_append(process_queue, &proc->sched_node);
spin_unlock(&process_queue_lock);
spin_unlock(process_queue_lock);
}
@ -155,11 +160,11 @@ void delete_process(process_t * proc) {
}
/* Remove the entry. */
spin_lock(&tree_lock);
spin_lock(tree_lock);
/* Reparent everyone below me to init */
tree_remove_reparent_root(process_tree, entry);
list_delete(process_list, list_find(process_list, proc));
spin_unlock(&tree_lock);
spin_unlock(tree_lock);
/* Uh... */
free(proc);
@ -239,7 +244,8 @@ process_t * spawn_init(void) {
init->image.user_stack = 0;
init->image.size = 0;
init->image.shm_heap = SHM_START; /* Yeah, a bit of a hack. */
init->image.lock = 0;
spin_init(init->image.lock);
/* Process is not finished */
init->finished = 0;
@ -291,11 +297,11 @@ void process_disown(process_t * proc) {
/* Find the process in the tree */
tree_node_t * entry = proc->tree_entry;
/* Break it of from its current parent */
spin_lock(&tree_lock);
spin_lock(tree_lock);
tree_break_off(process_tree, entry);
/* And insert it back elsewhere */
tree_node_insert_child_node(process_tree, process_tree->root, entry);
spin_unlock(&tree_lock);
spin_unlock(tree_lock);
}
/*
@ -340,7 +346,8 @@ process_t * spawn_process(volatile process_t * parent) {
debug_print(INFO," }");
proc->image.user_stack = parent->image.user_stack;
proc->image.shm_heap = SHM_START; /* Yeah, a bit of a hack. */
proc->image.lock = 0;
spin_init(proc->image.lock);
assert(proc->image.stack && "Failed to allocate kernel stack for new process.");
@ -390,10 +397,10 @@ process_t * spawn_process(volatile process_t * parent) {
tree_node_t * entry = tree_node_create(proc);
assert(entry && "Failed to allocate a process tree node for new process.");
proc->tree_entry = entry;
spin_lock(&tree_lock);
spin_lock(tree_lock);
tree_node_insert_child_node(process_tree, parent->tree_entry, entry);
list_insert(process_list, (void *)proc);
spin_unlock(&tree_lock);
spin_unlock(tree_lock);
/* Return the new process */
return proc;
@ -409,9 +416,9 @@ uint8_t process_compare(void * proc_v, void * pid_v) {
process_t * process_from_pid(pid_t pid) {
if (pid < 0) return NULL;
spin_lock(&tree_lock);
spin_lock(tree_lock);
tree_node_t * entry = tree_find(process_tree,&pid,process_compare);
spin_unlock(&tree_lock);
spin_unlock(tree_lock);
if (entry) {
return (process_t *)entry->value;
}
@ -420,7 +427,7 @@ process_t * process_from_pid(pid_t pid) {
process_t * process_get_parent(process_t * process) {
process_t * result = NULL;
spin_lock(&tree_lock);
spin_lock(tree_lock);
tree_node_t * entry = process->tree_entry;
@ -428,7 +435,7 @@ process_t * process_get_parent(process_t * process) {
result = entry->parent->value;
}
spin_unlock(&tree_lock);
spin_unlock(tree_lock);
return result;
}
@ -539,9 +546,9 @@ uint32_t process_move_fd(process_t * proc, int src, int dest) {
int wakeup_queue(list_t * queue) {
int awoken_processes = 0;
while (queue->length > 0) {
spin_lock(&wait_lock_tmp);
spin_lock(wait_lock_tmp);
node_t * node = list_pop(queue);
spin_unlock(&wait_lock_tmp);
spin_unlock(wait_lock_tmp);
if (!((process_t *)node->value)->finished) {
make_process_ready(node->value);
}
@ -553,9 +560,9 @@ int wakeup_queue(list_t * queue) {
int wakeup_queue_interrupted(list_t * queue) {
int awoken_processes = 0;
while (queue->length > 0) {
spin_lock(&wait_lock_tmp);
spin_lock(wait_lock_tmp);
node_t * node = list_pop(queue);
spin_unlock(&wait_lock_tmp);
spin_unlock(wait_lock_tmp);
if (!((process_t *)node->value)->finished) {
process_t * proc = node->value;
proc->sleep_interrupted = 1;
@ -574,9 +581,9 @@ int sleep_on(list_t * queue) {
return 0;
}
current_process->sleep_interrupted = 0;
spin_lock(&wait_lock_tmp);
spin_lock(wait_lock_tmp);
list_append(queue, (node_t *)&current_process->sleep_node);
spin_unlock(&wait_lock_tmp);
spin_unlock(wait_lock_tmp);
switch_task(0);
return current_process->sleep_interrupted;
}
@ -588,7 +595,7 @@ int process_is_ready(process_t * proc) {
void wakeup_sleepers(unsigned long seconds, unsigned long subseconds) {
IRQ_OFF;
spin_lock(&sleep_lock);
spin_lock(sleep_lock);
if (sleep_queue->length) {
sleeper_t * proc = ((sleeper_t *)sleep_queue->head->value);
while (proc && (proc->end_tick < seconds || (proc->end_tick == seconds && proc->end_subtick <= subseconds))) {
@ -607,7 +614,7 @@ void wakeup_sleepers(unsigned long seconds, unsigned long subseconds) {
}
}
}
spin_unlock(&sleep_lock);
spin_unlock(sleep_lock);
IRQ_RES;
}
@ -619,7 +626,7 @@ void sleep_until(process_t * process, unsigned long seconds, unsigned long subse
process->sleep_node.owner = sleep_queue;
IRQ_OFF;
spin_lock(&sleep_lock);
spin_lock(sleep_lock);
node_t * before = NULL;
foreach(node, sleep_queue) {
sleeper_t * candidate = ((sleeper_t *)node->value);
@ -633,7 +640,7 @@ void sleep_until(process_t * process, unsigned long seconds, unsigned long subse
proc->end_tick = seconds;
proc->end_subtick = subseconds;
process->timed_sleep_node = list_insert_after(sleep_queue, before, proc);
spin_unlock(&sleep_lock);
spin_unlock(sleep_lock);
IRQ_RES;
}

View File

@ -36,8 +36,8 @@ void enter_signal_handler(uintptr_t location, int signum, uintptr_t stack) {
debug_print(CRITICAL, "Failed to jump to signal handler!");
}
static uint8_t volatile sig_lock;
static uint8_t volatile sig_lock_b;
static spin_lock_t sig_lock;
static spin_lock_t sig_lock_b;
char isdeadly[] = {
0, /* 0? */
@ -136,9 +136,9 @@ void return_from_signal_handler(void) {
rets_from_sig = list_create();
}
spin_lock(&sig_lock);
spin_lock(sig_lock);
list_insert(rets_from_sig, (process_t *)current_process);
spin_unlock(&sig_lock);
spin_unlock(sig_lock);
switch_next();
}
@ -146,11 +146,11 @@ void return_from_signal_handler(void) {
void fix_signal_stacks(void) {
uint8_t redo_me = 0;
if (rets_from_sig) {
spin_lock(&sig_lock_b);
spin_lock(sig_lock_b);
while (rets_from_sig->head) {
spin_lock(&sig_lock);
spin_lock(sig_lock);
node_t * n = list_dequeue(rets_from_sig);
spin_unlock(&sig_lock);
spin_unlock(sig_lock);
if (!n) {
continue;
}
@ -168,12 +168,12 @@ void fix_signal_stacks(void) {
p->signal_kstack = NULL;
make_process_ready(p);
}
spin_unlock(&sig_lock_b);
spin_unlock(sig_lock_b);
}
if (redo_me) {
spin_lock(&sig_lock);
spin_lock(sig_lock);
list_insert(rets_from_sig, (process_t *)current_process);
spin_unlock(&sig_lock);
spin_unlock(sig_lock);
switch_next();
}
}

View File

@ -151,7 +151,7 @@ static int sys_sbrk(int size) {
if (proc->group != 0) {
proc = process_from_pid(proc->group);
}
spin_lock(&proc->image.lock);
spin_lock(proc->image.lock);
uintptr_t ret = proc->image.heap;
uintptr_t i_ret = ret;
while (ret % 0x1000) {
@ -164,7 +164,7 @@ static int sys_sbrk(int size) {
alloc_frame(get_page(proc->image.heap_actual, 1, current_directory), 0, 1);
invalidate_tables_at(proc->image.heap_actual);
}
spin_unlock(&proc->image.lock);
spin_unlock(proc->image.lock);
return ret;
}

View File

@ -8,16 +8,6 @@
*/
#include <system.h>
void spin_lock(uint8_t volatile * lock) {
while(__sync_lock_test_and_set(lock, 0x01)) {
switch_task(1);
}
}
void spin_unlock(uint8_t volatile * lock) {
__sync_lock_release(lock);
}
char * boot_arg = NULL;
char * boot_arg_extra = NULL;

View File

@ -26,7 +26,8 @@ struct ata_device {
ata_identify_t identity;
};
static volatile uint8_t ata_lock = 0;
//static volatile uint8_t ata_lock = 0;
static spin_lock_t ata_lock = { 0 };
/* TODO support other sector sizes */
#define ATA_SECTOR_SIZE 512
@ -302,7 +303,7 @@ static void ata_device_read_sector(struct ata_device * dev, uint32_t lba, uint8_
uint16_t bus = dev->io_base;
uint8_t slave = dev->slave;
spin_lock(&ata_lock);
spin_lock(ata_lock);
int errors = 0;
try_again:
@ -323,7 +324,7 @@ try_again:
errors++;
if (errors > 4) {
debug_print(WARNING, "-- Too many errors trying to read this block. Bailing.");
spin_unlock(&ata_lock);
spin_unlock(ata_lock);
return;
}
goto try_again;
@ -332,14 +333,14 @@ try_again:
int size = 256;
inportsm(bus,buf,size);
ata_wait(dev, 0);
spin_unlock(&ata_lock);
spin_unlock(ata_lock);
}
static void ata_device_write_sector(struct ata_device * dev, uint32_t lba, uint8_t * buf) {
uint16_t bus = dev->io_base;
uint8_t slave = dev->slave;
spin_lock(&ata_lock);
spin_lock(ata_lock);
outportb(bus + ATA_REG_CONTROL, 0x02);
@ -358,7 +359,7 @@ static void ata_device_write_sector(struct ata_device * dev, uint32_t lba, uint8
outportsm(bus,buf,size);
outportb(bus + 0x07, ATA_CMD_CACHE_FLUSH);
ata_wait(dev, 0);
spin_unlock(&ata_lock);
spin_unlock(ata_lock);
}
static int buffer_compare(uint32_t * ptr1, uint32_t * ptr2, size_t size) {

View File

@ -38,7 +38,7 @@ typedef struct {
unsigned int cache_entries; /* Size of ->disk_cache */
unsigned int cache_time; /* "timer" that increments with each cache read/write */
uint8_t volatile lock; /* Synchronization lock point */
spin_lock_t lock; /* Synchronization lock point */
uint8_t bgd_block_span;
uint8_t bgd_offset;
@ -120,14 +120,14 @@ static int read_block(ext2_fs_t * this, unsigned int block_no, uint8_t * buf) {
}
/* This operation requires the filesystem lock to be obtained */
spin_lock(&this->lock);
spin_lock(this->lock);
/* We can make reads without a cache in place. */
if (!DC) {
/* In such cases, we read directly from the block device */
read_fs(this->block_device, block_no * this->block_size, this->block_size, (uint8_t *)buf);
/* We are done, release the lock */
spin_unlock(&this->lock);
spin_unlock(this->lock);
/* And return SUCCESS */
return E_SUCCESS;
}
@ -145,7 +145,7 @@ static int read_block(ext2_fs_t * this, unsigned int block_no, uint8_t * buf) {
/* Read the block */
memcpy(buf, DC[i].block, this->block_size);
/* Release the lock */
spin_unlock(&this->lock);
spin_unlock(this->lock);
/* Success! */
return E_SUCCESS;
}
@ -178,7 +178,7 @@ static int read_block(ext2_fs_t * this, unsigned int block_no, uint8_t * buf) {
DC[oldest].dirty = 0;
/* Release the lock */
spin_unlock(&this->lock);
spin_unlock(this->lock);
/* And return success */
return E_SUCCESS;
@ -199,7 +199,7 @@ static int write_block(ext2_fs_t * this, unsigned int block_no, uint8_t *buf) {
}
/* This operation requires the filesystem lock */
spin_lock(&this->lock);
spin_lock(this->lock);
/* Find the entry in the cache */
int oldest = -1;
@ -210,7 +210,7 @@ static int write_block(ext2_fs_t * this, unsigned int block_no, uint8_t *buf) {
DC[i].last_use = get_cache_time(this);
DC[i].dirty = 1;
memcpy(DC[i].block, buf, this->block_size);
spin_unlock(&this->lock);
spin_unlock(this->lock);
return E_SUCCESS;
}
if (DC[i].last_use < oldest_age) {
@ -233,7 +233,7 @@ static int write_block(ext2_fs_t * this, unsigned int block_no, uint8_t *buf) {
DC[oldest].dirty = 1;
/* Release the lock */
spin_unlock(&this->lock);
spin_unlock(this->lock);
/* We're done. */
return E_SUCCESS;
@ -241,7 +241,7 @@ static int write_block(ext2_fs_t * this, unsigned int block_no, uint8_t *buf) {
static unsigned int ext2_sync(ext2_fs_t * this) {
/* This operation requires the filesystem lock */
spin_lock(&this->lock);
spin_lock(this->lock);
/* Flush each cache entry. */
for (unsigned int i = 0; i < this->cache_entries; ++i) {
@ -251,7 +251,7 @@ static unsigned int ext2_sync(ext2_fs_t * this) {
}
/* Release the lock */
spin_unlock(&this->lock);
spin_unlock(this->lock);
return 0;
}

View File

@ -15,13 +15,13 @@
typedef struct packet_manager {
/* uh, nothing, lol */
list_t * exchanges;
volatile uint8_t lock;
spin_lock_t lock;
} pex_t;
typedef struct packet_exchange {
char * name;
char fresh;
volatile uint8_t lock;
spin_lock_t lock;
fs_node_t * server_pipe;
list_t * clients;
} pex_ex_t;
@ -130,12 +130,12 @@ static uint32_t write_server(fs_node_t * node, uint32_t offset, uint32_t size, u
if (head->target == NULL) {
/* Broadcast packet */
spin_lock(&p->lock);
spin_lock(p->lock);
foreach(f, p->clients) {
debug_print(INFO, "Sending to client 0x%x", f->value);
send_to_client(p, (pex_client_t *)f->value, size - sizeof(header_t), head->data);
}
spin_unlock(&p->lock);
spin_unlock(p->lock);
debug_print(INFO, "Done broadcasting to clients.");
return size;
} else if (head->target->parent != p) {
@ -221,7 +221,7 @@ static void close_client(fs_node_t * node) {
debug_print(WARNING, "Closing packetfs client: 0x%x:0x%x", p, c);
spin_lock(&p->lock);
spin_lock(p->lock);
node_t * n = list_find(p->clients, c);
if (n && n->owner == p->clients) {
@ -229,7 +229,7 @@ static void close_client(fs_node_t * node) {
free(n);
}
spin_unlock(&p->lock);
spin_unlock(p->lock);
char tmp[1];
@ -301,11 +301,11 @@ static struct dirent * readdir_packetfs(fs_node_t *node, uint32_t index) {
return NULL;
}
spin_lock(&p->lock);
spin_lock(p->lock);
foreach(f, p->exchanges) {
if (i == index) {
spin_unlock(&p->lock);
spin_unlock(p->lock);
pex_ex_t * t = (pex_ex_t *)f->value;
struct dirent * out = malloc(sizeof(struct dirent));
memset(out, 0x00, sizeof(struct dirent));
@ -317,7 +317,7 @@ static struct dirent * readdir_packetfs(fs_node_t *node, uint32_t index) {
}
}
spin_unlock(&p->lock);
spin_unlock(p->lock);
return NULL;
}
@ -341,17 +341,17 @@ static fs_node_t * finddir_packetfs(fs_node_t * node, char * name) {
debug_print(INFO, "[pex] finddir(%s)", name);
spin_lock(&p->lock);
spin_lock(p->lock);
foreach(f, p->exchanges) {
pex_ex_t * t = (pex_ex_t *)f->value;
if (!strcmp(name, t->name)) {
spin_unlock(&p->lock);
spin_unlock(p->lock);
return file_from_pex(t);
}
}
spin_unlock(&p->lock);
spin_unlock(p->lock);
return NULL;
}
@ -363,12 +363,12 @@ static void create_packetfs(fs_node_t *parent, char *name, uint16_t permission)
debug_print(NOTICE, "[pex] create(%s)", name);
spin_lock(&p->lock);
spin_lock(p->lock);
foreach(f, p->exchanges) {
pex_ex_t * t = (pex_ex_t *)f->value;
if (!strcmp(name, t->name)) {
spin_unlock(&p->lock);
spin_unlock(p->lock);
/* Already exists */
return;
}
@ -379,14 +379,15 @@ static void create_packetfs(fs_node_t *parent, char *name, uint16_t permission)
new_exchange->name = strdup(name);
new_exchange->fresh = 1;
new_exchange->lock = 0;
new_exchange->clients = list_create();
new_exchange->server_pipe = make_pipe(4096);
spin_init(new_exchange->lock);
/* XXX Create exchange server pipe */
list_insert(p->exchanges, new_exchange);
spin_unlock(&p->lock);
spin_unlock(p->lock);
}
@ -403,7 +404,7 @@ static void unlink_packetfs(fs_node_t *parent, char *name) {
int i = -1, j = 0;
spin_lock(&p->lock);
spin_lock(p->lock);
foreach(f, p->exchanges) {
pex_ex_t * t = (pex_ex_t *)f->value;
@ -419,13 +420,15 @@ static void unlink_packetfs(fs_node_t *parent, char *name) {
list_remove(p->exchanges, i);
}
spin_unlock(&p->lock);
spin_unlock(p->lock);
}
static fs_node_t * packetfs_manager(void) {
pex_t * pex = malloc(sizeof(pex_t));
pex->exchanges = list_create();
pex->lock = 0;
spin_init(pex->lock);
fs_node_t * fnode = malloc(sizeof(fs_node_t));
memset(fnode, 0x00, sizeof(fs_node_t));
fnode->inode = 0;

View File

@ -38,7 +38,9 @@ static void find_rtl(uint32_t device, uint16_t vendorid, uint16_t deviceid, void
static void net_handler_enqueue(void * buffer);
static list_t * net_queue = NULL;
static volatile uint8_t net_queue_lock = 0;
//static volatile uint8_t net_queue_lock = 0;
static spin_lock_t net_queue_lock = { 0 };
static int rtl_irq = 0;
static uint32_t rtl_iobase = 0;
@ -62,16 +64,17 @@ static fs_node_t * irc_socket;
static uint32_t seq_no = 0xff0000;
static uint32_t ack_no = 0x0;
static volatile uint8_t _lock;
//static volatile uint8_t _lock;
static spin_lock_t _lock;
static int next_tx_buf(void) {
int out;
spin_lock(&_lock);
spin_lock(_lock);
out = next_tx;
next_tx++;
if (next_tx == 4) {
next_tx = 0;
}
spin_unlock(&_lock);
spin_unlock(_lock);
return out;
}
@ -397,7 +400,9 @@ static char irc_input[400] = {'\0'};
static char irc_prompt[100] = {'\0'};
static char irc_nick[32] = {'\0'};
static char irc_payload[512];
static volatile uint8_t irc_tty_lock = 0;
static spin_lock_t irc_tty_lock = { 0 };
//static volatile uint8_t irc_tty_lock = 0;
//static struct netif rtl_netif;
static void irc_send(char * payload) {
@ -419,7 +424,7 @@ static void handle_irc_packet(fs_node_t * tty, size_t size, uint8_t * packet) {
if ((uintptr_t)e > (uintptr_t)packet + size) {
break;
}
spin_lock(&irc_tty_lock);
spin_lock(irc_tty_lock);
if (!e) {
/* XXX */
@ -495,7 +500,7 @@ prompt_:
fprintf(tty, "%s", irc_prompt);
fprintf(tty, "%s", irc_input);
spin_unlock(&irc_tty_lock);
spin_unlock(irc_tty_lock);
if (!e) break;
@ -621,11 +626,11 @@ static struct ethernet_packet * net_receive(void) {
while (!net_queue->length) {
sleep_on(rx_wait);
}
spin_lock(&net_queue_lock);
spin_lock(net_queue_lock);
node_t * n = list_dequeue(net_queue);
struct ethernet_packet * eth = (struct ethernet_packet *)n->value;
free(n);
spin_unlock(&net_queue_lock);
spin_unlock(net_queue_lock);
return eth;
}
@ -654,11 +659,11 @@ static void net_handler(void * data, char * name) {
static void net_handler_enqueue(void * buffer) {
/* XXX size? source? */
spin_lock(&net_queue_lock);
spin_lock(net_queue_lock);
list_insert(net_queue, buffer);
spin_unlock(&net_queue_lock);
spin_unlock(net_queue_lock);
}
static void parse_dns_response(fs_node_t * tty, void * last_packet) {
@ -953,11 +958,11 @@ static int tty_readline(fs_node_t * dev, char * linebuf, int max) {
debug_print(WARNING, "Read nothing?");
continue;
}
spin_lock(&irc_tty_lock);
spin_lock(irc_tty_lock);
linebuf[read] = buf[0];
if (buf[0] == '\n') {
linebuf[read] = 0;
spin_unlock(&irc_tty_lock);
spin_unlock(irc_tty_lock);
break;
} else if (buf[0] == 0x08) {
if (read > 0) {
@ -969,18 +974,18 @@ static int tty_readline(fs_node_t * dev, char * linebuf, int max) {
switch (buf[0]) {
case 0x0C: /* ^L */
/* Should reset display here */
spin_unlock(&irc_tty_lock);
spin_unlock(irc_tty_lock);
break;
default:
/* do nothing */
spin_unlock(&irc_tty_lock);
spin_unlock(irc_tty_lock);
break;
}
} else {
fprintf(dev, "%c", buf[0]);
read += r;
}
spin_unlock(&irc_tty_lock);
spin_unlock(irc_tty_lock);
}
tty_set_buffered(dev);
return read;
@ -1050,7 +1055,7 @@ DEFINE_SHELL_FUNCTION(irc_join, "irc channel tool") {
fprintf(tty, irc_prompt);
int c = tty_readline(tty, irc_input, 400);
spin_lock(&irc_tty_lock);
spin_lock(irc_tty_lock);
irc_input[c] = '\0';
@ -1058,7 +1063,7 @@ DEFINE_SHELL_FUNCTION(irc_join, "irc channel tool") {
fprintf(tty, "\n");
sprintf(irc_payload, "PART %s\r\n", channel);
irc_send(irc_payload);
spin_unlock(&irc_tty_lock);
spin_unlock(irc_tty_lock);
break;
}
@ -1079,7 +1084,7 @@ DEFINE_SHELL_FUNCTION(irc_join, "irc channel tool") {
}
memset(irc_input, 0x00, sizeof(irc_input));
spin_unlock(&irc_tty_lock);
spin_unlock(irc_tty_lock);
}
memset(irc_prompt, 0x00, sizeof(irc_prompt));
memset(irc_input, 0x00, sizeof(irc_input));

View File

@ -34,7 +34,8 @@ static int snd_mixer_ioctl(fs_node_t * node, int request, void * argp);
static void snd_mixer_open(fs_node_t * node, unsigned int flags);
static void snd_mixer_close(fs_node_t * node);
static uint8_t _devices_lock;
static spin_lock_t _devices_lock;
static list_t _devices;
static fs_node_t _dsp_fnode = {
.name = "dsp",
@ -51,7 +52,7 @@ static fs_node_t _mixer_fnode = {
.open = snd_mixer_open,
.close = snd_mixer_close,
};
static uint8_t _buffers_lock;
static spin_lock_t _buffers_lock;
static list_t _buffers;
static uint32_t _next_device_id = SND_DEVICE_MAIN;
@ -59,7 +60,7 @@ int snd_register(snd_device_t * device) {
int rv = 0;
debug_print(WARNING, "[snd] _devices lock: %d", _devices_lock);
spin_lock(&_devices_lock);
spin_lock(_devices_lock);
device->id = _next_device_id;
_next_device_id++;
if (list_find(&_devices, device)) {
@ -71,7 +72,7 @@ int snd_register(snd_device_t * device) {
debug_print(NOTICE, "[snd] %s registered", device->name);
snd_register_cleanup:
spin_unlock(&_devices_lock);
spin_unlock(_devices_lock);
return rv;
}
@ -88,7 +89,7 @@ int snd_unregister(snd_device_t * device) {
debug_print(NOTICE, "[snd] %s unregistered", device->name);
snd_unregister_cleanup:
spin_unlock(&_devices_lock);
spin_unlock(_devices_lock);
return rv;
}
@ -108,19 +109,19 @@ static void snd_dsp_open(fs_node_t * node, unsigned int flags) {
*/
/* Allocate a buffer for the node and keep a reference for ourselves */
node->device = ring_buffer_create(SND_BUF_SIZE);
spin_lock(&_buffers_lock);
spin_lock(_buffers_lock);
list_insert(&_buffers, node->device);
spin_unlock(&_buffers_lock);
spin_unlock(_buffers_lock);
}
static void snd_dsp_close(fs_node_t * node) {
spin_lock(&_buffers_lock);
spin_lock(_buffers_lock);
list_delete(&_buffers, list_find(&_buffers, node->device));
spin_unlock(&_buffers_lock);
spin_unlock(_buffers_lock);
}
static snd_device_t * snd_device_by_id(uint32_t device_id) {
spin_lock(&_devices_lock);
spin_lock(_devices_lock);
snd_device_t * out = NULL;
snd_device_t * cur = NULL;
@ -130,7 +131,7 @@ static snd_device_t * snd_device_by_id(uint32_t device_id) {
out = cur;
}
}
spin_unlock(&_devices_lock);
spin_unlock(_devices_lock);
return out;
}
@ -198,7 +199,7 @@ int snd_request_buf(snd_device_t * device, uint32_t size, uint8_t *buffer) {
memset(buffer, 0, size);
spin_lock(&_buffers_lock);
spin_lock(_buffers_lock);
foreach(buf_node, &_buffers) {
ring_buffer_t * buf = buf_node->value;
/* ~0x3 is to ensure we don't read partial samples or just a single channel */
@ -222,19 +223,19 @@ int snd_request_buf(snd_device_t * device, uint32_t size, uint8_t *buffer) {
bytes_left -= this_read_size;
}
}
spin_unlock(&_buffers_lock);
spin_unlock(_buffers_lock);
return size;
}
static snd_device_t * snd_main_device() {
spin_lock(&_devices_lock);
spin_lock(_devices_lock);
foreach(node, &_devices) {
spin_unlock(&_devices_lock);
spin_unlock(_devices_lock);
return node->value;
}
spin_unlock(&_devices_lock);
spin_unlock(_devices_lock);
return NULL;
}

View File

@ -18,7 +18,7 @@
#define TMPFS_TYPE_FILE 1
#define TMPFS_TYPE_DIR 2
uint8_t volatile tmpfs_lock = 0;
static spin_lock_t tmpfs_lock = { 0 };
struct tmpfs_dir * tmpfs_root = NULL;
@ -26,7 +26,7 @@ static fs_node_t * tmpfs_from_dir(struct tmpfs_dir * d);
static struct tmpfs_file * tmpfs_file_new(char * name) {
spin_lock(&tmpfs_lock);
spin_lock(tmpfs_lock);
struct tmpfs_file * t = malloc(sizeof(struct tmpfs_file));
t->name = strdup(name);
@ -45,12 +45,12 @@ static struct tmpfs_file * tmpfs_file_new(char * name) {
t->blocks[i] = NULL;
}
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
return t;
}
static struct tmpfs_dir * tmpfs_dir_new(char * name, struct tmpfs_dir * parent) {
spin_lock(&tmpfs_lock);
spin_lock(tmpfs_lock);
struct tmpfs_dir * d = malloc(sizeof(struct tmpfs_dir));
d->name = strdup(name);
@ -63,7 +63,7 @@ static struct tmpfs_dir * tmpfs_dir_new(char * name, struct tmpfs_dir * parent)
d->ctime = d->atime;
d->files = list_create();
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
return d;
}
@ -82,7 +82,7 @@ static void tmpfs_file_blocks_embiggen(struct tmpfs_file * t) {
static char * tmpfs_file_getset_block(struct tmpfs_file * t, size_t blockid, int create) {
debug_print(INFO, "Reading block %d from file %s", blockid, t->name);
if (create) {
spin_lock(&tmpfs_lock);
spin_lock(tmpfs_lock);
while (blockid >= t->pointers) {
tmpfs_file_blocks_embiggen(t);
}
@ -91,7 +91,7 @@ static char * tmpfs_file_getset_block(struct tmpfs_file * t, size_t blockid, int
t->blocks[t->block_count] = malloc(BLOCKSIZE);
t->block_count += 1;
}
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
} else {
if (blockid >= t->block_count) {
debug_print(CRITICAL, "This will probably end badly.");
@ -283,22 +283,23 @@ static fs_node_t * finddir_tmpfs(fs_node_t * node, char * name) {
struct tmpfs_dir * d = (struct tmpfs_dir *)node->device;
spin_lock(&tmpfs_lock);
spin_lock(tmpfs_lock);
foreach(f, d->files) {
struct tmpfs_file * t = (struct tmpfs_file *)f->value;
if (!strcmp(name, t->name)) {
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
switch (t->type) {
case TMPFS_TYPE_FILE:
return tmpfs_from_file(t);
case TMPFS_TYPE_DIR:
return tmpfs_from_dir((struct tmpfs_dir *)t);
}
return NULL;
}
}
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
return NULL;
}
@ -306,7 +307,7 @@ static fs_node_t * finddir_tmpfs(fs_node_t * node, char * name) {
static void unlink_tmpfs(fs_node_t * node, char * name) {
struct tmpfs_dir * d = (struct tmpfs_dir *)node->device;
int i = -1, j = 0;
spin_lock(&tmpfs_lock);
spin_lock(tmpfs_lock);
foreach(f, d->files) {
struct tmpfs_file * t = (struct tmpfs_file *)f->value;
@ -323,7 +324,7 @@ static void unlink_tmpfs(fs_node_t * node, char * name) {
list_remove(d->files, i);
}
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
return;
}
@ -333,16 +334,16 @@ static void create_tmpfs(fs_node_t *parent, char *name, uint16_t permission) {
struct tmpfs_dir * d = (struct tmpfs_dir *)parent->device;
debug_print(CRITICAL, "Creating TMPFS file %s in %s", name, d->name);
spin_lock(&tmpfs_lock);
spin_lock(tmpfs_lock);
foreach(f, d->files) {
struct tmpfs_file * t = (struct tmpfs_file *)f->value;
if (!strcmp(name, t->name)) {
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
debug_print(WARNING, "... already exists.");
return; /* Already exists */
}
}
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
debug_print(NOTICE, "... creating a new file.");
struct tmpfs_file * t = tmpfs_file_new(name);
@ -350,9 +351,9 @@ static void create_tmpfs(fs_node_t *parent, char *name, uint16_t permission) {
t->uid = current_process->user;
t->gid = current_process->user;
spin_lock(&tmpfs_lock);
spin_lock(tmpfs_lock);
list_insert(d->files, t);
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
}
static void mkdir_tmpfs(fs_node_t * parent, char * name, uint16_t permission) {
@ -361,16 +362,16 @@ static void mkdir_tmpfs(fs_node_t * parent, char * name, uint16_t permission) {
struct tmpfs_dir * d = (struct tmpfs_dir *)parent->device;
debug_print(CRITICAL, "Creating TMPFS directory %s (in %s)", name, d->name);
spin_lock(&tmpfs_lock);
spin_lock(tmpfs_lock);
foreach(f, d->files) {
struct tmpfs_file * t = (struct tmpfs_file *)f->value;
if (!strcmp(name, t->name)) {
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
debug_print(WARNING, "... already exists.");
return; /* Already exists */
}
}
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
debug_print(NOTICE, "... creating a new directory.");
struct tmpfs_dir * out = tmpfs_dir_new(name, d);
@ -378,9 +379,9 @@ static void mkdir_tmpfs(fs_node_t * parent, char * name, uint16_t permission) {
out->uid = current_process->user;
out->gid = current_process->user;
spin_lock(&tmpfs_lock);
spin_lock(tmpfs_lock);
list_insert(d->files, out);
spin_unlock(&tmpfs_lock);
spin_unlock(tmpfs_lock);
}
static fs_node_t * tmpfs_from_dir(struct tmpfs_dir * d) {