Cleanup and improve spin lock code
parent 3820512357
commit 0dbd5ab089
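The diff below replaces the old raw `volatile uint8_t` spin locks (and the `__sync_lock_test_and_set` helpers in main.c) with a dedicated `spin_lock_t` type, a two-element `volatile int` array holding the lock word and a waiter count, implemented in the new kernel/spin.c. Callers now pass the lock itself rather than its address, and initialize it with `spin_init()` or a `{ 0 }` static initializer; unlocking clears the lock word and yields via `switch_task(1)` when another task is waiting. A minimal usage sketch of the new API (the lock and counter names here are illustrative, not part of the commit):

#include <system.h>

static spin_lock_t example_lock = { 0 };   /* static init; spin_init(example_lock) also works */
static int shared_counter = 0;

void bump_counter(void) {
	spin_lock(example_lock);    /* no '&': spin_lock_t is an array type and decays to a pointer */
	shared_counter++;
	spin_unlock(example_lock);  /* clears the lock word, yields if another task is spinning */
}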
@@ -51,13 +51,13 @@ static inline void ring_buffer_increment_write(ring_buffer_t * ring_buffer) {
 size_t ring_buffer_read(ring_buffer_t * ring_buffer, size_t size, uint8_t * buffer) {
 	size_t collected = 0;
 	while (collected == 0) {
-		spin_lock(&ring_buffer->lock);
+		spin_lock(ring_buffer->lock);
 		while (ring_buffer_unread(ring_buffer) > 0 && collected < size) {
 			buffer[collected] = ring_buffer->buffer[ring_buffer->read_ptr];
 			ring_buffer_increment_read(ring_buffer);
 			collected++;
 		}
-		spin_unlock(&ring_buffer->lock);
+		spin_unlock(ring_buffer->lock);
 		wakeup_queue(ring_buffer->wait_queue_writers);
 		if (collected == 0) {
 			if (sleep_on(ring_buffer->wait_queue_readers) && ring_buffer->internal_stop) {
@@ -73,7 +73,7 @@ size_t ring_buffer_read(ring_buffer_t * ring_buffer, size_t size, uint8_t * buff
 size_t ring_buffer_write(ring_buffer_t * ring_buffer, size_t size, uint8_t * buffer) {
 	size_t written = 0;
 	while (written < size) {
-		spin_lock(&ring_buffer->lock);
+		spin_lock(ring_buffer->lock);
 
 		while (ring_buffer_available(ring_buffer) > 0 && written < size) {
 			ring_buffer->buffer[ring_buffer->write_ptr] = buffer[written];
@@ -81,7 +81,7 @@ size_t ring_buffer_write(ring_buffer_t * ring_buffer, size_t size, uint8_t * buf
 			written++;
 		}
 
-		spin_unlock(&ring_buffer->lock);
+		spin_unlock(ring_buffer->lock);
 		wakeup_queue(ring_buffer->wait_queue_readers);
 		if (written < size) {
 			if (sleep_on(ring_buffer->wait_queue_writers) && ring_buffer->internal_stop) {
@@ -101,9 +101,10 @@ ring_buffer_t * ring_buffer_create(size_t size) {
 	out->buffer = malloc(size);
 	out->write_ptr = 0;
 	out->read_ptr = 0;
-	out->lock = 0;
 	out->size = size;
 
+	spin_init(out->lock);
+
 	out->internal_stop = 0;
 
 	out->wait_queue_readers = list_create();
@@ -97,13 +97,13 @@ uint32_t read_pipe(fs_node_t *node, uint32_t offset, uint32_t size, uint8_t *buf
 
 	size_t collected = 0;
 	while (collected == 0) {
-		spin_lock(&pipe->lock_read);
+		spin_lock(pipe->lock_read);
 		while (pipe_unread(pipe) > 0 && collected < size) {
 			buffer[collected] = pipe->buffer[pipe->read_ptr];
 			pipe_increment_read(pipe);
 			collected++;
 		}
-		spin_unlock(&pipe->lock_read);
+		spin_unlock(pipe->lock_read);
 		wakeup_queue(pipe->wait_queue_writers);
 		/* Deschedule and switch */
 		if (collected == 0) {
@@ -141,7 +141,7 @@ uint32_t write_pipe(fs_node_t *node, uint32_t offset, uint32_t size, uint8_t *bu
 
 	size_t written = 0;
 	while (written < size) {
-		spin_lock(&pipe->lock_write);
+		spin_lock(pipe->lock_write);
 
 #if 0
 		size_t available = 0;
@@ -164,7 +164,7 @@ uint32_t write_pipe(fs_node_t *node, uint32_t offset, uint32_t size, uint8_t *bu
 		}
 #endif
 
-		spin_unlock(&pipe->lock_write);
+		spin_unlock(pipe->lock_write);
 		wakeup_queue(pipe->wait_queue_readers);
 		if (written < size) {
 			sleep_on(pipe->wait_queue_writers);
@@ -242,10 +242,11 @@ fs_node_t * make_pipe(size_t size) {
 	pipe->read_ptr = 0;
 	pipe->size = size;
 	pipe->refcount = 0;
-	pipe->lock_read = 0;
-	pipe->lock_write= 0;
 	pipe->dead = 0;
 
+	spin_init(pipe->lock_read);
+	spin_init(pipe->lock_write);
+
 	pipe->wait_queue_writers = list_create();
 	pipe->wait_queue_readers = list_create();
 
@@ -109,12 +109,13 @@ uint32_t write_fs(fs_node_t *node, uint32_t offset, uint32_t size, uint8_t *buff
 	}
 }
 
-volatile uint8_t tmp_refcount_lock = 0;
+//volatile uint8_t tmp_refcount_lock = 0;
+static spin_lock_t tmp_refcount_lock = { 0 };
 
 void vfs_lock(fs_node_t * node) {
-	spin_lock(&tmp_refcount_lock);
+	spin_lock(tmp_refcount_lock);
 	node->refcount = -1;
-	spin_unlock(&tmp_refcount_lock);
+	spin_unlock(tmp_refcount_lock);
 }
 
 /**
@@ -128,9 +129,9 @@ void open_fs(fs_node_t *node, unsigned int flags) {
 	if (!node) return;
 
 	if (node->refcount >= 0) {
-		spin_lock(&tmp_refcount_lock);
+		spin_lock(tmp_refcount_lock);
 		node->refcount++;
-		spin_unlock(&tmp_refcount_lock);
+		spin_unlock(tmp_refcount_lock);
 	}
 
 	if (node->open) {
@@ -153,7 +154,7 @@ void close_fs(fs_node_t *node) {
 
 	if (node->refcount == -1) return;
 
-	spin_lock(&tmp_refcount_lock);
+	spin_lock(tmp_refcount_lock);
 	node->refcount--;
 	if (node->refcount == 0) {
 		debug_print(NOTICE, "Node refcount [%s] is now 0: %d", node->name, node->refcount);
@@ -164,7 +165,7 @@ void close_fs(fs_node_t *node) {
 
 		free(node);
 	}
-	spin_unlock(&tmp_refcount_lock);
+	spin_unlock(tmp_refcount_lock);
 }
 
 /**
@@ -356,9 +357,9 @@ fs_node_t *clone_fs(fs_node_t *source) {
 	if (!source) return NULL;
 
 	if (source->refcount >= 0) {
-		spin_lock(&tmp_refcount_lock);
+		spin_lock(tmp_refcount_lock);
 		source->refcount++;
-		spin_unlock(&tmp_refcount_lock);
+		spin_unlock(tmp_refcount_lock);
 	}
 
 	return source;
@@ -521,7 +522,8 @@ int vfs_mount_type(char * type, char * arg, char * mountpoint) {
 	return 0;
 }
 
-volatile uint8_t tmp_vfs_lock = 0;
+//volatile uint8_t tmp_vfs_lock = 0;
+static spin_lock_t tmp_vfs_lock = { 0 };
 /**
  * vfs_mount - Mount a file system to the specified path.
  *
@@ -543,7 +545,7 @@ void * vfs_mount(char * path, fs_node_t * local_root) {
 		return NULL;
 	}
 
-	spin_lock(&tmp_vfs_lock);
+	spin_lock(tmp_vfs_lock);
 
 	local_root->refcount = -1;
 
@@ -615,7 +617,7 @@ void * vfs_mount(char * path, fs_node_t * local_root) {
 	}
 
 	free(p);
-	spin_unlock(&tmp_vfs_lock);
+	spin_unlock(tmp_vfs_lock);
 	return ret_val;
 }
 
@@ -3,7 +3,6 @@
 #include <fs.h>
 
 fs_node_t * tmpfs_create(char * name);
-uint8_t volatile tmpfs_lock;
 
 struct tmpfs_file {
 	char * name;
@@ -14,8 +14,10 @@ typedef struct _pipe_device {
 	size_t read_ptr;
 	size_t size;
 	size_t refcount;
-	uint8_t volatile lock_read;
-	uint8_t volatile lock_write;
+	//uint8_t volatile lock_read;
+	//uint8_t volatile lock_write;
+	volatile int lock_read[2];
+	volatile int lock_write[2];
 	list_t * wait_queue_readers;
 	list_t * wait_queue_writers;
 	int dead;
@@ -48,7 +48,7 @@ typedef struct image {
 	uintptr_t user_stack; /* User stack */
 	uintptr_t start;
 	uintptr_t shm_heap;
-	volatile uint8_t lock;
+	volatile int lock[2];
 } image_t;
 
 /* Resizable descriptor table */
@@ -6,7 +6,7 @@ typedef struct {
 	size_t write_ptr;
 	size_t read_ptr;
 	size_t size;
-	uint8_t volatile lock;
+	volatile int lock[2];
 	list_t * wait_queue_readers;
 	list_t * wait_queue_writers;
 	int internal_stop;
@@ -39,8 +39,11 @@ extern char * boot_arg_extra; /* Extra data to pass to init */
 
 extern void *sbrk(uintptr_t increment);
 
-extern void spin_lock(uint8_t volatile * lock);
-extern void spin_unlock(uint8_t volatile * lock);
+/* spin.c */
+typedef volatile int spin_lock_t[2];
+extern void spin_init(spin_lock_t lock);
+extern void spin_lock(spin_lock_t lock);
+extern void spin_unlock(spin_lock_t lock);
 
 extern void return_to_userspace(void);
 
@@ -133,42 +133,42 @@ static void * __attribute__ ((malloc)) klcalloc(uintptr_t nmemb, uintptr_t size)
 static void * __attribute__ ((malloc)) klvalloc(uintptr_t size);
 static void klfree(void * ptr);
 
-static uint8_t volatile mem_lock = 0;
+static spin_lock_t mem_lock = { 0 };
 
 void * __attribute__ ((malloc)) malloc(uintptr_t size) {
-	spin_lock(&mem_lock);
+	spin_lock(mem_lock);
 	void * ret = klmalloc(size);
-	spin_unlock(&mem_lock);
+	spin_unlock(mem_lock);
 	return ret;
 }
 
 void * __attribute__ ((malloc)) realloc(void * ptr, uintptr_t size) {
-	spin_lock(&mem_lock);
+	spin_lock(mem_lock);
 	void * ret = klrealloc(ptr, size);
-	spin_unlock(&mem_lock);
+	spin_unlock(mem_lock);
 	return ret;
 }
 
 void * __attribute__ ((malloc)) calloc(uintptr_t nmemb, uintptr_t size) {
-	spin_lock(&mem_lock);
+	spin_lock(mem_lock);
 	void * ret = klcalloc(nmemb, size);
-	spin_unlock(&mem_lock);
+	spin_unlock(mem_lock);
 	return ret;
 }
 
 void * __attribute__ ((malloc)) valloc(uintptr_t size) {
-	spin_lock(&mem_lock);
+	spin_lock(mem_lock);
 	void * ret = klvalloc(size);
-	spin_unlock(&mem_lock);
+	spin_unlock(mem_lock);
 	return ret;
 }
 
 void free(void * ptr) {
-	spin_lock(&mem_lock);
+	spin_lock(mem_lock);
 	if ((uintptr_t)ptr > placement_pointer) {
 		klfree(ptr);
 	}
-	spin_unlock(&mem_lock);
+	spin_unlock(mem_lock);
 }
 
 
@@ -518,7 +518,7 @@ static void * klmalloc_stack_pop(klmalloc_bin_header *header) {
 		assert((uintptr_t)header->head < (uintptr_t)header + PAGE_SIZE);
 		assert((uintptr_t)header->head > (uintptr_t)header + sizeof(klmalloc_bin_header) - 1);
 	}
 
 	/*
 	 * Remove the current head and point
 	 * the head to where the old head pointed.
@@ -758,7 +758,7 @@ static void klfree(void *ptr) {
 	if (bucket_id > (uintptr_t)NUM_BINS) {
 		bucket_id = BIG_BIN;
 		klmalloc_big_bin_header *bheader = (klmalloc_big_bin_header*)header;
 
 		assert(bheader);
 		assert(bheader->head == NULL);
 		assert((bheader->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
@@ -955,7 +955,7 @@ static void * __attribute__ ((malloc)) klcalloc(uintptr_t nmemb, uintptr_t size)
 /*
  * Allocate memory and zero it before returning
  * a pointer to the newly allocated memory.
  *
  * Implemented by way of a simple malloc followed
  * by a memset to 0x00 across the length of the
  * requested memory chunk.
@@ -23,7 +23,8 @@ uintptr_t placement_pointer = (uintptr_t)&end;
 uintptr_t heap_end = (uintptr_t)NULL;
 uintptr_t kernel_heap_alloc_point = KERNEL_HEAP_INIT;
 
-static volatile uint8_t frame_alloc_lock = 0;
+//static volatile uint8_t frame_alloc_lock = 0;
+static spin_lock_t frame_alloc_lock = { 0 };
 uint32_t first_n_frames(int n);
 
 void
@@ -56,7 +57,7 @@ kmalloc_real(
 			clear_frame(map_to_physical(i));
 		}
 		/* XXX This is going to get touchy... */
-		spin_lock(&frame_alloc_lock);
+		spin_lock(frame_alloc_lock);
 		uint32_t index = first_n_frames((size + 0xFFF) / 0x1000);
 		if (index == 0xFFFFFFFF) {
 			return 0;
@@ -66,7 +67,7 @@ kmalloc_real(
 			page_t * page = get_page((uintptr_t)address + (i * 0x1000),0,kernel_directory);
 			page->frame = index + i;
 		}
-		spin_unlock(&frame_alloc_lock);
+		spin_unlock(frame_alloc_lock);
 	}
 	*phys = map_to_physical((uintptr_t)address);
 }
@@ -220,12 +221,12 @@ alloc_frame(
 		page->user = (is_kernel == 1) ? 0 : 1;
 		return;
 	} else {
-		spin_lock(&frame_alloc_lock);
+		spin_lock(frame_alloc_lock);
 		uint32_t index = first_frame();
 		assert(index != (uint32_t)-1 && "Out of frames.");
 		set_frame(index * 0x1000);
 		page->frame = index;
-		spin_unlock(&frame_alloc_lock);
+		spin_unlock(frame_alloc_lock);
 		page->present = 1;
 		page->rw = (is_writeable == 1) ? 1 : 0;
 		page->user = (is_kernel == 1) ? 0 : 1;
@@ -15,7 +15,8 @@
 #include <list.h>
 
 
-static volatile uint8_t bsl; // big shm lock
+//static volatile uint8_t bsl; // big shm lock
+static spin_lock_t bsl; // big shm lock
 tree_t * shm_tree = NULL;
 
 
@@ -250,7 +251,7 @@ static size_t chunk_size (shm_chunk_t * chunk) {
 
 
 void * shm_obtain (char * path, size_t * size) {
-	spin_lock(&bsl);
+	spin_lock(bsl);
 	process_t * proc = (process_t *)current_process;
 
 	if (proc->group != 0) {
@@ -268,14 +269,14 @@ void * shm_obtain (char * path, size_t * size) {
 
 		if (size == 0) {
 			// The process doesn't want a chunk...?
-			spin_unlock(&bsl);
+			spin_unlock(bsl);
 			return NULL;
 		}
 
 		chunk = create_chunk(node, *size);
 		if (chunk == NULL) {
 			debug_print(ERROR, "Could not allocate a shm_chunk_t");
-			spin_unlock(&bsl);
+			spin_unlock(bsl);
 			return NULL;
 		}
 
@@ -287,14 +288,14 @@ void * shm_obtain (char * path, size_t * size) {
 	void * vshm_start = map_in(chunk, proc);
 	*size = chunk_size(chunk);
 
-	spin_unlock(&bsl);
+	spin_unlock(bsl);
 	invalidate_page_tables();
 
 	return vshm_start;
 }
 
 int shm_release (char * path) {
-	spin_lock(&bsl);
+	spin_lock(bsl);
 	process_t * proc = (process_t *)current_process;
 
 	if (proc->group != 0) {
@@ -304,7 +305,7 @@ int shm_release (char * path) {
 	/* First, find the right chunk */
 	shm_node_t * _node = get_node(path, 0);
 	if (!_node) {
-		spin_unlock(&bsl);
+		spin_unlock(bsl);
 		return 1;
 	}
 	shm_chunk_t * chunk = _node->chunk;
@@ -319,7 +320,7 @@ int shm_release (char * path) {
 		}
 	}
 	if (node == NULL) {
-		spin_unlock(&bsl);
+		spin_unlock(bsl);
 		return 1;
 	}
 
@@ -340,14 +341,14 @@ int shm_release (char * path) {
 	free(node);
 	free(mapping);
 
-	spin_unlock(&bsl);
+	spin_unlock(bsl);
 	return 0;
 }
 
 /* This function should only be called if the process's address space
  * is about to be destroyed -- chunks will not be unmounted therefrom ! */
 void shm_release_all (process_t * proc) {
-	spin_lock(&bsl);
+	spin_lock(bsl);
 
 	node_t * node;
 	while ((node = list_pop(proc->shm_mappings)) != NULL) {
@@ -362,7 +363,7 @@ void shm_release_all (process_t * proc) {
 	proc->shm_mappings->head = proc->shm_mappings->tail = NULL;
 	proc->shm_mappings->length = 0;
 
-	spin_unlock(&bsl);
+	spin_unlock(bsl);
 }
 
 
kernel/spin.c (new file, 57 lines)
@@ -0,0 +1,57 @@
+/* vim: tabstop=4 shiftwidth=4 noexpandtab
+ * This file is part of ToaruOS and is released under the terms
+ * of the NCSA / University of Illinois License - see LICENSE.md
+ * Copyright (C) 2015 Dale Weiler
+ *
+ * Spin locks with waiters
+ *
+ */
+#include <system.h>
+
+static inline int arch_atomic_swap(volatile int * x, int v) {
+	asm("xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory");
+	return v;
+}
+
+static inline void arch_atomic_store(volatile int * p, int x) {
+	asm("movl %1, %0" : "=m"(*p) : "r"(x) : "memory");
+}
+
+static inline void arch_atomic_inc(volatile int * x) {
+	asm("lock; incl %0" : "=m"(*x) : "m"(*x) : "memory");
+}
+
+static inline void arch_atomic_dec(volatile int * x) {
+	asm("lock; decl %0" : "=m"(*x) : "m"(*x) : "memory");
+}
+
+void spin_wait(volatile int * addr, volatile int * waiters) {
+	if (waiters) {
+		arch_atomic_inc(waiters);
+	}
+	while (*addr) {
+		switch_task(1);
+	}
+	if (waiters) {
+		arch_atomic_dec(waiters);
+	}
+}
+
+void spin_lock(spin_lock_t lock) {
+	while (arch_atomic_swap(lock, 1)) {
+		spin_wait(lock, lock+1);
+	}
+}
+
+void spin_init(spin_lock_t lock) {
+	lock[0] = 0;
+	lock[1] = 0;
+}
+
+void spin_unlock(spin_lock_t lock) {
+	if (lock[0]) {
+		arch_atomic_store(lock, 0);
+		if (lock[1])
+			switch_task(1);
+	}
+}
@@ -24,10 +24,15 @@ list_t * sleep_queue;
 volatile process_t * current_process = NULL;
 process_t * kernel_idle_task = NULL;
 
-static uint8_t volatile tree_lock = 0;
-static uint8_t volatile process_queue_lock = 0;
-static uint8_t volatile wait_lock_tmp = 0;
-static uint8_t volatile sleep_lock = 0;
+//static uint8_t volatile tree_lock = 0;
+//static uint8_t volatile process_queue_lock = 0;
+//static uint8_t volatile wait_lock_tmp = 0;
+//static uint8_t volatile sleep_lock = 0;
+
+static spin_lock_t tree_lock = { 0 };
+static spin_lock_t process_queue_lock = { 0 };
+static spin_lock_t wait_lock_tmp = { 0 };
+static spin_lock_t sleep_lock = { 0 };
 
 /* Default process name string */
 char * default_name = "[unnamed]";
@@ -112,9 +117,9 @@ void make_process_ready(process_t * proc) {
 		/* XXX can't wake from timed sleep */
 		if (proc->timed_sleep_node) {
 			IRQ_OFF;
-			spin_lock(&sleep_lock);
+			spin_lock(sleep_lock);
 			list_delete(sleep_queue, proc->timed_sleep_node);
-			spin_unlock(&sleep_lock);
+			spin_unlock(sleep_lock);
 			IRQ_RES;
 			proc->sleep_node.owner = NULL;
 			free(proc->timed_sleep_node->value);
@@ -122,14 +127,14 @@ void make_process_ready(process_t * proc) {
 			/* Else: I have no idea what happened. */
 		} else {
 			proc->sleep_interrupted = 1;
-			spin_lock(&wait_lock_tmp);
+			spin_lock(wait_lock_tmp);
 			list_delete((list_t*)proc->sleep_node.owner, &proc->sleep_node);
-			spin_unlock(&wait_lock_tmp);
+			spin_unlock(wait_lock_tmp);
 		}
 	}
-	spin_lock(&process_queue_lock);
+	spin_lock(process_queue_lock);
 	list_append(process_queue, &proc->sched_node);
-	spin_unlock(&process_queue_lock);
+	spin_unlock(process_queue_lock);
 }
 
 
@@ -155,11 +160,11 @@ void delete_process(process_t * proc) {
 	}
 
 	/* Remove the entry. */
-	spin_lock(&tree_lock);
+	spin_lock(tree_lock);
 	/* Reparent everyone below me to init */
 	tree_remove_reparent_root(process_tree, entry);
 	list_delete(process_list, list_find(process_list, proc));
-	spin_unlock(&tree_lock);
+	spin_unlock(tree_lock);
 
 	/* Uh... */
 	free(proc);
@@ -239,7 +244,8 @@ process_t * spawn_init(void) {
 	init->image.user_stack = 0;
 	init->image.size = 0;
 	init->image.shm_heap = SHM_START; /* Yeah, a bit of a hack. */
-	init->image.lock = 0;
+
+	spin_init(init->image.lock);
 
 	/* Process is not finished */
 	init->finished = 0;
@@ -291,11 +297,11 @@ void process_disown(process_t * proc) {
 	/* Find the process in the tree */
 	tree_node_t * entry = proc->tree_entry;
 	/* Break it of from its current parent */
-	spin_lock(&tree_lock);
+	spin_lock(tree_lock);
 	tree_break_off(process_tree, entry);
 	/* And insert it back elsewhere */
 	tree_node_insert_child_node(process_tree, process_tree->root, entry);
-	spin_unlock(&tree_lock);
+	spin_unlock(tree_lock);
 }
 
 /*
@@ -340,7 +346,8 @@ process_t * spawn_process(volatile process_t * parent) {
 	debug_print(INFO," }");
 	proc->image.user_stack = parent->image.user_stack;
 	proc->image.shm_heap = SHM_START; /* Yeah, a bit of a hack. */
-	proc->image.lock = 0;
+
+	spin_init(proc->image.lock);
 
 	assert(proc->image.stack && "Failed to allocate kernel stack for new process.");
 
@@ -390,10 +397,10 @@ process_t * spawn_process(volatile process_t * parent) {
 	tree_node_t * entry = tree_node_create(proc);
 	assert(entry && "Failed to allocate a process tree node for new process.");
 	proc->tree_entry = entry;
-	spin_lock(&tree_lock);
+	spin_lock(tree_lock);
 	tree_node_insert_child_node(process_tree, parent->tree_entry, entry);
 	list_insert(process_list, (void *)proc);
-	spin_unlock(&tree_lock);
+	spin_unlock(tree_lock);
 
 	/* Return the new process */
 	return proc;
@@ -409,9 +416,9 @@ uint8_t process_compare(void * proc_v, void * pid_v) {
 process_t * process_from_pid(pid_t pid) {
 	if (pid < 0) return NULL;
 
-	spin_lock(&tree_lock);
+	spin_lock(tree_lock);
 	tree_node_t * entry = tree_find(process_tree,&pid,process_compare);
-	spin_unlock(&tree_lock);
+	spin_unlock(tree_lock);
 	if (entry) {
 		return (process_t *)entry->value;
 	}
@@ -420,7 +427,7 @@ process_t * process_from_pid(pid_t pid) {
 
 process_t * process_get_parent(process_t * process) {
 	process_t * result = NULL;
-	spin_lock(&tree_lock);
+	spin_lock(tree_lock);
 
 	tree_node_t * entry = process->tree_entry;
 
@@ -428,7 +435,7 @@ process_t * process_get_parent(process_t * process) {
 		result = entry->parent->value;
 	}
 
-	spin_unlock(&tree_lock);
+	spin_unlock(tree_lock);
 	return result;
 }
 
@@ -539,9 +546,9 @@ uint32_t process_move_fd(process_t * proc, int src, int dest) {
 int wakeup_queue(list_t * queue) {
 	int awoken_processes = 0;
 	while (queue->length > 0) {
-		spin_lock(&wait_lock_tmp);
+		spin_lock(wait_lock_tmp);
 		node_t * node = list_pop(queue);
-		spin_unlock(&wait_lock_tmp);
+		spin_unlock(wait_lock_tmp);
 		if (!((process_t *)node->value)->finished) {
 			make_process_ready(node->value);
 		}
@@ -553,9 +560,9 @@ int wakeup_queue(list_t * queue) {
 int wakeup_queue_interrupted(list_t * queue) {
 	int awoken_processes = 0;
 	while (queue->length > 0) {
-		spin_lock(&wait_lock_tmp);
+		spin_lock(wait_lock_tmp);
 		node_t * node = list_pop(queue);
-		spin_unlock(&wait_lock_tmp);
+		spin_unlock(wait_lock_tmp);
 		if (!((process_t *)node->value)->finished) {
 			process_t * proc = node->value;
 			proc->sleep_interrupted = 1;
@@ -574,9 +581,9 @@ int sleep_on(list_t * queue) {
 		return 0;
 	}
 	current_process->sleep_interrupted = 0;
-	spin_lock(&wait_lock_tmp);
+	spin_lock(wait_lock_tmp);
 	list_append(queue, (node_t *)&current_process->sleep_node);
-	spin_unlock(&wait_lock_tmp);
+	spin_unlock(wait_lock_tmp);
 	switch_task(0);
 	return current_process->sleep_interrupted;
 }
@@ -588,7 +595,7 @@ int process_is_ready(process_t * proc) {
 
 void wakeup_sleepers(unsigned long seconds, unsigned long subseconds) {
 	IRQ_OFF;
-	spin_lock(&sleep_lock);
+	spin_lock(sleep_lock);
 	if (sleep_queue->length) {
 		sleeper_t * proc = ((sleeper_t *)sleep_queue->head->value);
 		while (proc && (proc->end_tick < seconds || (proc->end_tick == seconds && proc->end_subtick <= subseconds))) {
@@ -607,7 +614,7 @@ void wakeup_sleepers(unsigned long seconds, unsigned long subseconds) {
 			}
 		}
 	}
-	spin_unlock(&sleep_lock);
+	spin_unlock(sleep_lock);
 	IRQ_RES;
 }
 
@@ -619,7 +626,7 @@ void sleep_until(process_t * process, unsigned long seconds, unsigned long subse
 	process->sleep_node.owner = sleep_queue;
 
 	IRQ_OFF;
-	spin_lock(&sleep_lock);
+	spin_lock(sleep_lock);
 	node_t * before = NULL;
 	foreach(node, sleep_queue) {
 		sleeper_t * candidate = ((sleeper_t *)node->value);
@@ -633,7 +640,7 @@ void sleep_until(process_t * process, unsigned long seconds, unsigned long subse
 	proc->end_tick = seconds;
 	proc->end_subtick = subseconds;
 	process->timed_sleep_node = list_insert_after(sleep_queue, before, proc);
-	spin_unlock(&sleep_lock);
+	spin_unlock(sleep_lock);
 	IRQ_RES;
 }
 
@@ -36,8 +36,8 @@ void enter_signal_handler(uintptr_t location, int signum, uintptr_t stack) {
 		debug_print(CRITICAL, "Failed to jump to signal handler!");
 	}
 
-static uint8_t volatile sig_lock;
-static uint8_t volatile sig_lock_b;
+static spin_lock_t sig_lock;
+static spin_lock_t sig_lock_b;
 
 char isdeadly[] = {
 	0, /* 0? */
@@ -136,9 +136,9 @@ void return_from_signal_handler(void) {
 		rets_from_sig = list_create();
 	}
 
-	spin_lock(&sig_lock);
+	spin_lock(sig_lock);
 	list_insert(rets_from_sig, (process_t *)current_process);
-	spin_unlock(&sig_lock);
+	spin_unlock(sig_lock);
 
 	switch_next();
 }
@@ -146,11 +146,11 @@ void return_from_signal_handler(void) {
 void fix_signal_stacks(void) {
 	uint8_t redo_me = 0;
 	if (rets_from_sig) {
-		spin_lock(&sig_lock_b);
+		spin_lock(sig_lock_b);
 		while (rets_from_sig->head) {
-			spin_lock(&sig_lock);
+			spin_lock(sig_lock);
 			node_t * n = list_dequeue(rets_from_sig);
-			spin_unlock(&sig_lock);
+			spin_unlock(sig_lock);
 			if (!n) {
 				continue;
 			}
@@ -168,12 +168,12 @@ void fix_signal_stacks(void) {
 			p->signal_kstack = NULL;
 			make_process_ready(p);
 		}
-		spin_unlock(&sig_lock_b);
+		spin_unlock(sig_lock_b);
 	}
 	if (redo_me) {
-		spin_lock(&sig_lock);
+		spin_lock(sig_lock);
 		list_insert(rets_from_sig, (process_t *)current_process);
-		spin_unlock(&sig_lock);
+		spin_unlock(sig_lock);
 		switch_next();
 	}
 }
@@ -151,7 +151,7 @@ static int sys_sbrk(int size) {
 	if (proc->group != 0) {
 		proc = process_from_pid(proc->group);
 	}
-	spin_lock(&proc->image.lock);
+	spin_lock(proc->image.lock);
 	uintptr_t ret = proc->image.heap;
 	uintptr_t i_ret = ret;
 	while (ret % 0x1000) {
@@ -164,7 +164,7 @@ static int sys_sbrk(int size) {
 		alloc_frame(get_page(proc->image.heap_actual, 1, current_directory), 0, 1);
 		invalidate_tables_at(proc->image.heap_actual);
 	}
-	spin_unlock(&proc->image.lock);
+	spin_unlock(proc->image.lock);
 	return ret;
 }
 
@@ -8,16 +8,6 @@
  */
 #include <system.h>
 
-void spin_lock(uint8_t volatile * lock) {
-	while(__sync_lock_test_and_set(lock, 0x01)) {
-		switch_task(1);
-	}
-}
-
-void spin_unlock(uint8_t volatile * lock) {
-	__sync_lock_release(lock);
-}
-
 char * boot_arg = NULL;
 char * boot_arg_extra = NULL;
 
@@ -26,7 +26,8 @@ struct ata_device {
 	ata_identify_t identity;
 };
 
-static volatile uint8_t ata_lock = 0;
+//static volatile uint8_t ata_lock = 0;
+static spin_lock_t ata_lock = { 0 };
 
 /* TODO support other sector sizes */
 #define ATA_SECTOR_SIZE 512
@@ -302,7 +303,7 @@ static void ata_device_read_sector(struct ata_device * dev, uint32_t lba, uint8_
 	uint16_t bus = dev->io_base;
 	uint8_t slave = dev->slave;
 
-	spin_lock(&ata_lock);
+	spin_lock(ata_lock);
 
 	int errors = 0;
 try_again:
@@ -323,7 +324,7 @@ try_again:
 		errors++;
 		if (errors > 4) {
 			debug_print(WARNING, "-- Too many errors trying to read this block. Bailing.");
-			spin_unlock(&ata_lock);
+			spin_unlock(ata_lock);
 			return;
 		}
 		goto try_again;
@@ -332,14 +333,14 @@ try_again:
 	int size = 256;
 	inportsm(bus,buf,size);
 	ata_wait(dev, 0);
-	spin_unlock(&ata_lock);
+	spin_unlock(ata_lock);
 }
 
 static void ata_device_write_sector(struct ata_device * dev, uint32_t lba, uint8_t * buf) {
 	uint16_t bus = dev->io_base;
 	uint8_t slave = dev->slave;
 
-	spin_lock(&ata_lock);
+	spin_lock(ata_lock);
 
 	outportb(bus + ATA_REG_CONTROL, 0x02);
 
@@ -358,7 +359,7 @@ static void ata_device_write_sector(struct ata_device * dev, uint32_t lba, uint8
 	outportsm(bus,buf,size);
 	outportb(bus + 0x07, ATA_CMD_CACHE_FLUSH);
 	ata_wait(dev, 0);
-	spin_unlock(&ata_lock);
+	spin_unlock(ata_lock);
 }
 
 static int buffer_compare(uint32_t * ptr1, uint32_t * ptr2, size_t size) {
@@ -38,7 +38,7 @@ typedef struct {
 	unsigned int cache_entries; /* Size of ->disk_cache */
 	unsigned int cache_time; /* "timer" that increments with each cache read/write */
 
-	uint8_t volatile lock; /* Synchronization lock point */
+	spin_lock_t lock; /* Synchronization lock point */
 
 	uint8_t bgd_block_span;
 	uint8_t bgd_offset;
@@ -120,14 +120,14 @@ static int read_block(ext2_fs_t * this, unsigned int block_no, uint8_t * buf) {
 	}
 
 	/* This operation requires the filesystem lock to be obtained */
-	spin_lock(&this->lock);
+	spin_lock(this->lock);
 
 	/* We can make reads without a cache in place. */
 	if (!DC) {
 		/* In such cases, we read directly from the block device */
 		read_fs(this->block_device, block_no * this->block_size, this->block_size, (uint8_t *)buf);
 		/* We are done, release the lock */
-		spin_unlock(&this->lock);
+		spin_unlock(this->lock);
 		/* And return SUCCESS */
 		return E_SUCCESS;
 	}
@@ -145,7 +145,7 @@ static int read_block(ext2_fs_t * this, unsigned int block_no, uint8_t * buf) {
 			/* Read the block */
 			memcpy(buf, DC[i].block, this->block_size);
 			/* Release the lock */
-			spin_unlock(&this->lock);
+			spin_unlock(this->lock);
 			/* Success! */
 			return E_SUCCESS;
 		}
@@ -178,7 +178,7 @@ static int read_block(ext2_fs_t * this, unsigned int block_no, uint8_t * buf) {
 	DC[oldest].dirty = 0;
 
 	/* Release the lock */
-	spin_unlock(&this->lock);
+	spin_unlock(this->lock);
 
 	/* And return success */
 	return E_SUCCESS;
@@ -199,7 +199,7 @@ static int write_block(ext2_fs_t * this, unsigned int block_no, uint8_t *buf) {
 	}
 
 	/* This operation requires the filesystem lock */
-	spin_lock(&this->lock);
+	spin_lock(this->lock);
 
 	/* Find the entry in the cache */
 	int oldest = -1;
@@ -210,7 +210,7 @@ static int write_block(ext2_fs_t * this, unsigned int block_no, uint8_t *buf) {
 			DC[i].last_use = get_cache_time(this);
 			DC[i].dirty = 1;
 			memcpy(DC[i].block, buf, this->block_size);
-			spin_unlock(&this->lock);
+			spin_unlock(this->lock);
 			return E_SUCCESS;
 		}
 		if (DC[i].last_use < oldest_age) {
@@ -233,7 +233,7 @@ static int write_block(ext2_fs_t * this, unsigned int block_no, uint8_t *buf) {
 	DC[oldest].dirty = 1;
 
 	/* Release the lock */
-	spin_unlock(&this->lock);
+	spin_unlock(this->lock);
 
 	/* We're done. */
 	return E_SUCCESS;
@@ -241,7 +241,7 @@ static int write_block(ext2_fs_t * this, unsigned int block_no, uint8_t *buf) {
 
 static unsigned int ext2_sync(ext2_fs_t * this) {
 	/* This operation requires the filesystem lock */
-	spin_lock(&this->lock);
+	spin_lock(this->lock);
 
 	/* Flush each cache entry. */
 	for (unsigned int i = 0; i < this->cache_entries; ++i) {
@@ -251,7 +251,7 @@ static unsigned int ext2_sync(ext2_fs_t * this) {
 	}
 
 	/* Release the lock */
-	spin_unlock(&this->lock);
+	spin_unlock(this->lock);
 
 	return 0;
 }
@@ -450,7 +450,7 @@ static int write_inode(ext2_fs_t * this, ext2_inodetable_t *inode, uint32_t inde
 	if (group > BGDS) {
 		return E_BADBLOCK;
 	}
 
 	uint32_t inode_table_block = BGD[group].inode_table;
 	index -= group * this->inodes_per_group;
 	uint32_t block_offset = ((index - 1) * this->inode_size) / this->block_size;
@@ -953,7 +953,7 @@ static ext2_dir_t * direntry_ext2(ext2_fs_t * this, ext2_inodetable_t * inode, u
 			inode_read_block(this, inode, block_nr, block);
 		}
 	}
 
 	free(block);
 	return NULL;
 }
@@ -981,7 +981,7 @@ static fs_node_t * finddir_ext2(fs_node_t *node, char *name) {
 			inode_read_block(this, inode, block_nr, block);
 		}
 		ext2_dir_t *d_ent = (ext2_dir_t *)((uintptr_t)block + dir_offset);
 
 		if (d_ent->inode == 0 || strlen(name) != d_ent->name_len) {
 			dir_offset += d_ent->rec_len;
 			total_offset += d_ent->rec_len;
@@ -1043,7 +1043,7 @@ static void unlink_ext2(fs_node_t * node, char * name) {
 			inode_read_block(this, inode, block_nr, block);
 		}
 		ext2_dir_t *d_ent = (ext2_dir_t *)((uintptr_t)block + dir_offset);
 
 		if (d_ent->inode == 0 || strlen(name) != d_ent->name_len) {
 			dir_offset += d_ent->rec_len;
 			total_offset += d_ent->rec_len;
@@ -1439,7 +1439,7 @@ static fs_node_t * mount_ext2(fs_node_t * block_device) {
 	char * bg_buffer = malloc(this->block_size * sizeof(char));
 	for (uint32_t i = 0; i < BGDS; ++i) {
 		debug_print(INFO, "Block Group Descriptor #%d @ %d", i, this->bgd_offset + i * SB->blocks_per_group);
 		debug_print(INFO, "\tBlock Bitmap @ %d", BGD[i].block_bitmap); {
 			debug_print(INFO, "\t\tExamining block bitmap at %d", BGD[i].block_bitmap);
 			read_block(this, BGD[i].block_bitmap, (uint8_t *)bg_buffer);
 			uint32_t j = 0;
@ -15,13 +15,13 @@
|
|||||||
typedef struct packet_manager {
|
typedef struct packet_manager {
|
||||||
/* uh, nothing, lol */
|
/* uh, nothing, lol */
|
||||||
list_t * exchanges;
|
list_t * exchanges;
|
||||||
volatile uint8_t lock;
|
spin_lock_t lock;
|
||||||
} pex_t;
|
} pex_t;
|
||||||
|
|
||||||
typedef struct packet_exchange {
|
typedef struct packet_exchange {
|
||||||
char * name;
|
char * name;
|
||||||
char fresh;
|
char fresh;
|
||||||
volatile uint8_t lock;
|
spin_lock_t lock;
|
||||||
fs_node_t * server_pipe;
|
fs_node_t * server_pipe;
|
||||||
list_t * clients;
|
list_t * clients;
|
||||||
} pex_ex_t;
|
} pex_ex_t;
|
||||||
@ -130,12 +130,12 @@ static uint32_t write_server(fs_node_t * node, uint32_t offset, uint32_t size, u
|
|||||||
|
|
||||||
if (head->target == NULL) {
|
if (head->target == NULL) {
|
||||||
/* Brodcast packet */
|
/* Brodcast packet */
|
||||||
spin_lock(&p->lock);
|
spin_lock(p->lock);
|
||||||
foreach(f, p->clients) {
|
foreach(f, p->clients) {
|
||||||
debug_print(INFO, "Sending to client 0x%x", f->value);
|
debug_print(INFO, "Sending to client 0x%x", f->value);
|
||||||
send_to_client(p, (pex_client_t *)f->value, size - sizeof(header_t), head->data);
|
send_to_client(p, (pex_client_t *)f->value, size - sizeof(header_t), head->data);
|
||||||
}
|
}
|
||||||
spin_unlock(&p->lock);
|
spin_unlock(p->lock);
|
||||||
debug_print(INFO, "Done broadcasting to clients.");
|
debug_print(INFO, "Done broadcasting to clients.");
|
||||||
return size;
|
return size;
|
||||||
} else if (head->target->parent != p) {
|
} else if (head->target->parent != p) {
|
||||||
@ -221,7 +221,7 @@ static void close_client(fs_node_t * node) {
|
|||||||
|
|
||||||
debug_print(WARNING, "Closing packetfs client: 0x%x:0x%x", p, c);
|
debug_print(WARNING, "Closing packetfs client: 0x%x:0x%x", p, c);
|
||||||
|
|
||||||
spin_lock(&p->lock);
|
spin_lock(p->lock);
|
||||||
|
|
||||||
node_t * n = list_find(p->clients, c);
|
node_t * n = list_find(p->clients, c);
|
||||||
if (n && n->owner == p->clients) {
|
if (n && n->owner == p->clients) {
|
||||||
@ -229,7 +229,7 @@ static void close_client(fs_node_t * node) {
|
|||||||
free(n);
|
free(n);
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock(&p->lock);
|
spin_unlock(p->lock);
|
||||||
|
|
||||||
char tmp[1];
|
char tmp[1];
|
||||||
|
|
||||||
@ -301,11 +301,11 @@ static struct dirent * readdir_packetfs(fs_node_t *node, uint32_t index) {
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_lock(&p->lock);
|
spin_lock(p->lock);
|
||||||
|
|
||||||
foreach(f, p->exchanges) {
|
foreach(f, p->exchanges) {
|
||||||
if (i == index) {
|
if (i == index) {
|
||||||
spin_unlock(&p->lock);
|
spin_unlock(p->lock);
|
||||||
pex_ex_t * t = (pex_ex_t *)f->value;
|
pex_ex_t * t = (pex_ex_t *)f->value;
|
||||||
struct dirent * out = malloc(sizeof(struct dirent));
|
struct dirent * out = malloc(sizeof(struct dirent));
|
||||||
memset(out, 0x00, sizeof(struct dirent));

@ -317,7 +317,7 @@ static struct dirent * readdir_packetfs(fs_node_t *node, uint32_t index) {
}
}

- spin_unlock(&p->lock);
+ spin_unlock(p->lock);

return NULL;
}

@ -341,17 +341,17 @@ static fs_node_t * finddir_packetfs(fs_node_t * node, char * name) {

debug_print(INFO, "[pex] finddir(%s)", name);

- spin_lock(&p->lock);
+ spin_lock(p->lock);

foreach(f, p->exchanges) {
pex_ex_t * t = (pex_ex_t *)f->value;
if (!strcmp(name, t->name)) {
- spin_unlock(&p->lock);
+ spin_unlock(p->lock);
return file_from_pex(t);
}
}

- spin_unlock(&p->lock);
+ spin_unlock(p->lock);

return NULL;
}

@ -363,12 +363,12 @@ static void create_packetfs(fs_node_t *parent, char *name, uint16_t permission)

debug_print(NOTICE, "[pex] create(%s)", name);

- spin_lock(&p->lock);
+ spin_lock(p->lock);

foreach(f, p->exchanges) {
pex_ex_t * t = (pex_ex_t *)f->value;
if (!strcmp(name, t->name)) {
- spin_unlock(&p->lock);
+ spin_unlock(p->lock);
/* Already exists */
return;
}

@ -379,14 +379,15 @@ static void create_packetfs(fs_node_t *parent, char *name, uint16_t permission)

new_exchange->name = strdup(name);
new_exchange->fresh = 1;
- new_exchange->lock = 0;
new_exchange->clients = list_create();
new_exchange->server_pipe = make_pipe(4096);

+ spin_init(new_exchange->lock);
/* XXX Create exchange server pipe */

list_insert(p->exchanges, new_exchange);

- spin_unlock(&p->lock);
+ spin_unlock(p->lock);

}

@ -403,7 +404,7 @@ static void unlink_packetfs(fs_node_t *parent, char *name) {

int i = -1, j = 0;

- spin_lock(&p->lock);
+ spin_lock(p->lock);

foreach(f, p->exchanges) {
pex_ex_t * t = (pex_ex_t *)f->value;

@ -419,13 +420,15 @@ static void unlink_packetfs(fs_node_t *parent, char *name) {
list_remove(p->exchanges, i);
}

- spin_unlock(&p->lock);
+ spin_unlock(p->lock);
}

static fs_node_t * packetfs_manager(void) {
pex_t * pex = malloc(sizeof(pex_t));
pex->exchanges = list_create();
- pex->lock = 0;
+ spin_init(pex->lock);

fs_node_t * fnode = malloc(sizeof(fs_node_t));
memset(fnode, 0x00, sizeof(fs_node_t));
fnode->inode = 0;
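
Note: the packetfs call sites above now pass the lock object itself (no address-of operator) and set up heap-allocated locks with spin_init(). The spin_lock_t definition is not part of this diff; one plausible shape consistent with that usage is an array type, which decays to a pointer when passed. The sketch below is illustrative only and is not the commit's actual header:

/* Hypothetical sketch; the real spin lock header is not shown in this commit. */
typedef volatile int spin_lock_t[2];           /* [0] = lock word, [1] = spare (e.g. waiter hint) */

#define spin_init(lock) do { (lock)[0] = 0; (lock)[1] = 0; } while (0)

static inline void spin_lock(spin_lock_t lock) {
	/* Atomically set the lock word; spin while another holder has it. */
	while (__sync_lock_test_and_set(lock, 1)) {
		/* busy-wait; a real kernel might pause or yield to the scheduler here */
	}
}

static inline void spin_unlock(spin_lock_t lock) {
	__sync_lock_release(lock);                 /* atomically clear the lock word */
}

With an array type like this, static spin_lock_t x = { 0 }; zero-initializes a file-scope lock, and spin_lock(x) passes the lock without taking its address, matching the call sites in this diff.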
@ -38,7 +38,9 @@ static void find_rtl(uint32_t device, uint16_t vendorid, uint16_t deviceid, void

static void net_handler_enqueue(void * buffer);
static list_t * net_queue = NULL;
- static volatile uint8_t net_queue_lock = 0;
+ //static volatile uint8_t net_queue_lock = 0;

+ static spin_lock_t net_queue_lock = { 0 };

static int rtl_irq = 0;
static uint32_t rtl_iobase = 0;

@ -62,16 +64,17 @@ static fs_node_t * irc_socket;
static uint32_t seq_no = 0xff0000;
static uint32_t ack_no = 0x0;

- static volatile uint8_t _lock;
+ //static volatile uint8_t _lock;
+ static spin_lock_t _lock;
static int next_tx_buf(void) {
int out;
- spin_lock(&_lock);
+ spin_lock(_lock);
out = next_tx;
next_tx++;
if (next_tx == 4) {
next_tx = 0;
}
- spin_unlock(&_lock);
+ spin_unlock(_lock);
return out;
}

@ -397,7 +400,9 @@ static char irc_input[400] = {'\0'};
static char irc_prompt[100] = {'\0'};
static char irc_nick[32] = {'\0'};
static char irc_payload[512];
- static volatile uint8_t irc_tty_lock = 0;
+ static spin_lock_t irc_tty_lock = { 0 };
+ //static volatile uint8_t irc_tty_lock = 0;
//static struct netif rtl_netif;

static void irc_send(char * payload) {
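
Two initialization styles appear in the rtl8139 hunks above and in the earlier packetfs changes: file-scope locks are declared as static spin_lock_t locks initialized with { 0 }, while locks embedded in heap-allocated objects are set up at runtime with spin_init(). A small illustrative sketch of both styles follows; counter_t is a made-up example type, not something from this codebase:

/* Illustrative only; counter_t is hypothetical and not part of this commit. */
typedef struct {
	spin_lock_t lock;
	int value;
} counter_t;

static spin_lock_t file_scope_lock = { 0 };    /* static lock: zero-initialized at load time */

static counter_t * counter_new(void) {
	counter_t * c = malloc(sizeof(counter_t)); /* assumes the kernel's malloc() */
	spin_init(c->lock);                        /* heap-allocated lock: initialize explicitly */
	c->value = 0;
	return c;
}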
@ -419,7 +424,7 @@ static void handle_irc_packet(fs_node_t * tty, size_t size, uint8_t * packet) {
if ((uintptr_t)e > (uintptr_t)packet + size) {
break;
}
- spin_lock(&irc_tty_lock);
+ spin_lock(irc_tty_lock);

if (!e) {
/* XXX */

@ -442,7 +447,7 @@ static void handle_irc_packet(fs_node_t * tty, size_t size, uint8_t * packet) {
char * command;
char * channel;
char * message;

user = c;

command = strstr(user, " ");

@ -495,7 +500,7 @@ prompt_:
fprintf(tty, "%s", irc_prompt);
fprintf(tty, "%s", irc_input);

- spin_unlock(&irc_tty_lock);
+ spin_unlock(irc_tty_lock);

if (!e) break;

@ -621,11 +626,11 @@ static struct ethernet_packet * net_receive(void) {
while (!net_queue->length) {
sleep_on(rx_wait);
}
- spin_lock(&net_queue_lock);
+ spin_lock(net_queue_lock);
node_t * n = list_dequeue(net_queue);
struct ethernet_packet * eth = (struct ethernet_packet *)n->value;
free(n);
- spin_unlock(&net_queue_lock);
+ spin_unlock(net_queue_lock);

return eth;
}

@ -654,11 +659,11 @@ static void net_handler(void * data, char * name) {
static void net_handler_enqueue(void * buffer) {
/* XXX size? source? */

- spin_lock(&net_queue_lock);
+ spin_lock(net_queue_lock);

list_insert(net_queue, buffer);

- spin_unlock(&net_queue_lock);
+ spin_unlock(net_queue_lock);
}

static void parse_dns_response(fs_node_t * tty, void * last_packet) {
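
net_handler_enqueue() and net_receive() above form a producer/consumer pair in which the spin lock guards only the list manipulation, while sleeping and waking happen outside the critical section. A minimal sketch of that pattern, reusing the list primitives already visible in this diff (the producer/consumer names are illustrative, not from this module):

/* Illustrative sketch of the queue locking pattern; not code from this commit. */
static spin_lock_t queue_lock = { 0 };
static list_t * queue = NULL;

static void producer(void * item) {
	spin_lock(queue_lock);                 /* hold the lock only for the list update */
	list_insert(queue, item);
	spin_unlock(queue_lock);
	/* waking a sleeping consumer (wakeup_queue) would happen here, outside the lock */
}

static void * consumer(void) {
	spin_lock(queue_lock);
	node_t * n = list_dequeue(queue);      /* assumes the caller already checked queue->length */
	void * item = n ? n->value : NULL;
	free(n);
	spin_unlock(queue_lock);
	return item;
}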
@ -953,11 +958,11 @@ static int tty_readline(fs_node_t * dev, char * linebuf, int max) {
debug_print(WARNING, "Read nothing?");
continue;
}
- spin_lock(&irc_tty_lock);
+ spin_lock(irc_tty_lock);
linebuf[read] = buf[0];
if (buf[0] == '\n') {
linebuf[read] = 0;
- spin_unlock(&irc_tty_lock);
+ spin_unlock(irc_tty_lock);
break;
} else if (buf[0] == 0x08) {
if (read > 0) {

@ -969,18 +974,18 @@ static int tty_readline(fs_node_t * dev, char * linebuf, int max) {
switch (buf[0]) {
case 0x0C: /* ^L */
/* Should reset display here */
- spin_unlock(&irc_tty_lock);
+ spin_unlock(irc_tty_lock);
break;
default:
/* do nothing */
- spin_unlock(&irc_tty_lock);
+ spin_unlock(irc_tty_lock);
break;
}
} else {
fprintf(dev, "%c", buf[0]);
read += r;
}
- spin_unlock(&irc_tty_lock);
+ spin_unlock(irc_tty_lock);
}
tty_set_buffered(dev);
return read;

@ -1050,7 +1055,7 @@ DEFINE_SHELL_FUNCTION(irc_join, "irc channel tool") {
fprintf(tty, irc_prompt);
int c = tty_readline(tty, irc_input, 400);

- spin_lock(&irc_tty_lock);
+ spin_lock(irc_tty_lock);

irc_input[c] = '\0';

@ -1058,7 +1063,7 @@ DEFINE_SHELL_FUNCTION(irc_join, "irc channel tool") {
fprintf(tty, "\n");
sprintf(irc_payload, "PART %s\r\n", channel);
irc_send(irc_payload);
- spin_unlock(&irc_tty_lock);
+ spin_unlock(irc_tty_lock);
break;
}

@ -1079,7 +1084,7 @@ DEFINE_SHELL_FUNCTION(irc_join, "irc channel tool") {
}

memset(irc_input, 0x00, sizeof(irc_input));
- spin_unlock(&irc_tty_lock);
+ spin_unlock(irc_tty_lock);
}
memset(irc_prompt, 0x00, sizeof(irc_prompt));
memset(irc_input, 0x00, sizeof(irc_input));

@ -1112,7 +1117,7 @@ DEFINE_SHELL_FUNCTION(http, "Open a prompt to send HTTP commands.") {

/* /posting.php?mode=post&f=7 */

char * content =
"-----------------------------2611311029845263341299213952\r\n"
"Content-Disposition: form-data; name=\"subject\"\r\n"
"\r\n"
@ -34,7 +34,8 @@ static int snd_mixer_ioctl(fs_node_t * node, int request, void * argp);
static void snd_mixer_open(fs_node_t * node, unsigned int flags);
static void snd_mixer_close(fs_node_t * node);

- static uint8_t _devices_lock;
+ static spin_lock_t _devices_lock;

static list_t _devices;
static fs_node_t _dsp_fnode = {
.name = "dsp",

@ -51,7 +52,7 @@ static fs_node_t _mixer_fnode = {
.open = snd_mixer_open,
.close = snd_mixer_close,
};
- static uint8_t _buffers_lock;
+ static spin_lock_t _buffers_lock;
static list_t _buffers;
static uint32_t _next_device_id = SND_DEVICE_MAIN;

@ -59,7 +60,7 @@ int snd_register(snd_device_t * device) {
int rv = 0;

debug_print(WARNING, "[snd] _devices lock: %d", _devices_lock);
- spin_lock(&_devices_lock);
+ spin_lock(_devices_lock);
device->id = _next_device_id;
_next_device_id++;
if (list_find(&_devices, device)) {

@ -71,7 +72,7 @@ int snd_register(snd_device_t * device) {
debug_print(NOTICE, "[snd] %s registered", device->name);

snd_register_cleanup:
- spin_unlock(&_devices_lock);
+ spin_unlock(_devices_lock);
return rv;
}

@ -88,7 +89,7 @@ int snd_unregister(snd_device_t * device) {
debug_print(NOTICE, "[snd] %s unregistered", device->name);

snd_unregister_cleanup:
- spin_unlock(&_devices_lock);
+ spin_unlock(_devices_lock);
return rv;
}

@ -108,19 +109,19 @@ static void snd_dsp_open(fs_node_t * node, unsigned int flags) {
*/
/* Allocate a buffer for the node and keep a reference for ourselves */
node->device = ring_buffer_create(SND_BUF_SIZE);
- spin_lock(&_buffers_lock);
+ spin_lock(_buffers_lock);
list_insert(&_buffers, node->device);
- spin_unlock(&_buffers_lock);
+ spin_unlock(_buffers_lock);
}

static void snd_dsp_close(fs_node_t * node) {
- spin_lock(&_buffers_lock);
+ spin_lock(_buffers_lock);
list_delete(&_buffers, list_find(&_buffers, node->device));
- spin_unlock(&_buffers_lock);
+ spin_unlock(_buffers_lock);
}

static snd_device_t * snd_device_by_id(uint32_t device_id) {
- spin_lock(&_devices_lock);
+ spin_lock(_devices_lock);
snd_device_t * out = NULL;
snd_device_t * cur = NULL;

@ -130,7 +131,7 @@ static snd_device_t * snd_device_by_id(uint32_t device_id) {
out = cur;
}
}
- spin_unlock(&_devices_lock);
+ spin_unlock(_devices_lock);

return out;
}

@ -198,7 +199,7 @@ int snd_request_buf(snd_device_t * device, uint32_t size, uint8_t *buffer) {

memset(buffer, 0, size);

- spin_lock(&_buffers_lock);
+ spin_lock(_buffers_lock);
foreach(buf_node, &_buffers) {
ring_buffer_t * buf = buf_node->value;
/* ~0x3 is to ensure we don't read partial samples or just a single channel */

@ -222,19 +223,19 @@ int snd_request_buf(snd_device_t * device, uint32_t size, uint8_t *buffer) {
bytes_left -= this_read_size;
}
}
- spin_unlock(&_buffers_lock);
+ spin_unlock(_buffers_lock);

return size;
}

static snd_device_t * snd_main_device() {
- spin_lock(&_devices_lock);
+ spin_lock(_devices_lock);
foreach(node, &_devices) {
- spin_unlock(&_devices_lock);
+ spin_unlock(_devices_lock);
return node->value;
}

- spin_unlock(&_devices_lock);
+ spin_unlock(_devices_lock);
return NULL;
}

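The snd hunks above also repeat a lookup pattern: snd_device_by_id() and snd_main_device() take _devices_lock, walk the list, and release the lock before returning the match. A generalized sketch of that lock-scan-unlock shape, reusing the list helpers from this codebase (find_locked and the match callback are illustrative names, and the spin_lock_t parameter assumes the array-style definition sketched earlier):

/* Illustrative sketch of the lock-scan-unlock lookup used by the snd module. */
static void * find_locked(spin_lock_t lock, list_t * lst,
                          int (*match)(void * item, void * key), void * key) {
	void * out = NULL;
	spin_lock(lock);
	foreach(node, lst) {                   /* foreach is the list macro used throughout this diff */
		if (match(node->value, key)) {
			out = node->value;
			break;
		}
	}
	spin_unlock(lock);                     /* always release before returning */
	return out;
}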
@ -18,7 +18,7 @@
#define TMPFS_TYPE_FILE 1
#define TMPFS_TYPE_DIR 2

- uint8_t volatile tmpfs_lock = 0;
+ static spin_lock_t tmpfs_lock = { 0 };

struct tmpfs_dir * tmpfs_root = NULL;

@ -26,7 +26,7 @@ static fs_node_t * tmpfs_from_dir(struct tmpfs_dir * d);

static struct tmpfs_file * tmpfs_file_new(char * name) {

- spin_lock(&tmpfs_lock);
+ spin_lock(tmpfs_lock);

struct tmpfs_file * t = malloc(sizeof(struct tmpfs_file));
t->name = strdup(name);

@ -45,12 +45,12 @@ static struct tmpfs_file * tmpfs_file_new(char * name) {
t->blocks[i] = NULL;
}

- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);
return t;
}

static struct tmpfs_dir * tmpfs_dir_new(char * name, struct tmpfs_dir * parent) {
- spin_lock(&tmpfs_lock);
+ spin_lock(tmpfs_lock);

struct tmpfs_dir * d = malloc(sizeof(struct tmpfs_dir));
d->name = strdup(name);

@ -63,7 +63,7 @@ static struct tmpfs_dir * tmpfs_dir_new(char * name, struct tmpfs_dir * parent)
d->ctime = d->atime;
d->files = list_create();

- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);
return d;
}

@ -82,7 +82,7 @@ static void tmpfs_file_blocks_embiggen(struct tmpfs_file * t) {
static char * tmpfs_file_getset_block(struct tmpfs_file * t, size_t blockid, int create) {
debug_print(INFO, "Reading block %d from file %s", blockid, t->name);
if (create) {
- spin_lock(&tmpfs_lock);
+ spin_lock(tmpfs_lock);
while (blockid >= t->pointers) {
tmpfs_file_blocks_embiggen(t);
}

@ -91,7 +91,7 @@ static char * tmpfs_file_getset_block(struct tmpfs_file * t, size_t blockid, int
t->blocks[t->block_count] = malloc(BLOCKSIZE);
t->block_count += 1;
}
- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);
} else {
if (blockid >= t->block_count) {
debug_print(CRITICAL, "This will probably end badly.");

@ -283,22 +283,23 @@ static fs_node_t * finddir_tmpfs(fs_node_t * node, char * name) {

struct tmpfs_dir * d = (struct tmpfs_dir *)node->device;

- spin_lock(&tmpfs_lock);
+ spin_lock(tmpfs_lock);

foreach(f, d->files) {
struct tmpfs_file * t = (struct tmpfs_file *)f->value;
if (!strcmp(name, t->name)) {
- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);
switch (t->type) {
case TMPFS_TYPE_FILE:
return tmpfs_from_file(t);
case TMPFS_TYPE_DIR:
return tmpfs_from_dir((struct tmpfs_dir *)t);
}
+ return NULL;
}
}

- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);

return NULL;
}

@ -306,7 +307,7 @@ static fs_node_t * finddir_tmpfs(fs_node_t * node, char * name) {
static void unlink_tmpfs(fs_node_t * node, char * name) {
struct tmpfs_dir * d = (struct tmpfs_dir *)node->device;
int i = -1, j = 0;
- spin_lock(&tmpfs_lock);
+ spin_lock(tmpfs_lock);

foreach(f, d->files) {
struct tmpfs_file * t = (struct tmpfs_file *)f->value;

@ -323,7 +324,7 @@ static void unlink_tmpfs(fs_node_t * node, char * name) {
list_remove(d->files, i);
}

- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);
return;
}

@ -333,16 +334,16 @@ static void create_tmpfs(fs_node_t *parent, char *name, uint16_t permission) {
struct tmpfs_dir * d = (struct tmpfs_dir *)parent->device;
debug_print(CRITICAL, "Creating TMPFS file %s in %s", name, d->name);

- spin_lock(&tmpfs_lock);
+ spin_lock(tmpfs_lock);
foreach(f, d->files) {
struct tmpfs_file * t = (struct tmpfs_file *)f->value;
if (!strcmp(name, t->name)) {
- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);
debug_print(WARNING, "... already exists.");
return; /* Already exists */
}
}
- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);

debug_print(NOTICE, "... creating a new file.");
struct tmpfs_file * t = tmpfs_file_new(name);

@ -350,9 +351,9 @@ static void create_tmpfs(fs_node_t *parent, char *name, uint16_t permission) {
t->uid = current_process->user;
t->gid = current_process->user;

- spin_lock(&tmpfs_lock);
+ spin_lock(tmpfs_lock);
list_insert(d->files, t);
- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);
}

static void mkdir_tmpfs(fs_node_t * parent, char * name, uint16_t permission) {

@ -361,16 +362,16 @@ static void mkdir_tmpfs(fs_node_t * parent, char * name, uint16_t permission) {
struct tmpfs_dir * d = (struct tmpfs_dir *)parent->device;
debug_print(CRITICAL, "Creating TMPFS directory %s (in %s)", name, d->name);

- spin_lock(&tmpfs_lock);
+ spin_lock(tmpfs_lock);
foreach(f, d->files) {
struct tmpfs_file * t = (struct tmpfs_file *)f->value;
if (!strcmp(name, t->name)) {
- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);
debug_print(WARNING, "... already exists.");
return; /* Already exists */
}
}
- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);

debug_print(NOTICE, "... creating a new directory.");
struct tmpfs_dir * out = tmpfs_dir_new(name, d);

@ -378,9 +379,9 @@ static void mkdir_tmpfs(fs_node_t * parent, char * name, uint16_t permission) {
out->uid = current_process->user;
out->gid = current_process->user;

- spin_lock(&tmpfs_lock);
+ spin_lock(tmpfs_lock);
list_insert(d->files, out);
- spin_unlock(&tmpfs_lock);
+ spin_unlock(tmpfs_lock);
}

static fs_node_t * tmpfs_from_dir(struct tmpfs_dir * d) {