* Replaced the vm_get_physical_page() "flags" PHYSICAL_PAGE_{NO,CAN}_WAIT with an actual flag, PHYSICAL_PAGE_DONT_WAIT.
* Pass the flags through to the chunk mapper callback.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@27979 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 1b6eff280f
parent 9fc81c5394
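As a rough caller-side illustration of what the flag change means (a sketch, not code taken from this commit; map_for_copy() is a hypothetical helper):

/*
 * Sketch only: after this change, "waiting is allowed" is simply flags == 0,
 * and a caller that must not block sets the PHYSICAL_PAGE_DONT_WAIT bit
 * instead of picking one of the two old enum values.
 */
static status_t
map_for_copy(addr_t physicalAddress, addr_t *virtualAddress, bool canWait)
{
	// previously: PHYSICAL_PAGE_CAN_WAIT vs. PHYSICAL_PAGE_NO_WAIT
	uint32 flags = canWait ? 0 : PHYSICAL_PAGE_DONT_WAIT;
	return vm_get_physical_page(physicalAddress, virtualAddress, flags);
}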
@@ -49,8 +49,7 @@
// flags for vm_get_physical_page()
enum {
- PHYSICAL_PAGE_NO_WAIT = 0,
- PHYSICAL_PAGE_CAN_WAIT,
+ PHYSICAL_PAGE_DONT_WAIT = 0x01
};

// mapping argument for several internal VM functions

@@ -43,7 +43,7 @@ ide_request_sense(ide_device_info *device, ide_qrequest *qrequest)
request->data_resid = request->data_length - transferSize;

// normally, all flags are set to "success", but for Request Sense
// this would have overwritten the sense we want to read
device->subsys_status = SCSI_REQ_CMP;
request->device_status = SCSI_STATUS_GOOD;

@@ -68,7 +68,7 @@ copy_sg_data(scsi_ccb *request, uint offset, uint allocationLength,
int sgCount = request->sg_count;
int requestSize;

SHOW_FLOW(3, "offset=%u, req_size_limit=%d, size=%d, sg_list=%p, sg_cnt=%d, %s buffer",
offset, allocationLength, size, sgList, sgCount, toBuffer ? "to" : "from");

// skip unused S/G entries

@@ -81,7 +81,7 @@ copy_sg_data(scsi_ccb *request, uint offset, uint allocationLength,
if (sgCount == 0)
return 0;

// remaining bytes we are allowed to copy from/to request
requestSize = min(allocationLength, request->data_length) - offset;

// copy one S/G entry at a time

@@ -92,8 +92,8 @@ copy_sg_data(scsi_ccb *request, uint offset, uint allocationLength,
bytes = min(size, requestSize);
bytes = min(bytes, sgList->size);

- if (vm_get_physical_page((addr_t)sgList->address, &virtualAddress,
- PHYSICAL_PAGE_CAN_WAIT) != B_OK)
+ if (vm_get_physical_page((addr_t)sgList->address, &virtualAddress, 0)
+ != B_OK)
return false;

SHOW_FLOW(4, "buffer=%p, virt_addr=%p, bytes=%d, to_buffer=%d",
@@ -9,30 +9,30 @@
PIO data transmission

This file is more difficult then you might expect as the SCSI system
uses physical addresses everywhere which have to be mapped into
virtual address space during transmission. Additionally, during ATAPI
commands we may have to transmit more data then exist because the
data len specified by the command doesn't need to be the same as
of the data buffer provided.

The handling of S/G entries of odd size may look superfluous as the
SCSI bus manager can take care of that. In general, this would be possible
as most controllers need even alignment for DMA as well, but some can
handle _any_ S/G list and it wouldn't be sensitive to enforce stricter
alignement just for some rare PIO transmissions.

Little hint for the meaning of "transferred": this is the number of bytes
sent over the bus. For read-transmissions, this may be one more then copied
into the buffer (the extra byte read is stored in device->odd_byte), for
write-transmissions, this may be one less (the waiting byte is pending in
device->odd_byte).

In terms of error handling: we don't bother checking transmission of every
single byte via read/write_pio(). At least at the end of the request, when
the status bits are verified, we will see that something has gone wrong.

TBD: S/G entries may have odd start address. For non-Intel architecture
we either have to copy data to an aligned buffer or have to modify
PIO-handling in controller drivers.
*/
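The comment above describes the odd-byte bookkeeping in prose; the following is a self-contained sketch of that idea (illustrative only, not code from the driver: emit_word() stands in for controller->write_pio(), and the two static variables mirror device->has_odd_byte/odd_byte):

#include <stdbool.h>
#include <stdint.h>

static bool sHasOddByte;
static uint8_t sOddByte;

static void
emit_word(uint8_t low, uint8_t high)
{
	// stand-in for one 16-bit PIO write to the controller
	(void)low;
	(void)high;
}

static int
write_chunk(const uint8_t *data, int length)
{
	int transferred = 0;

	if (sHasOddByte && length > 0) {
		// pair the byte left over from the previous chunk with the first new byte
		emit_word(sOddByte, *data++);
		length--;
		transferred += 2;
		sHasOddByte = false;
	}

	// transmit the even part of the chunk, 16 bits at a time
	for (int i = 0; i + 1 < length; i += 2)
		emit_word(data[i], data[i + 1]);
	transferred += length & ~1;

	// a trailing odd byte stays pending until the next chunk arrives
	if (length & 1) {
		sOddByte = data[length - 1];
		sHasOddByte = true;
	}
	return transferred;
}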
@@ -73,7 +73,7 @@ transfer_PIO_virtcont(ide_device_info *device, uint8 *virtualAddress,
if (write) {
// if there is a byte left from last chunk, transmit it together
// with the first byte of the current chunk (IDE requires 16 bits
// to be transmitted at once)
if (device->has_odd_byte) {
uint8 buffer[2];

@@ -90,7 +90,7 @@ transfer_PIO_virtcont(ide_device_info *device, uint8 *virtualAddress,
controller->write_pio(channel_cookie, (uint16 *)virtualAddress,
length / 2, false);

// take care if chunk size was odd, which means that 1 byte remains
virtualAddress += length & ~1;
*transferred += length & ~1;

@@ -111,7 +111,7 @@ transfer_PIO_virtcont(ide_device_info *device, uint8 *virtualAddress,
length / 2, false);

// take care of odd chunk size;
// in this case we read 1 byte to few!
virtualAddress += length & ~1;
*transferred += length & ~1;

@@ -150,15 +150,14 @@ transfer_PIO_physcont(ide_device_info *device, addr_t physicalAddress,
SHOW_FLOW(4, "Transmitting to/from physical address %lx, %d bytes left",
physicalAddress, length);

- if (vm_get_physical_page(physicalAddress, &virtualAddress,
- PHYSICAL_PAGE_CAN_WAIT) != B_OK) {
+ if (vm_get_physical_page(physicalAddress, &virtualAddress, 0) != B_OK) {
// ouch: this should never ever happen
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return B_ERROR;
}

// if chunks starts in the middle of a page, we have even less then
// a page left
page_left = B_PAGE_SIZE - physicalAddress % B_PAGE_SIZE;

SHOW_FLOW(4, "page_left=%d", page_left);

@@ -175,7 +174,7 @@ transfer_PIO_physcont(ide_device_info *device, addr_t physicalAddress,
if (err != B_OK)
return err;

length -= cur_len;
physicalAddress += cur_len;
}

@@ -242,7 +241,7 @@ write_discard_PIO(ide_device_info *device, int length)
int cur_len;

// if device asks for odd number of bytes, append an extra byte to
// make length even (this is the "length + 1" term)
cur_len = min(length + 1, (int)(sizeof(buffer))) / 2;

bus->controller->write_pio(bus->channel_cookie, (uint16 *)buffer, cur_len, false);
@@ -119,8 +119,8 @@ scsi_copy_dma_buffer(scsi_ccb *request, uint32 size, bool to_buffer)
bytes = min( size, sg_list->size );

- if (vm_get_physical_page((addr_t)sg_list->address, &virtualAddress,
- PHYSICAL_PAGE_CAN_WAIT) != B_OK)
+ if (vm_get_physical_page((addr_t)sg_list->address, &virtualAddress, 0)
+ != B_OK)
return false;

if (to_buffer)

@@ -464,8 +464,8 @@ copy_sg_data(scsi_ccb *request, uint offset, uint allocation_length,
bytes = min(size, req_size);
bytes = min(bytes, sg_list->size);

- if (vm_get_physical_page((addr_t)sg_list->address, (void *)&virtualAddress,
- PHYSICAL_PAGE_CAN_WAIT) != B_OK)
+ if (vm_get_physical_page((addr_t)sg_list->address,
+ (void*)&virtualAddress, 0) != B_OK)
return false;

SHOW_FLOW(0, "buffer = %p, virt_addr = %#lx, bytes = %lu, to_buffer = %d",
@@ -93,8 +93,8 @@ sg_memcpy(const physical_entry *sgTable, int sgCount, const void *data,
size_t size = min_c(dataSize, sgTable[i].size);
addr_t address;

- if (vm_get_physical_page((addr_t)sgTable[i].address, &address,
- PHYSICAL_PAGE_CAN_WAIT) < B_OK)
+ if (vm_get_physical_page((addr_t)sgTable[i].address, &address, 0)
+ < B_OK)
return B_ERROR;

TRACE("sg_memcpy phyAddr %p, addr %p, size %lu\n", sgTable[i].address, (void *)address, size);
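All of the hunks above follow the same pattern: map one scatter/gather entry, copy, release, repeat. A condensed sketch of that pattern (not code from the tree; copy_from_sg() is a hypothetical helper, and vm_put_physical_page() is assumed to be the matching release call):

static status_t
copy_from_sg(const physical_entry *sgList, int sgCount, void *buffer,
	size_t bufferSize)
{
	uint8 *out = (uint8 *)buffer;

	for (int i = 0; i < sgCount && bufferSize > 0; i++) {
		size_t bytes = min_c(bufferSize, sgList[i].size);
		addr_t virtualAddress;

		// flags == 0: this path may block until a mapping slot is free
		if (vm_get_physical_page((addr_t)sgList[i].address, &virtualAddress,
				0) < B_OK)
			return B_ERROR;

		memcpy(out, (void *)virtualAddress, bytes);
		vm_put_physical_page(virtualAddress);

		out += bytes;
		bufferSize -= bytes;
	}
	return B_OK;
}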
@@ -90,7 +90,8 @@ restart:
break;
}

- sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize);
+ sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize,
+ flags);
mutex_unlock(&sMutex);

return B_OK;

@@ -99,7 +100,7 @@ restart:
// replace an earlier mapping
if (queue_peek(&mapped_paddr_lru) == NULL) {
// no free slots available
- if (flags == PHYSICAL_PAGE_NO_WAIT) {
+ if ((flags & PHYSICAL_PAGE_DONT_WAIT) != 0) {
// put back to the caller and let them handle this
mutex_unlock(&sMutex);
return B_NO_MEMORY;
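Note the semantic flip in the test above: under the old enum, the value 0 was PHYSICAL_PAGE_NO_WAIT, so a zero argument meant "don't wait"; with the new flag, 0 means "waiting is allowed" and the caller must set the bit explicitly to forbid blocking. A tiny illustration (caller_may_wait() is hypothetical, not code from the tree):

static bool
caller_may_wait(uint32 flags)
{
	// old behaviour: return flags != PHYSICAL_PAGE_NO_WAIT;  (i.e. flags != 0)
	return (flags & PHYSICAL_PAGE_DONT_WAIT) == 0;
}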
@@ -123,7 +124,7 @@ restart:
virtual_pmappings[(*va - sIOSpaceBase) / sIOSpaceChunkSize]
= paddr_desc + index;

- sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize);
+ sMapIOSpaceChunk(paddr_desc[index].va, index * sIOSpaceChunkSize, flags);

mutex_unlock(&sMutex);
return B_OK;

@@ -12,7 +12,8 @@
extern "C" {
#endif

- typedef status_t (*generic_map_iospace_chunk_func)(addr_t, addr_t);
+ typedef status_t (*generic_map_iospace_chunk_func)(addr_t virtualAddress,
+ addr_t physicalAddress, uint32 flags);

status_t generic_get_physical_page(addr_t pa, addr_t *va, uint32 flags);
status_t generic_put_physical_page(addr_t va);
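The typedef change above is the "pass the flags through to the chunk mapper callback" part of this commit: each port's map_iospace_chunk() gains a uint32 flags parameter. A hedged sketch of a conforming callback (the body is illustrative only, not taken from any of the ports below):

static status_t
map_iospace_chunk(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
{
	// page align both addresses, as the existing implementations do
	physicalAddress &= ~(addr_t)(B_PAGE_SIZE - 1);
	virtualAddress &= ~(addr_t)(B_PAGE_SIZE - 1);

	bool canWait = (flags & PHYSICAL_PAGE_DONT_WAIT) == 0;

	// ... fill in the page table entries for the chunk; if a needed resource
	// is unavailable and canWait is false, return an error rather than block ...
	(void)canWait;
	return B_OK;
}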
@@ -17,10 +17,10 @@
#endif

/* (mmu_man) Implementation details on 68030 and others:

Unlike on x86 we can't just switch the context to another team by just
setting a register to another page directory, since we only have one
page table containing both kernel and user address mappings.
The 030 supports arbitrary layout of the page directory tree, including
a 1-bit first level (2 entries top level table) that would map kernel
and user land at a single place. But 040 and later only support a fixed

@@ -28,7 +28,7 @@

Since 68k SMP hardware is rare enough we don't want to support them, we
can take some shortcuts.

As we don't want a separate user and kernel space, we'll use a single
table. With the 7/7/6 split the 2nd level would require 32KB of tables,
which is small enough to not want to use the list hack from x86.

@@ -227,7 +227,7 @@ early_query(addr_t va, addr_t *_physicalAddress)
int32 index;
status_t err = B_ERROR; // no pagetable here
TRACE(("%s(%p,)\n", __FUNCTION__, va));

index = VADDR_TO_PRENT(va);
TRACE(("%s: pr[%d].type %d\n", __FUNCTION__, index, pr[index].type));
if (pr && pr[index].type == DT_ROOT) {
@@ -252,7 +252,7 @@ early_query(addr_t va, addr_t *_physicalAddress)
pt = (page_table_entry *)pa;
index = 0; // single descriptor
}

if (pt && pt[index].type == DT_PAGE) {
*_physicalAddress = PTE_TO_PA(pt[index]);
// we should only be passed page va, but just in case.

@@ -261,7 +261,7 @@ early_query(addr_t va, addr_t *_physicalAddress)
}
}
}

return err;
}

@@ -370,7 +370,7 @@ destroy_tmap(vm_translation_map *map)
pgtbl_pn = PDE_TO_PN(pgdir[j]);
page = vm_lookup_page(pgtbl_pn);
pgtbl = (page_table_entry *)page;

if (!page) {
panic("destroy_tmap: didn't find pgtable page\n");
return;

@@ -504,7 +504,7 @@ map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end)
need = 1;
need += (VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE;
}

return need;
}
@@ -552,7 +552,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
unsigned aindex = rindex & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
page_root_entry *apr = &pr[aindex + i];

// put in the pgdir
put_pgdir_in_pgroot(apr, pgdir, attributes
| (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

@@ -571,7 +571,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
// now, fill in the pentry
do {
err = get_physical_page_tmap(PRE_TO_PA(pr[rindex]),
- &pd_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pd_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (err < 0);
pd = (page_directory_entry *)pd_pg;
// we want the table at rindex, not at rindex%(tbl/page)

@@ -598,7 +598,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
unsigned aindex = dindex & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
page_directory_entry *apd = &pd[aindex + i];

// put in the pgdir
put_pgtable_in_pgdir(apd, pgtable, attributes
| (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

@@ -615,7 +615,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
// now, fill in the pentry
do {
err = get_physical_page_tmap(PDE_TO_PA(pd[dindex]),
- &pt_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pt_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (err < 0);
pt = (page_table_entry *)pt_pg;
// we want the table at rindex, not at rindex%(tbl/page)
@@ -668,7 +668,7 @@ restart:

do {
status = get_physical_page_tmap(PRE_TO_PA(pr[index]),
- &pd_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pd_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
pd = (page_directory_entry *)pd_pg;
// we want the table at rindex, not at rindex%(tbl/page)

@@ -684,7 +684,7 @@ restart:

do {
status = get_physical_page_tmap(PDE_TO_PA(pd[index]),
- &pt_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pt_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
pt = (page_table_entry *)pt_pg;
// we want the table at rindex, not at rindex%(tbl/page)

@@ -751,7 +751,7 @@ query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical,
pt = (page_table_entry *)sQueryPage;
index = 0; // single descriptor
}

if (pt /*&& pt[index].type == DT_PAGE*/) {
*_physical = PTE_TO_PA(pt[index]);
// we should only be passed page va, but just in case.

@@ -764,10 +764,10 @@ query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical,
}
}
}

// unmap the pg table from the indirect desc.
sQueryDesc.type = DT_INVALID;

return err;
}
@@ -795,7 +795,7 @@ query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags

do {
status = get_physical_page_tmap(PRE_TO_PA(pr[index]),
- &pd_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pd_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
pd = (page_directory_entry *)pd_pg;
// we want the table at rindex, not at rindex%(tbl/page)

@@ -811,7 +811,7 @@ query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags

do {
status = get_physical_page_tmap(PDE_TO_PA(pd[index]),
- &pt_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pt_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
pt = (page_table_entry *)pt_pg;
// we want the table at rindex, not at rindex%(tbl/page)

@@ -825,7 +825,7 @@ query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags
pi_pg = pt_pg;
do {
status = get_physical_page_tmap(PIE_TO_PA(pi[index]),
- &pt_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pt_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
pt = (page_table_entry *)pt_pg;
// add offset from start of page
@@ -880,7 +880,7 @@ protect_tmap(vm_translation_map *map, addr_t start, addr_t end, uint32 attribute
restart:
if (start >= end)
return B_OK;

index = VADDR_TO_PRENT(start);
if (pr[index].type != DT_ROOT) {
// no pagedir here, move the start up to access the next page table

@@ -890,7 +890,7 @@ restart:

do {
status = get_physical_page_tmap(PRE_TO_PA(pr[index]),
- &pd_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pd_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
pd = (page_directory_entry *)pd_pg;
// we want the table at rindex, not at rindex%(tbl/page)

@@ -906,7 +906,7 @@ restart:

do {
status = get_physical_page_tmap(PDE_TO_PA(pd[index]),
- &pt_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pt_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
pt = (page_table_entry *)pt_pg;
// we want the table at rindex, not at rindex%(tbl/page)
@@ -962,7 +962,7 @@ clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)

do {
status = get_physical_page_tmap(PRE_TO_PA(pr[index]),
- &pd_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pd_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
pd = (page_directory_entry *)pd_pg;
// we want the table at rindex, not at rindex%(tbl/page)

@@ -978,7 +978,7 @@ clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)

do {
status = get_physical_page_tmap(PDE_TO_PA(pd[index]),
- &pt_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pt_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
pt = (page_table_entry *)pt_pg;
// we want the table at rindex, not at rindex%(tbl/page)

@@ -992,7 +992,7 @@ clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
pi_pg = pt_pg;
do {
status = get_physical_page_tmap(PIE_TO_PA(pi[index]),
- &pt_pg, PHYSICAL_PAGE_NO_WAIT);
+ &pt_pg, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
pt = (page_table_entry *)pt_pg;
// add offset from start of page
@@ -1059,7 +1059,7 @@ flush_tmap(vm_translation_map *map)

static status_t
- map_iospace_chunk(addr_t va, addr_t pa)
+ map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
{
int i;
page_table_entry *pt;

@@ -1218,7 +1218,7 @@ m68k_vm_translation_map_init(kernel_args *args)
// clear out the bottom 2 GB, unmap everything
memset(page_hole_pgdir + FIRST_USER_PGDIR_ENT, 0, sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);
#endif

sKernelPhysicalPageRoot = (page_root_entry *)args->arch_args.phys_pgroot;
sKernelVirtualPageRoot = (page_root_entry *)args->arch_args.vir_pgroot;
@@ -1349,18 +1349,18 @@ m68k_vm_translation_map_init_post_area(kernel_args *args)
int32 index;

// first get pa for the indirect descriptor

index = VADDR_TO_PRENT((addr_t)&sQueryDesc);
physicalPageDir = PRE_TO_PA(sKernelVirtualPageRoot[index]);

get_physical_page_tmap(physicalPageDir,
- (addr_t *)&pageDirEntry, PHYSICAL_PAGE_NO_WAIT);
+ (addr_t *)&pageDirEntry, PHYSICAL_PAGE_DONT_WAIT);

index = VADDR_TO_PDENT((addr_t)&sQueryDesc);
physicalPageTable = PDE_TO_PA(pageDirEntry[index]);

get_physical_page_tmap(physicalPageTable,
- (addr_t *)&pageTableEntry, PHYSICAL_PAGE_NO_WAIT);
+ (addr_t *)&pageTableEntry, PHYSICAL_PAGE_DONT_WAIT);

index = VADDR_TO_PTENT((addr_t)&sQueryDesc);

@@ -1371,25 +1371,25 @@ m68k_vm_translation_map_init_post_area(kernel_args *args)

put_physical_page_tmap((addr_t)pageTableEntry);
put_physical_page_tmap((addr_t)pageDirEntry);

// then the va for the page table for the query page.

//sQueryPageTable = (page_indirect_entry *)(queryPage);

index = VADDR_TO_PRENT(queryPage);
physicalPageDir = PRE_TO_PA(sKernelVirtualPageRoot[index]);

get_physical_page_tmap(physicalPageDir,
- (addr_t *)&pageDirEntry, PHYSICAL_PAGE_NO_WAIT);
+ (addr_t *)&pageDirEntry, PHYSICAL_PAGE_DONT_WAIT);

index = VADDR_TO_PDENT(queryPage);
physicalPageTable = PDE_TO_PA(pageDirEntry[index]);

get_physical_page_tmap(physicalPageTable,
- (addr_t *)&pageTableEntry, PHYSICAL_PAGE_NO_WAIT);
+ (addr_t *)&pageTableEntry, PHYSICAL_PAGE_DONT_WAIT);

index = VADDR_TO_PTENT(queryPage);

put_page_indirect_entry_in_pgtable(&pageTableEntry[index], physicalIndirectDesc,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
@@ -1424,7 +1424,7 @@ m68k_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
uint32 index;
uint32 i;
TRACE(("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));

// everything much simpler here because pa = va
// thanks to transparent translation which hasn't been disabled yet
@@ -119,7 +119,7 @@ typedef struct vm_translation_map_arch_info {
} vm_translation_map_arch_info;

void
ppc_translation_map_change_asid(vm_translation_map *map)
{
// this code depends on the kernel being at 0x80000000, fix if we change that

@@ -157,7 +157,7 @@ unlock_tmap(vm_translation_map *map)
}

static void
destroy_tmap(vm_translation_map *map)
{
if (map->map_count > 0) {

@@ -167,7 +167,7 @@ destroy_tmap(vm_translation_map *map)

// mark the vsid base not in use
int baseBit = map->arch_data->vsid_base >> VSID_BASE_SHIFT;
atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
~(1 << (baseBit % 32)));

free(map->arch_data);
@@ -177,7 +177,7 @@ destroy_tmap(vm_translation_map *map)

static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
addr_t virtualAddress, addr_t physicalAddress, uint8 protection,
bool secondaryHash)
{
// lower 32 bit - set at once

@@ -237,7 +237,7 @@ map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress,
if (entry->valid)
continue;

fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
protection, false);
map->map_count++;
return B_OK;

@@ -254,7 +254,7 @@ map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress,
if (entry->valid)
continue;

fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
protection, false);
map->map_count++;
return B_OK;
@@ -374,7 +374,7 @@ query_tmap(vm_translation_map *map, addr_t va, addr_t *_outPhysical, uint32 *_ou

static status_t
- map_iospace_chunk(addr_t va, addr_t pa)
+ map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
{
pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned

@@ -386,7 +386,7 @@ map_iospace_chunk(addr_t va, addr_t pa)
}

static addr_t
get_mapped_size_tmap(vm_translation_map *map)
{
return map->map_count;

@@ -432,7 +432,7 @@ clear_flags_tmap(vm_translation_map *map, addr_t virtualAddress, uint32 flags)
}

static void
flush_tmap(vm_translation_map *map)
{
// TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
@@ -586,7 +586,7 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
}

// create an area to cover the page table
sPageTableArea = create_area("page_table", (void **)&sPageTable, B_EXACT_ADDRESS,
sPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

// init physical page mapper

@@ -613,7 +613,7 @@ arch_vm_translation_map_init_post_sem(kernel_args *args)
*/

status_t
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
uint8 attributes, addr_t (*get_free_page)(kernel_args *))
{
uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;

@@ -647,7 +647,7 @@ arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t

// XXX currently assumes this translation map is active

status_t
arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical)
{
//PANIC_UNIMPLEMENTED();
@@ -379,7 +379,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
// now, fill in the pentry
do {
err = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr),
- (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
+ (addr_t *)&pt, PHYSICAL_PAGE_DONT_WAIT);
} while (err < 0);
index = VADDR_TO_PTENT(va);

@@ -425,7 +425,7 @@ restart:

do {
status = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr),
- (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
+ (addr_t *)&pt, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);

for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);

@@ -517,7 +517,7 @@ query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags

do {
status = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr),
- (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
+ (addr_t *)&pt, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
index = VADDR_TO_PTENT(va);
@@ -573,7 +573,7 @@ restart:

do {
status = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr),
- (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
+ (addr_t *)&pt, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);

for (index = VADDR_TO_PTENT(start); index < 1024 && start < end; index++, start += B_PAGE_SIZE) {

@@ -619,7 +619,7 @@ clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)

do {
status = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr),
- (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT);
+ (addr_t *)&pt, PHYSICAL_PAGE_DONT_WAIT);
} while (status < B_OK);
index = VADDR_TO_PTENT(va);

@@ -706,7 +706,7 @@ flush_tmap(vm_translation_map *map)

static status_t
- map_iospace_chunk(addr_t va, addr_t pa)
+ map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
{
int i;
page_table_entry *pt;
@@ -979,7 +979,7 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
physicalPageTable = ADDR_REVERSE_SHIFT(sKernelVirtualPageDirectory[index].addr);

get_physical_page_tmap(physicalPageTable,
- (addr_t *)&pageTableEntry, PHYSICAL_PAGE_NO_WAIT);
+ (addr_t *)&pageTableEntry, PHYSICAL_PAGE_DONT_WAIT);

index = VADDR_TO_PTENT((addr_t)sQueryPageTable);
put_page_table_entry_in_pgtable(&pageTableEntry[index], physicalPageTable,
src/system/kernel/cache/file_cache.cpp (6 changed lines)
@@ -235,7 +235,7 @@ read_into_cache(file_cache_ref *ref, void *cookie, off_t offset,
addr_t virtualAddress;
if (vm_get_physical_page(
pages[i]->physical_page_number * B_PAGE_SIZE,
- &virtualAddress, PHYSICAL_PAGE_CAN_WAIT) < B_OK) {
+ &virtualAddress, 0) < B_OK) {
panic("could not get physical page");
}

@@ -334,7 +334,7 @@ write_to_cache(file_cache_ref *ref, void *cookie, off_t offset,

addr_t virtualAddress;
vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE,
- &virtualAddress, PHYSICAL_PAGE_CAN_WAIT);
+ &virtualAddress, 0);

add_to_iovec(vecs, vecCount, MAX_IO_VECS, virtualAddress, B_PAGE_SIZE);
// ToDo: check if the array is large enough!

@@ -649,7 +649,7 @@ cache_io(void *_cacheRef, void *cookie, off_t offset, addr_t buffer,

addr_t virtualAddress;
vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE,
- &virtualAddress, PHYSICAL_PAGE_CAN_WAIT);
+ &virtualAddress, 0);

// copy the contents of the page already in memory
if (doWrite) {
@@ -3107,7 +3107,8 @@ display_mem(int argc, char **argv)
gKernelStartup = true;
// vm_get_physical_page() needs to lock...

- if (vm_get_physical_page(address, &copyAddress, PHYSICAL_PAGE_NO_WAIT) != B_OK) {
+ if (vm_get_physical_page(address, &copyAddress, PHYSICAL_PAGE_DONT_WAIT)
+ != B_OK) {
kprintf("getting the hardware page failed.");
gKernelStartup = false;
return 0;
@@ -4624,11 +4625,13 @@ if (cacheOffset == 0x12000)

// try to get a mapping for the src and dest page so we can copy it
for (;;) {
- map->ops->get_physical_page(sourcePage->physical_page_number * B_PAGE_SIZE,
- (addr_t *)&source, PHYSICAL_PAGE_CAN_WAIT);
+ map->ops->get_physical_page(
+ sourcePage->physical_page_number * B_PAGE_SIZE,
+ (addr_t *)&source, 0);

- if (map->ops->get_physical_page(page->physical_page_number * B_PAGE_SIZE,
- (addr_t *)&dest, PHYSICAL_PAGE_NO_WAIT) == B_OK)
+ if (map->ops->get_physical_page(
+ page->physical_page_number * B_PAGE_SIZE,
+ (addr_t *)&dest, PHYSICAL_PAGE_DONT_WAIT) == B_OK)
break;

// it couldn't map the second one, so sleep and retry
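The hunk above also shows why a non-blocking variant is needed at all: when two pages have to be mapped at the same time, only the first mapping may block. A sketch of that retry pattern (hypothetical fragment, not code from vm.cpp; put_physical_page() and snooze() are assumed to be the usual release and sleep calls):

for (;;) {
	// the first mapping may wait for a free slot
	map->ops->get_physical_page(sourceAddress, (addr_t *)&source, 0);

	// the second mapping must not block while the first one is held
	if (map->ops->get_physical_page(destAddress, (addr_t *)&dest,
			PHYSICAL_PAGE_DONT_WAIT) == B_OK)
		break;

	// couldn't map the second page: drop the first, sleep a little, retry
	map->ops->put_physical_page((addr_t)source);
	snooze(5000);
}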
@@ -817,7 +817,7 @@ clear_page(struct vm_page *page)
{
addr_t virtualAddress;
vm_get_physical_page(page->physical_page_number << PAGE_SHIFT,
- &virtualAddress, PHYSICAL_PAGE_CAN_WAIT);
+ &virtualAddress, 0);

memset((void *)virtualAddress, 0, B_PAGE_SIZE);