mirror of https://github.com/KolibriOS/kolibrios.git
synced 2024-12-27 08:49:40 +03:00

ddk: 3.19-rc1

git-svn-id: svn://kolibrios.org@5270 a494cfbc-eb01-0410-851d-a64ba20cac60

parent bade30c7b8
commit 16bc56fa96
@@ -1,13 +1,18 @@
 CC = gcc
 AS = as

 DRV_TOPDIR = $(CURDIR)/..
 DRV_INCLUDES = $(DRV_TOPDIR)/include

-INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/asm
-DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI
+INCLUDES = -I$(DRV_INCLUDES) \
+           -I$(DRV_INCLUDES)/asm \
+           -I$(DRV_INCLUDES)/uapi
+
+DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI -DCONFIG_TINY_RCU
+DEFINES+= -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE

 CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf \
          -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2
@@ -25,6 +30,7 @@ NAME_SRCS:= \
         io/write.c \
         linux/bitmap.c \
         linux/dmi.c \
+        linux/find_next_bit.c \
         linux/idr.c \
         linux/interval_tree.c \
         linux/firmware.c \
@@ -1,6 +1,6 @@

 #include <ddk.h>
-#include <mutex.h>
+#include <linux/mutex.h>
 #include <syscall.h>

 #pragma pack(push, 1)
@@ -132,7 +132,9 @@ void __bitmap_shift_right(unsigned long *dst,
        lower = src[off + k];
        if (left && off + k == lim - 1)
            lower &= mask;
-       dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
+       dst[k] = lower >> rem;
+       if (rem)
+           dst[k] |= upper << (BITS_PER_LONG - rem);
        if (left && k == lim - 1)
            dst[k] &= mask;
    }
@@ -173,7 +175,9 @@ void __bitmap_shift_left(unsigned long *dst,
        upper = src[k];
        if (left && k == lim - 1)
            upper &= (1UL << left) - 1;
-       dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
+       dst[k + off] = upper << rem;
+       if (rem)
+           dst[k + off] |= lower >> (BITS_PER_LONG - rem);
        if (left && k + off == lim - 1)
            dst[k + off] &= (1UL << left) - 1;
    }
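Aside (not part of the commit): the point of splitting the single OR expression into a guarded two-step store in both shift hunks above is that C leaves a shift by the full word width undefined, and when rem == 0 the old one-liner shifted by exactly BITS_PER_LONG. A minimal standalone sketch of the same pattern, with hypothetical values:

    /* Illustrative sketch only: why the "if (rem)" guard matters.
     * Shifting an unsigned long by BITS_PER_LONG is undefined
     * behaviour, and rem == 0 makes the old single-line form do that. */
    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    static unsigned long shift_right_word(unsigned long upper,
                                          unsigned long lower,
                                          unsigned rem)
    {
        unsigned long dst = lower >> rem;   /* safe for rem in [0, BITS_PER_LONG) */
        if (rem)                            /* avoid shifting by BITS_PER_LONG */
            dst |= upper << (BITS_PER_LONG - rem);
        return dst;
    }

    int main(void)
    {
        /* rem == 0: the guarded form just copies "lower". */
        printf("%lx\n", shift_right_word(0xffUL, 0x12UL, 0)); /* prints 12 */
        printf("%lx\n", shift_right_word(0x1UL, 0x0UL, 4));   /* 1 << (BITS_PER_LONG-4) */
        return 0;
    }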
@@ -323,30 +327,32 @@ void bitmap_clear(unsigned long *map, unsigned int start, int len)
 }
 EXPORT_SYMBOL(bitmap_clear);

-/*
- * bitmap_find_next_zero_area - find a contiguous aligned zero area
+/**
+ * bitmap_find_next_zero_area_off - find a contiguous aligned zero area
  * @map: The address to base the search on
  * @size: The bitmap size in bits
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @align_mask: Alignment mask for zero area
+ * @align_offset: Alignment offset for zero area.
  *
  * The @align_mask should be one less than a power of 2; the effect is that
- * the bit offset of all zero areas this function finds is multiples of that
- * power of 2. A @align_mask of 0 means no alignment is required.
+ * the bit offset of all zero areas this function finds plus @align_offset
+ * is multiple of that power of 2.
  */
-unsigned long bitmap_find_next_zero_area(unsigned long *map,
-                    unsigned long size,
-                    unsigned long start,
-                    unsigned int nr,
-                    unsigned long align_mask)
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+                    unsigned long size,
+                    unsigned long start,
+                    unsigned int nr,
+                    unsigned long align_mask,
+                    unsigned long align_offset)
 {
    unsigned long index, end, i;
 again:
    index = find_next_zero_bit(map, size, start);

    /* Align allocation */
-   index = __ALIGN_MASK(index, align_mask);
+   index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;

    end = index + nr;
    if (end > size)

@@ -358,7 +364,7 @@ again:
    }
    return index;
 }
-EXPORT_SYMBOL(bitmap_find_next_zero_area);
+EXPORT_SYMBOL(bitmap_find_next_zero_area_off);

 /*
  * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
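Aside (not part of the commit): a hedged worked example of the new offset-aware alignment line. With align_mask = 3 (align to 4) and align_offset = 1, the function now looks for an index such that index + 1 is a multiple of 4; the values below are illustrative only.

    /* Illustrative sketch only: how align_offset shifts the alignment grid. */
    #include <stdio.h>

    #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(unsigned long)(mask))

    int main(void)
    {
        unsigned long index = 6, align_mask = 3, align_offset = 1;

        /* plain alignment: round 6 up to the next multiple of 4 -> 8 */
        printf("%lu\n", __ALIGN_MASK(index, align_mask));
        /* offset alignment: result is 7, because 7 + 1 == 8 is a multiple of 4 */
        printf("%lu\n", __ALIGN_MASK(index + align_offset, align_mask) - align_offset);
        return 0;
    }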
@@ -599,7 +605,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
  *
  * Further lets say we use the following code, invoking
  * bitmap_fold() then bitmap_onto, as suggested above to
- * avoid the possitility of an empty @dst result:
+ * avoid the possibility of an empty @dst result:
  *
  *	unsigned long *tmp;	// a temporary bitmap's bits
  *
@@ -24,9 +24,11 @@

 #include <ddk.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
 #include <linux/mutex.h>
-#include <pci.h>
+#include <linux/pci.h>
 #include <linux/gfp.h>
 #include <syscall.h>
@@ -142,7 +144,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool)
 {
    struct dma_page *page;

-   page = malloc(sizeof(*page));
+   page = __builtin_malloc(sizeof(*page));
    if (!page)
        return NULL;
    page->vaddr = (void*)KernelAlloc(pool->allocation);
@@ -228,7 +230,7 @@ void dma_pool_destroy(struct dma_pool *pool)
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
             dma_addr_t *handle)
 {
-   u32_t efl;
+   u32 efl;
    struct dma_page *page;
    size_t offset;
    void *retval;
@@ -262,7 +264,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
 {
    struct dma_page *page;
-   u32_t efl;
+   u32 efl;

    efl = safe_cli();
@@ -294,7 +296,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
    unsigned long flags;
    unsigned int offset;

-   u32_t efl;
+   u32 efl;

    page = pool_find_page(pool, dma);
    if (!page) {
@@ -7,12 +7,9 @@
 #include <linux/dmi.h>
 #include <syscall.h>

-#define pr_debug dbgprintf
-#define pr_info printf
-
 static void *dmi_alloc(unsigned len)
 {
-   return malloc(len);
+   return __builtin_malloc(len);
 };

 /*
drivers/ddk/linux/find_next_bit.c (new file, 285 lines)
@@ -0,0 +1,285 @@
/* find_next_bit.c: fallback find next bit implementation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/export.h>
#include <asm/types.h>
#include <asm/byteorder.h>

#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)

#ifndef find_next_bit
/*
 * Find the next set bit in a memory region.
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                unsigned long offset)
{
    const unsigned long *p = addr + BITOP_WORD(offset);
    unsigned long result = offset & ~(BITS_PER_LONG-1);
    unsigned long tmp;

    if (offset >= size)
        return size;
    size -= result;
    offset %= BITS_PER_LONG;
    if (offset) {
        tmp = *(p++);
        tmp &= (~0UL << offset);
        if (size < BITS_PER_LONG)
            goto found_first;
        if (tmp)
            goto found_middle;
        size -= BITS_PER_LONG;
        result += BITS_PER_LONG;
    }
    while (size & ~(BITS_PER_LONG-1)) {
        if ((tmp = *(p++)))
            goto found_middle;
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size)
        return result;
    tmp = *p;

found_first:
    tmp &= (~0UL >> (BITS_PER_LONG - size));
    if (tmp == 0UL)		/* Are any bits set? */
        return result + size;	/* Nope. */
found_middle:
    return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_next_bit);
#endif

#ifndef find_next_zero_bit
/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
                 unsigned long offset)
{
    const unsigned long *p = addr + BITOP_WORD(offset);
    unsigned long result = offset & ~(BITS_PER_LONG-1);
    unsigned long tmp;

    if (offset >= size)
        return size;
    size -= result;
    offset %= BITS_PER_LONG;
    if (offset) {
        tmp = *(p++);
        tmp |= ~0UL >> (BITS_PER_LONG - offset);
        if (size < BITS_PER_LONG)
            goto found_first;
        if (~tmp)
            goto found_middle;
        size -= BITS_PER_LONG;
        result += BITS_PER_LONG;
    }
    while (size & ~(BITS_PER_LONG-1)) {
        if (~(tmp = *(p++)))
            goto found_middle;
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size)
        return result;
    tmp = *p;

found_first:
    tmp |= ~0UL << size;
    if (tmp == ~0UL)	/* Are any bits zero? */
        return result + size;	/* Nope. */
found_middle:
    return result + ffz(tmp);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif

#ifndef find_first_bit
/*
 * Find the first set bit in a memory region.
 */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
{
    const unsigned long *p = addr;
    unsigned long result = 0;
    unsigned long tmp;

    while (size & ~(BITS_PER_LONG-1)) {
        if ((tmp = *(p++)))
            goto found;
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size)
        return result;

    tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
    if (tmp == 0UL)		/* Are any bits set? */
        return result + size;	/* Nope. */
found:
    return result + __ffs(tmp);
}
EXPORT_SYMBOL(find_first_bit);
#endif

#ifndef find_first_zero_bit
/*
 * Find the first cleared bit in a memory region.
 */
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
    const unsigned long *p = addr;
    unsigned long result = 0;
    unsigned long tmp;

    while (size & ~(BITS_PER_LONG-1)) {
        if (~(tmp = *(p++)))
            goto found;
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size)
        return result;

    tmp = (*p) | (~0UL << size);
    if (tmp == ~0UL)	/* Are any bits zero? */
        return result + size;	/* Nope. */
found:
    return result + ffz(tmp);
}
EXPORT_SYMBOL(find_first_zero_bit);
#endif

#ifdef __BIG_ENDIAN

/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swabp(const unsigned long * x)
{
#if BITS_PER_LONG == 64
    return (unsigned long) __swab64p((u64 *) x);
#elif BITS_PER_LONG == 32
    return (unsigned long) __swab32p((u32 *) x);
#else
#error BITS_PER_LONG not defined
#endif
}

/* include/linux/byteorder doesn't support "unsigned long" type */
static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
    return (unsigned long) __swab64((u64) y);
#elif BITS_PER_LONG == 32
    return (unsigned long) __swab32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}

#ifndef find_next_zero_bit_le
unsigned long find_next_zero_bit_le(const void *addr, unsigned
        long size, unsigned long offset)
{
    const unsigned long *p = addr;
    unsigned long result = offset & ~(BITS_PER_LONG - 1);
    unsigned long tmp;

    if (offset >= size)
        return size;
    p += BITOP_WORD(offset);
    size -= result;
    offset &= (BITS_PER_LONG - 1UL);
    if (offset) {
        tmp = ext2_swabp(p++);
        tmp |= (~0UL >> (BITS_PER_LONG - offset));
        if (size < BITS_PER_LONG)
            goto found_first;
        if (~tmp)
            goto found_middle;
        size -= BITS_PER_LONG;
        result += BITS_PER_LONG;
    }

    while (size & ~(BITS_PER_LONG - 1)) {
        if (~(tmp = *(p++)))
            goto found_middle_swap;
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size)
        return result;
    tmp = ext2_swabp(p);
found_first:
    tmp |= ~0UL << size;
    if (tmp == ~0UL)	/* Are any bits zero? */
        return result + size; /* Nope. Skip ffz */
found_middle:
    return result + ffz(tmp);

found_middle_swap:
    return result + ffz(ext2_swab(tmp));
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif

#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned
        long size, unsigned long offset)
{
    const unsigned long *p = addr;
    unsigned long result = offset & ~(BITS_PER_LONG - 1);
    unsigned long tmp;

    if (offset >= size)
        return size;
    p += BITOP_WORD(offset);
    size -= result;
    offset &= (BITS_PER_LONG - 1UL);
    if (offset) {
        tmp = ext2_swabp(p++);
        tmp &= (~0UL << offset);
        if (size < BITS_PER_LONG)
            goto found_first;
        if (tmp)
            goto found_middle;
        size -= BITS_PER_LONG;
        result += BITS_PER_LONG;
    }

    while (size & ~(BITS_PER_LONG - 1)) {
        tmp = *(p++);
        if (tmp)
            goto found_middle_swap;
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size)
        return result;
    tmp = ext2_swabp(p);
found_first:
    tmp &= (~0UL >> (BITS_PER_LONG - size));
    if (tmp == 0UL)		/* Are any bits set? */
        return result + size; /* Nope. */
found_middle:
    return result + __ffs(tmp);

found_middle_swap:
    return result + __ffs(ext2_swab(tmp));
}
EXPORT_SYMBOL(find_next_bit_le);
#endif

#endif /* __BIG_ENDIAN */
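Aside (not part of the commit): a hedged usage sketch for the fallback helpers above. The loop below is the conventional way they are consumed (essentially what the kernel's for_each_set_bit() macro expands to); the bitmap contents are hypothetical.

    /* Illustrative only: walk every set bit of a 64-bit bitmap stored as
     * two 32-bit words (CONFIG_X86_32, so BITS_PER_LONG == 32). */
    unsigned long map[2] = { 0x11UL, 0x80000000UL };  /* bits 0, 4, 63 set */
    const unsigned long nbits = 64;
    unsigned long bit;

    for (bit = find_first_bit(map, nbits);
         bit < nbits;
         bit = find_next_bit(map, nbits, bit + 1)) {
        /* visits bit = 0, 4, 63 in order */
    }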
@@ -1,6 +1,8 @@

 #include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/byteorder/little_endian.h>
 #include <linux/gfp.h>
 #include <linux/errno.h>
 #include <linux/firmware.h>
@@ -20,20 +20,16 @@
  * that id to this code and it returns your pointer.
  */

 #include <linux/kernel.h>
 #ifndef TEST                        // to test in user space...
 #include <linux/slab.h>
 #include <linux/export.h>
 #endif
 #include <linux/err.h>
 #include <linux/string.h>
+#include <linux/bitops.h>
 #include <linux/idr.h>
 //#include <stdlib.h>
+#include <linux/spinlock.h>

-static inline void * __must_check ERR_PTR(long error)
-{
-   return (void *) error;
-}
-
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
-                        unsigned long offset);

 #define MAX_IDR_SHIFT		(sizeof(int) * 8  - 1)
@@ -132,7 +128,7 @@ static inline void free_layer(struct idr *idr, struct idr_layer *p)
 {
    if (idr->hint == p)
        RCU_INIT_POINTER(idr->hint, NULL);
-   idr_layer_rcu_free(&p->rcu_head);
+   call_rcu(&p->rcu_head, idr_layer_rcu_free);
 }

 /* only called when idp->lock is held */
@@ -500,7 +496,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
    n = id & IDR_MASK;
    if (likely(p != NULL && test_bit(n, p->bitmap))) {
        __clear_bit(n, p->bitmap);
-       rcu_assign_pointer(p->ary[n], NULL);
+       RCU_INIT_POINTER(p->ary[n], NULL);
        to_free = NULL;
        while(*paa && ! --((**paa)->count)){
            if (to_free)
@@ -564,7 +560,7 @@ static void __idr_remove_all(struct idr *idp)

    n = idp->layers * IDR_BITS;
    *paa = idp->top;
-   rcu_assign_pointer(idp->top, NULL);
+   RCU_INIT_POINTER(idp->top, NULL);
    max = idr_max(idp->layers);

    id = 0;
@@ -599,7 +595,7 @@ static void __idr_remove_all(struct idr *idp)
  * idr_destroy().
  *
  * A typical clean-up sequence for objects stored in an idr tree will use
- * idr_for_each() to free all objects, if necessay, then idr_destroy() to
+ * idr_for_each() to free all objects, if necessary, then idr_destroy() to
  * free up the id mappings and cached idr_layers.
  */
 void idr_destroy(struct idr *idp)
@@ -1119,129 +1115,3 @@ void ida_init(struct ida *ida)

 }
 EXPORT_SYMBOL(ida_init);
-
-
-unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
-{
-   const unsigned long *p = addr;
-   unsigned long result = 0;
-   unsigned long tmp;
-
-   while (size & ~(BITS_PER_LONG-1)) {
-       if ((tmp = *(p++)))
-           goto found;
-       result += BITS_PER_LONG;
-       size -= BITS_PER_LONG;
-   }
-   if (!size)
-       return result;
-
-   tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
-   if (tmp == 0UL)		/* Are any bits set? */
-       return result + size;	/* Nope. */
-found:
-   return result + __ffs(tmp);
-}
-
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
-               unsigned long offset)
-{
-   const unsigned long *p = addr + BITOP_WORD(offset);
-   unsigned long result = offset & ~(BITS_PER_LONG-1);
-   unsigned long tmp;
-
-   if (offset >= size)
-       return size;
-   size -= result;
-   offset %= BITS_PER_LONG;
-   if (offset) {
-       tmp = *(p++);
-       tmp &= (~0UL << offset);
-       if (size < BITS_PER_LONG)
-           goto found_first;
-       if (tmp)
-           goto found_middle;
-       size -= BITS_PER_LONG;
-       result += BITS_PER_LONG;
-   }
-   while (size & ~(BITS_PER_LONG-1)) {
-       if ((tmp = *(p++)))
-           goto found_middle;
-       result += BITS_PER_LONG;
-       size -= BITS_PER_LONG;
-   }
-   if (!size)
-       return result;
-   tmp = *p;
-
-found_first:
-   tmp &= (~0UL >> (BITS_PER_LONG - size));
-   if (tmp == 0UL)		/* Are any bits set? */
-       return result + size;	/* Nope. */
-found_middle:
-   return result + __ffs(tmp);
-}
-
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
-                unsigned long offset)
-{
-   const unsigned long *p = addr + BITOP_WORD(offset);
-   unsigned long result = offset & ~(BITS_PER_LONG-1);
-   unsigned long tmp;
-
-   if (offset >= size)
-       return size;
-   size -= result;
-   offset %= BITS_PER_LONG;
-   if (offset) {
-       tmp = *(p++);
-       tmp |= ~0UL >> (BITS_PER_LONG - offset);
-       if (size < BITS_PER_LONG)
-           goto found_first;
-       if (~tmp)
-           goto found_middle;
-       size -= BITS_PER_LONG;
-       result += BITS_PER_LONG;
-   }
-   while (size & ~(BITS_PER_LONG-1)) {
-       if (~(tmp = *(p++)))
-           goto found_middle;
-       result += BITS_PER_LONG;
-       size -= BITS_PER_LONG;
-   }
-   if (!size)
-       return result;
-   tmp = *p;
-
-found_first:
-   tmp |= ~0UL << size;
-   if (tmp == ~0UL)	/* Are any bits zero? */
-       return result + size;	/* Nope. */
-found_middle:
-   return result + ffz(tmp);
-}
-
-unsigned int hweight32(unsigned int w)
-{
-   unsigned int res = w - ((w >> 1) & 0x55555555);
-   res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
-   res = (res + (res >> 4)) & 0x0F0F0F0F;
-   res = res + (res >> 8);
-   return (res + (res >> 16)) & 0x000000FF;
-}
-
-unsigned long hweight64(__u64 w)
-{
-#if BITS_PER_LONG == 32
-   return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
-#elif BITS_PER_LONG == 64
-   __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
-   res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
-   res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
-   res = res + (res >> 8);
-   res = res + (res >> 16);
-   return (res + (res >> 32)) & 0x00000000000000FFul;
-#endif
-}
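Aside (not part of the commit): the bodies deleted above are local duplicates of the generic bit-search and population-count helpers that this commit moves to drivers/ddk/linux/find_next_bit.c and the asm-generic hweight headers. The removed hweight32() is the classic SWAR popcount; a hedged worked example of its folds, with an illustrative input:

    /* Illustrative only: w - ((w >> 1) & 0x55555555) leaves, in every
     * 2-bit field, the count of set bits in that field; later steps
     * fold those partial counts into one byte. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int w = 0xB;                       /* 0b1011, popcount 3 */
        unsigned int res = w - ((w >> 1) & 0x55555555);   /* 0b0110: fields 01,10 */
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333); /* 3 */
        res = (res + (res >> 4)) & 0x0F0F0F0F;
        res = res + (res >> 8);
        printf("%u\n", (res + (res >> 16)) & 0x000000FF); /* prints 3 */
        return 0;
    }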
@@ -1,101 +1,145 @@

 #define pr_fmt(fmt) "list_sort_test: " fmt

 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/list_sort.h>
 #include <linux/slab.h>
 #include <linux/list.h>

+#define MAX_LIST_LENGTH_BITS 20
+
+/*
+ * Returns a list organized in an intermediate format suited
+ * to chaining of merge() calls: null-terminated, no reserved or
+ * sentinel head node, "prev" links not maintained.
+ */
+static struct list_head *merge(void *priv,
+               int (*cmp)(void *priv, struct list_head *a,
+                   struct list_head *b),
+               struct list_head *a, struct list_head *b)
+{
+   struct list_head head, *tail = &head;
+
+   while (a && b) {
+       /* if equal, take 'a' -- important for sort stability */
+       if ((*cmp)(priv, a, b) <= 0) {
+           tail->next = a;
+           a = a->next;
+       } else {
+           tail->next = b;
+           b = b->next;
+       }
+       tail = tail->next;
+   }
+   tail->next = a?:b;
+   return head.next;
+}
+
+/*
+ * Combine final list merge with restoration of standard doubly-linked
+ * list structure. This approach duplicates code from merge(), but
+ * runs faster than the tidier alternatives of either a separate final
+ * prev-link restoration pass, or maintaining the prev links
+ * throughout.
+ */
+static void merge_and_restore_back_links(void *priv,
+               int (*cmp)(void *priv, struct list_head *a,
+                   struct list_head *b),
+               struct list_head *head,
+               struct list_head *a, struct list_head *b)
+{
+   struct list_head *tail = head;
+   u8 count = 0;
+
+   while (a && b) {
+       /* if equal, take 'a' -- important for sort stability */
+       if ((*cmp)(priv, a, b) <= 0) {
+           tail->next = a;
+           a->prev = tail;
+           a = a->next;
+       } else {
+           tail->next = b;
+           b->prev = tail;
+           b = b->next;
+       }
+       tail = tail->next;
+   }
+   tail->next = a ? : b;
+
+   do {
+       /*
+        * In worst cases this loop may run many iterations.
+        * Continue callbacks to the client even though no
+        * element comparison is needed, so the client's cmp()
+        * routine can invoke cond_resched() periodically.
+        */
+       if (unlikely(!(++count)))
+           (*cmp)(priv, tail->next, tail->next);
+
+       tail->next->prev = tail;
+       tail = tail->next;
+   } while (tail->next);
+
+   tail->next = head;
+   head->prev = tail;
+}
+
 /**
- * list_sort - sort a list.
- * @priv: private data, passed to @cmp
+ * list_sort - sort a list
+ * @priv: private data, opaque to list_sort(), passed to @cmp
  * @head: the list to sort
  * @cmp: the elements comparison function
  *
- * This function has been implemented by Mark J Roberts <mjr@znex.org>. It
- * implements "merge sort" which has O(nlog(n)) complexity. The list is sorted
- * in ascending order.
+ * This function implements "merge sort", which has O(nlog(n))
+ * complexity.
  *
- * The comparison function @cmp is supposed to return a negative value if @a is
- * less than @b, and a positive value if @a is greater than @b. If @a and @b
- * are equivalent, then it does not matter what this function returns.
+ * The comparison function @cmp must return a negative value if @a
+ * should sort before @b, and a positive value if @a should sort after
+ * @b. If @a and @b are equivalent, and their original relative
+ * ordering is to be preserved, @cmp must return 0.
  */
 void list_sort(void *priv, struct list_head *head,
        int (*cmp)(void *priv, struct list_head *a,
            struct list_head *b))
 {
-   struct list_head *p, *q, *e, *list, *tail, *oldhead;
-   int insize, nmerges, psize, qsize, i;
+   struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
+                       -- last slot is a sentinel */
+   int lev;  /* index into part[] */
+   int max_lev = 0;
+   struct list_head *list;

    if (list_empty(head))
        return;

+   memset(part, 0, sizeof(part));
+
+   head->prev->next = NULL;
    list = head->next;
-   list_del(head);
-   insize = 1;
-   for (;;) {
-       p = oldhead = list;
-       list = tail = NULL;
-       nmerges = 0;
-
-       while (p) {
-           nmerges++;
-           q = p;
-           psize = 0;
-           for (i = 0; i < insize; i++) {
-               psize++;
-               q = q->next == oldhead ? NULL : q->next;
-               if (!q)
-                   break;
-           }
-
-           qsize = insize;
-           while (psize > 0 || (qsize > 0 && q)) {
-               if (!psize) {
-                   e = q;
-                   q = q->next;
-                   qsize--;
-                   if (q == oldhead)
-                       q = NULL;
-               } else if (!qsize || !q) {
-                   e = p;
-                   p = p->next;
-                   psize--;
-                   if (p == oldhead)
-                       p = NULL;
-               } else if (cmp(priv, p, q) <= 0) {
-                   e = p;
-                   p = p->next;
-                   psize--;
-                   if (p == oldhead)
-                       p = NULL;
-               } else {
-                   e = q;
-                   q = q->next;
-                   qsize--;
-                   if (q == oldhead)
-                       q = NULL;
-               }
-               if (tail)
-                   tail->next = e;
-               else
-                   list = e;
-               e->prev = tail;
-               tail = e;
-           }
-           p = q;
-       }
-
-       tail->next = list;
-       list->prev = tail;
-
-       if (nmerges <= 1)
-           break;
-
-       insize *= 2;
+
+   while (list) {
+       struct list_head *cur = list;
+       list = list->next;
+       cur->next = NULL;
+
+       for (lev = 0; part[lev]; lev++) {
+           cur = merge(priv, cmp, part[lev], cur);
+           part[lev] = NULL;
+       }
+       if (lev > max_lev) {
+           if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
+               printk_once(KERN_DEBUG "list too long for efficiency\n");
+               lev--;
+           }
+           max_lev = lev;
+       }
+       part[lev] = cur;
    }

-   head->next = list;
-   head->prev = list->prev;
-   list->prev->next = head;
-   list->prev = head;
+   for (lev = 0; lev < max_lev; lev++)
+       if (part[lev])
+           list = merge(priv, cmp, part[lev], list);
+
+   merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
 }
 EXPORT_SYMBOL(list_sort);
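Aside (not part of the commit): a hedged usage sketch for the new list_sort(). The element struct and comparison function names below are hypothetical, not from the commit.

    /* Illustrative only: sorting a list of integers. */
    struct item {
        struct list_head node;
        int key;
    };

    static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
    {
        struct item *ia = list_entry(a, struct item, node);
        struct item *ib = list_entry(b, struct item, node);

        /* negative: a first; positive: b first; 0 preserves the original
         * relative order (the sort is stable). Written this way to avoid
         * integer overflow that "ia->key - ib->key" could suffer. */
        return (ia->key > ib->key) - (ia->key < ib->key);
    }

    /* ...then, with a populated LIST_HEAD(items): */
    list_sort(NULL, &items, item_cmp);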
@@ -101,7 +101,7 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
         *      / \              / \
         *     p   u    -->     P   U
         *    /                /
-        *   n                N
+        *   n                n
         *
         * However, since g's parent might be red, and
         * 4) does not allow this, we need to recurse
@@ -7,6 +7,7 @@
  * Version 2. See the file COPYING for more details.
  */
 #include <linux/export.h>
+#include <linux/slab.h>
 #include <linux/scatterlist.h>

 /**
@@ -70,7 +71,7 @@ EXPORT_SYMBOL(sg_nents);
  **/
 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 {
-#ifndef ARCH_HAS_SG_CHAIN
+#ifndef CONFIG_ARCH_HAS_SG_CHAIN
    struct scatterlist *ret = &sgl[nents - 1];
 #else
    struct scatterlist *sg, *ret = NULL;
@@ -182,10 +183,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
        }

        table->orig_nents -= sg_size;
-       if (!skip_first_chunk) {
-           free_fn(sgl, alloc_size);
+       if (skip_first_chunk)
            skip_first_chunk = false;
-       }
+       else
+           free_fn(sgl, alloc_size);
        sgl = next;
    }
@@ -234,7 +235,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,

    if (nents == 0)
        return -EINVAL;
-#ifndef ARCH_HAS_SG_CHAIN
+#ifndef CONFIG_ARCH_HAS_SG_CHAIN
    if (WARN_ON_ONCE(nents > max_ents))
        return -EINVAL;
 #endif
@@ -27,7 +27,7 @@

 #ifndef __HAVE_ARCH_STRLCPY
 /**
- * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * strlcpy - Copy a C-string into a sized buffer
  * @dest: Where to copy the string to
  * @src: Where to copy the string from
  * @size: size of destination buffer
@@ -1,4 +1,4 @@
-#include <jiffies.h>
+#include <linux/jiffies.h>

@@ -131,6 +131,7 @@ unsigned long msecs_to_jiffies(const unsigned int m)
        >> MSEC_TO_HZ_SHR32;
 #endif
 }
+EXPORT_SYMBOL(msecs_to_jiffies);

 unsigned long usecs_to_jiffies(const unsigned int u)
 {
@@ -145,12 +146,27 @@ unsigned long usecs_to_jiffies(const unsigned int u)
        >> USEC_TO_HZ_SHR32;
 #endif
 }
+EXPORT_SYMBOL(usecs_to_jiffies);

-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+/*
+ * The TICK_NSEC - 1 rounds up the value to the next resolution. Note
+ * that a remainder subtract here would not do the right thing as the
+ * resolution values don't fall on second boundries. I.e. the line:
+ * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
+ * Note that due to the small error in the multiplier here, this
+ * rounding is incorrect for sufficiently large values of tv_nsec, but
+ * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
+ * OK.
+ *
+ * Rather, we just shift the bits off the right.
+ *
+ * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
+ * value to a scaled second value.
+ */
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
 {
-   unsigned long sec = value->tv_sec;
-   long nsec = value->tv_nsec + TICK_NSEC - 1;
+   nsec = nsec + TICK_NSEC - 1;

    if (sec >= MAX_SEC_IN_JIFFIES){
        sec = MAX_SEC_IN_JIFFIES;

@@ -162,6 +178,28 @@ timespec_to_jiffies(const struct timespec *value)

 }

+unsigned long
+timespec_to_jiffies(const struct timespec *value)
+{
+   return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+
+EXPORT_SYMBOL(timespec_to_jiffies);
+
+void
+jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
+{
+   /*
+    * Convert jiffies to nanoseconds and separate with
+    * one divide.
+    */
+   u32 rem;
+   value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+                   NSEC_PER_SEC, &rem);
+   value->tv_nsec = rem;
+}
+EXPORT_SYMBOL(jiffies_to_timespec);
+
 s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 {
    u64 quotient;
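Aside (not part of the commit): a hedged sketch of the round trip the two helpers above implement, assuming HZ = 100, i.e. TICK_NSEC = 10,000,000 ns. The constants are illustrative, not taken from the commit.

    /* Illustrative only: timespec -> jiffies rounds *up* to the next tick,
     * jiffies -> timespec is exact. With HZ = 100:
     *
     *   { tv_sec = 1, tv_nsec = 5000000 }            (1.005 s)
     *     -> __timespec_to_jiffies(1, 5000000) == 101 jiffies (1.01 s),
     *        because "nsec + TICK_NSEC - 1" rounds the half tick up;
     *
     *   jiffies_to_timespec(101, &ts)
     *     -> 101 * 10000000 ns = 1010000000 ns
     *     -> div_u64_rem(..., NSEC_PER_SEC, &rem) gives
     *        ts.tv_sec = 1, ts.tv_nsec = 10000000.
     */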
@@ -1,5 +1,39 @@
+/*
+ * kernel/workqueue.c - generic async execution with shared worker pool
+ *
+ * Copyright (C) 2002		Ingo Molnar
+ *
+ * Derived from the taskqueue/keventd code by:
+ *   David Woodhouse <dwmw2@infradead.org>
+ *   Andrew Morton
+ *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
+ *   Theodore Ts'o <tytso@mit.edu>
+ *
+ * Made to use alloc_percpu by Christoph Lameter.
+ *
+ * Copyright (C) 2010		SUSE Linux Products GmbH
+ * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
+ *
+ * This is the generic async execution mechanism.  Work items as are
+ * executed in process context.  The worker pool is shared and
+ * automatically managed.  There are two worker pools for each CPU (one for
+ * normal work items and the other for high priority ones) and some extra
+ * pools for workqueues which are not bound to any specific CPU - the
+ * number of these backing pools is dynamic.
+ *
+ * Please read Documentation/workqueue.txt for details.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/lockdep.h>
+#include <linux/idr.h>

 #include <ddk.h>

 extern int driver_wq_state;
@@ -522,7 +522,7 @@ MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP
 */

 #include <ddk.h>
-#include <mutex.h>
+#include <linux/mutex.h>
 #include <syscall.h>

 /* Version identifier to allow people to support multiple versions */
@@ -22,11 +22,12 @@
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/kernel.h>
 #include <errno-base.h>

 #include <linux/ioport.h>
+#include <linux/export.h>

 #include <asm/div64.h>
 #include <asm/page.h>		/* for PAGE_SIZE */
@@ -41,10 +42,6 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
    return div_s64_rem(dividend, divisor, &remainder);
 }

-struct va_format {
-    const char *fmt;
-    va_list *va;
-};

 #define ZERO_SIZE_PTR ((void *)16)
@@ -62,13 +59,6 @@ const char hex_asc[] = "0123456789abcdef";
 /* Works only for digits and letters, but small and fast */
 #define TOLOWER(x) ((x) | 0x20)

-static inline char *hex_byte_pack(char *buf, u8 byte)
-{
-   *buf++ = hex_asc_hi(byte);
-   *buf++ = hex_asc_lo(byte);
-   return buf;
-}
-

 char *skip_spaces(const char *str)
 {
@@ -1297,6 +1287,7 @@ qualifier:
  * %piS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address
  * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
  *   case.
+ * %*pE[achnops] print an escaped buffer
  * %*ph[CDN] a variable-length hex string with a separator (supports up to 64
  *   bytes of the input)
  * %n is ignored
drivers/include/asm-generic/bitops/const_hweight.h (new file, 43 lines)
@@ -0,0 +1,43 @@
#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_

/*
 * Compile time versions of __arch_hweightN()
 */
#define __const_hweight8(w)		\
	((unsigned int)			\
	 ((!!((w) & (1ULL << 0))) +	\
	  (!!((w) & (1ULL << 1))) +	\
	  (!!((w) & (1ULL << 2))) +	\
	  (!!((w) & (1ULL << 3))) +	\
	  (!!((w) & (1ULL << 4))) +	\
	  (!!((w) & (1ULL << 5))) +	\
	  (!!((w) & (1ULL << 6))) +	\
	  (!!((w) & (1ULL << 7)))))

#define __const_hweight16(w) (__const_hweight8(w)  + __const_hweight8((w)  >> 8 ))
#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))

/*
 * Generic interface.
 */
#define hweight8(w)  (__builtin_constant_p(w) ? __const_hweight8(w)  : __arch_hweight8(w))
#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))

/*
 * Interface for known constant arguments
 */
#define HWEIGHT8(w)  (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))

/*
 * Type invariant interface to the compile time constant hweight functions.
 */
#define HWEIGHT(w)   HWEIGHT64((u64)w)

#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
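Aside (not part of the commit): a hedged sketch of what the constant-folding interface above buys. Because __const_hweight8() expands to a constant expression for constant arguments, it can appear wherever a compile-time constant is required; the standalone snippet pastes the macro in for illustration.

    /* Illustrative only: __const_hweight8 as a constant expression. */
    #define __const_hweight8(w)		\
        ((unsigned int)			\
         ((!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + \
          (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + \
          (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + \
          (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7)))))

    char buf[__const_hweight8(0xF0)];   /* legal: array of size 4,
                                         * sized entirely at compile time */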
drivers/include/asm-generic/bitops/ext2-atomic-setbit.h (new file, 11 lines)
@@ -0,0 +1,11 @@
#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_
#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_

/*
 * Atomic bitops based version of ext2 atomic bitops
 */

#define ext2_set_bit_atomic(l, nr, addr)	test_and_set_bit_le(nr, addr)
#define ext2_clear_bit_atomic(l, nr, addr)	test_and_clear_bit_le(nr, addr)

#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_SETBIT_H_ */
drivers/include/asm-generic/bitops/find.h (new file, 62 lines)
@@ -0,0 +1,62 @@
#ifndef _ASM_GENERIC_BITOPS_FIND_H_
#define _ASM_GENERIC_BITOPS_FIND_H_

#ifndef find_next_bit
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 *
 * Returns the bit number for the next set bit
 * If no bits are set, returns @size.
 */
extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
        size, unsigned long offset);
#endif

#ifndef find_next_zero_bit
/**
 * find_next_zero_bit - find the next cleared bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 *
 * Returns the bit number of the next zero bit
 * If no bits are zero, returns @size.
 */
extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
        long size, unsigned long offset);
#endif

#ifdef CONFIG_GENERIC_FIND_FIRST_BIT

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum number of bits to search
 *
 * Returns the bit number of the first set bit.
 * If no bits are set, returns @size.
 */
extern unsigned long find_first_bit(const unsigned long *addr,
                    unsigned long size);

/**
 * find_first_zero_bit - find the first cleared bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum number of bits to search
 *
 * Returns the bit number of the first cleared bit.
 * If no bits are zero, returns @size.
 */
extern unsigned long find_first_zero_bit(const unsigned long *addr,
                     unsigned long size);
#else /* CONFIG_GENERIC_FIND_FIRST_BIT */

#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)

#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */

#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
drivers/include/asm-generic/bitops/hweight.h (new file, 7 lines)
@@ -0,0 +1,7 @@
#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
#define _ASM_GENERIC_BITOPS_HWEIGHT_H_

#include <asm-generic/bitops/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
drivers/include/asm-generic/bitops/le.h (new file, 97 lines)
@@ -0,0 +1,97 @@
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_

#include <asm/types.h>
#include <asm/byteorder.h>

#if defined(__LITTLE_ENDIAN)

#define BITOP_LE_SWIZZLE	0

static inline unsigned long find_next_zero_bit_le(const void *addr,
        unsigned long size, unsigned long offset)
{
    return find_next_zero_bit(addr, size, offset);
}

static inline unsigned long find_next_bit_le(const void *addr,
        unsigned long size, unsigned long offset)
{
    return find_next_bit(addr, size, offset);
}

static inline unsigned long find_first_zero_bit_le(const void *addr,
        unsigned long size)
{
    return find_first_zero_bit(addr, size);
}

#elif defined(__BIG_ENDIAN)

#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)

#ifndef find_next_zero_bit_le
extern unsigned long find_next_zero_bit_le(const void *addr,
        unsigned long size, unsigned long offset);
#endif

#ifndef find_next_bit_le
extern unsigned long find_next_bit_le(const void *addr,
        unsigned long size, unsigned long offset);
#endif

#ifndef find_first_zero_bit_le
#define find_first_zero_bit_le(addr, size) \
    find_next_zero_bit_le((addr), (size), 0)
#endif

#else
#error "Please fix <asm/byteorder.h>"
#endif

static inline int test_bit_le(int nr, const void *addr)
{
    return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void set_bit_le(int nr, void *addr)
{
    set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void clear_bit_le(int nr, void *addr)
{
    clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __set_bit_le(int nr, void *addr)
{
    __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __clear_bit_le(int nr, void *addr)
{
    __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int test_and_set_bit_le(int nr, void *addr)
{
    return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int test_and_clear_bit_le(int nr, void *addr)
{
    return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int __test_and_set_bit_le(int nr, void *addr)
{
    return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline int __test_and_clear_bit_le(int nr, void *addr)
{
    return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
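Aside (not part of the commit): on big-endian machines BITOP_LE_SWIZZLE is ((BITS_PER_LONG-1) & ~0x7), i.e. 24 for a 32-bit long or 56 for a 64-bit one. XOR-ing the bit number with it flips which byte of the word is addressed while keeping the bit position within that byte, which is what lets the _le accessors see a little-endian bit layout. A hedged worked example, with illustrative values:

    /* Illustrative only: 32-bit big-endian, so BITOP_LE_SWIZZLE == 24.
     * LE bit 0 lives in the lowest-addressed byte; on a big-endian long
     * that byte holds native bits 24..31. */
    int nr  = 0;            /* LE bit 0 (byte 0, bit 0)  */
    int swz = nr ^ 24;      /* -> native bit 24          */

    int nr2  = 9;           /* LE bit 9 (byte 1, bit 1)  */
    int swz2 = nr2 ^ 24;    /* -> native bit 17, which is
                             * bit 1 of the byte at offset 1 */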
@@ -1,18 +1,8 @@
 #ifndef __ASM_GENERIC_BITS_PER_LONG
 #define __ASM_GENERIC_BITS_PER_LONG

-/*
- * There seems to be no way of detecting this automatically from user
- * space, so 64 bit architectures should override this in their
- * bitsperlong.h. In particular, an architecture that supports
- * both 32 and 64 bit user space must not rely on CONFIG_64BIT
- * to decide it, but rather check a compiler provided macro.
- */
-#ifndef __BITS_PER_LONG
-#define __BITS_PER_LONG 32
-#endif
+#include <uapi/asm-generic/bitsperlong.h>

 #ifdef __KERNEL__

 #ifdef CONFIG_64BIT
 #define BITS_PER_LONG 64

@@ -28,5 +18,8 @@
 #error Inconsistent word size. Check asm/bitsperlong.h
 #endif

 #endif /* __KERNEL__ */
+
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif

 #endif /* __ASM_GENERIC_BITS_PER_LONG */
drivers/include/asm-generic/cacheflush.h (new file, 34 lines)
@@ -0,0 +1,34 @@
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>

/*
 * The cache doesn't need to be flushed when TLB entries change when
 * the cache is mapped to physical memory, not virtual memory
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)
#define flush_icache_page(vma,pg)		do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __ASM_CACHEFLUSH_H */
drivers/include/asm-generic/delay.h (new file, 44 lines)
@@ -0,0 +1,44 @@
#ifndef __ASM_GENERIC_DELAY_H
#define __ASM_GENERIC_DELAY_H

/* Undefined functions to get compile-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);

extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);

/*
 * The weird n/20000 thing suppresses a "comparison is always false due to
 * limited range of data type" warning with non-const 8-bit arguments.
 */

/* 0x10c7 is 2**32 / 1000000 (rounded up) */
#define udelay(n)						\
	({							\
		if (__builtin_constant_p(n)) {			\
			if ((n) / 20000 >= 1)			\
				 __bad_udelay();		\
			else					\
				__const_udelay((n) * 0x10c7ul);	\
		} else {					\
			__udelay(n);				\
		}						\
	})

/* 0x5 is 2**32 / 1000000000 (rounded up) */
#define ndelay(n)						\
	({							\
		if (__builtin_constant_p(n)) {			\
			if ((n) / 20000 >= 1)			\
				__bad_ndelay();			\
			else					\
				__const_udelay((n) * 5ul);	\
		} else {					\
			__ndelay(n);				\
		}						\
	})

#endif /* __ASM_GENERIC_DELAY_H */
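Aside (not part of the commit): the constant check in udelay()/ndelay() above is a compile-time guard. For a constant argument of 20000 or more the macro emits a call to the deliberately undefined __bad_udelay()/__bad_ndelay(), so the build fails at link time instead of silently busy-waiting for tens of milliseconds. A hedged usage sketch:

    /* Illustrative only */
    udelay(10);         /* constant < 20000: compiles to
                         * __const_udelay(10 * 0x10c7ul)          */

    /* udelay(50000); */ /* constant >= 20000: would reference
                          * __bad_udelay() and fail to link; a
                          * millisecond-scale helper should be
                          * used for waits that long               */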
drivers/include/asm-generic/getorder.h (new file, 61 lines)
@@ -0,0 +1,61 @@
#ifndef __ASM_GENERIC_GETORDER_H
#define __ASM_GENERIC_GETORDER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/log2.h>

/*
 * Runtime evaluation of get_order()
 */
static inline __attribute_const__
int __get_order(unsigned long size)
{
	int order;

	size--;
	size >>= PAGE_SHIFT;
#if BITS_PER_LONG == 32
	order = fls(size);
#else
	order = fls64(size);
#endif
	return order;
}

/**
 * get_order - Determine the allocation order of a memory size
 * @size: The size for which to get the order
 *
 * Determine the allocation order of a particular sized block of memory.  This
 * is on a logarithmic scale, where:
 *
 *	0 -> 2^0 * PAGE_SIZE and below
 *	1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1
 *	2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1
 *	3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1
 *	4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1
 *	...
 *
 * The order returned is used to find the smallest allocation granule required
 * to hold an object of the specified size.
 *
 * The result is undefined if the size is 0.
 *
 * This function may be used to initialise variables with compile time
 * evaluations of constants.
 */
#define get_order(n)						\
(								\
	__builtin_constant_p(n) ? (				\
		((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :	\
		(((n) < (1UL << PAGE_SHIFT)) ? 0 :		\
		 ilog2((n) - 1) - PAGE_SHIFT + 1)		\
	) :							\
	__get_order(n)						\
)

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_GENERIC_GETORDER_H */
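Aside (not part of the commit): a hedged worked example of get_order(), assuming the usual PAGE_SHIFT = 12 (4 KiB pages):

    /* Illustrative only: tracing __get_order(size) = fls((size - 1) >> 12).
     *
     *   get_order(1)    == 0   (one page is enough)
     *   get_order(4096) == 0   (4095 >> 12 == 0, fls(0) == 0)
     *   get_order(4097) == 1   (4096 >> 12 == 1, fls(1) == 1: needs 2^1 pages)
     *   get_order(8192) == 1   (8191 >> 12 == 1)
     *   get_order(8193) == 2   (8192 >> 12 == 2, fls(2) == 2: needs 2^2 pages)
     *
     * i.e. the result is the smallest n with size <= 2^n * PAGE_SIZE. */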
@@ -4,38 +4,11 @@
  * Integer declarations for architectures which use "long long"
  * for 64-bit types.
  */
 #ifndef _ASM_GENERIC_INT_LL64_H
 #define _ASM_GENERIC_INT_LL64_H

-#include <asm/bitsperlong.h>
+#include <uapi/asm-generic/int-ll64.h>

-#ifndef __ASSEMBLY__
-/*
- * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
- * header files exported to user space
- */
-
-typedef __signed__ char __s8;
-typedef unsigned char __u8;
-
-typedef __signed__ short __s16;
-typedef unsigned short __u16;
-
-typedef __signed__ int __s32;
-typedef unsigned int __u32;
-
-#ifdef __GNUC__
-__extension__ typedef __signed__ long long __s64;
-__extension__ typedef unsigned long long __u64;
-#else
-typedef __signed__ long long __s64;
-typedef unsigned long long __u64;
-#endif
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-
 #ifndef __ASSEMBLY__

@@ -73,6 +46,4 @@ typedef unsigned long long u64;

 #endif /* __ASSEMBLY__ */

-#endif /* __KERNEL__ */
-
 #endif /* _ASM_GENERIC_INT_LL64_H */
drivers/include/asm-generic/memory_model.h (new file, 77 lines)
@@ -0,0 +1,77 @@
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#ifndef __ASSEMBLY__

#if defined(CONFIG_FLATMEM)

#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif

#elif defined(CONFIG_DISCONTIGMEM)

#ifndef arch_pfn_to_nid
#define arch_pfn_to_nid(pfn)	pfn_to_nid(pfn)
#endif

#ifndef arch_local_page_offset
#define arch_local_page_offset(pfn, nid)	\
	((pfn) - NODE_DATA(nid)->node_start_pfn)
#endif

#endif /* CONFIG_DISCONTIGMEM */

/*
 * supports 3 memory models.
 */
#if defined(CONFIG_FLATMEM)

#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)
#elif defined(CONFIG_DISCONTIGMEM)

#define __pfn_to_page(pfn)			\
({	unsigned long __pfn = (pfn);		\
	unsigned long __nid = arch_pfn_to_nid(__pfn);  \
	NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
})

#define __page_to_pfn(pg)						\
({	const struct page *__pg = (pg);					\
	struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg));	\
	(unsigned long)(__pg - __pgdat->node_mem_map) +			\
	 __pgdat->node_start_pfn;					\
})

#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/* memmap is virtually contiguous.  */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)

#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: section's mem_map is encoded to reflect its start_pfn.
 * section[i].section_mem_map == mem_map's address - start_pfn;
 */
#define __page_to_pfn(pg)					\
({	const struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);			\
	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec)));	\
})

#define __pfn_to_page(pfn)				\
({	unsigned long __pfn = (pfn);			\
	struct mem_section *__sec = __pfn_to_section(__pfn);	\
	__section_mem_map_addr(__sec) + __pfn;		\
})
#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */

#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page

#endif /* __ASSEMBLY__ */

#endif
420
drivers/include/asm-generic/percpu.h
Normal file
420
drivers/include/asm-generic/percpu.h
Normal file
@ -0,0 +1,420 @@
|
||||
#ifndef _ASM_GENERIC_PERCPU_H_
|
||||
#define _ASM_GENERIC_PERCPU_H_
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/threads.h>
|
||||
#include <linux/percpu-defs.h>
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
/*
|
||||
* per_cpu_offset() is the offset that has to be added to a
|
||||
* percpu variable to get to the instance for a certain processor.
|
||||
*
|
||||
* Most arches use the __per_cpu_offset array for those offsets but
|
||||
* some arches have their own ways of determining the offset (x86_64, s390).
|
||||
*/
|
||||
#ifndef __per_cpu_offset
|
||||
extern unsigned long __per_cpu_offset[NR_CPUS];
|
||||
|
||||
#define per_cpu_offset(x) (__per_cpu_offset[x])
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Determine the offset for the currently active processor.
|
||||
* An arch may define __my_cpu_offset to provide a more effective
|
||||
* means of obtaining the offset to the per cpu variables of the
|
||||
* current processor.
|
||||
*/
|
||||
#ifndef __my_cpu_offset
|
||||
#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
|
||||
#endif
|
||||
#ifdef CONFIG_DEBUG_PREEMPT
|
||||
#define my_cpu_offset per_cpu_offset(smp_processor_id())
|
||||
#else
|
||||
#define my_cpu_offset __my_cpu_offset
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Arch may define arch_raw_cpu_ptr() to provide more efficient address
|
||||
* translations for raw_cpu_ptr().
|
||||
*/
|
||||
#ifndef arch_raw_cpu_ptr
|
||||
#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
|
||||
extern void setup_per_cpu_areas(void);
|
||||
#endif
|
||||
|
||||
#endif /* SMP */
|
||||
|
||||
#ifndef PER_CPU_BASE_SECTION
|
||||
#ifdef CONFIG_SMP
|
||||
#define PER_CPU_BASE_SECTION ".data..percpu"
|
||||
#else
|
||||
#define PER_CPU_BASE_SECTION ".data"
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef PER_CPU_ATTRIBUTES
|
||||
#define PER_CPU_ATTRIBUTES
|
||||
#endif
|
||||
|
||||
#ifndef PER_CPU_DEF_ATTRIBUTES
|
||||
#define PER_CPU_DEF_ATTRIBUTES
|
||||
#endif
|
||||
|
||||
#define raw_cpu_generic_to_op(pcp, val, op) \
|
||||
do { \
|
||||
*raw_cpu_ptr(&(pcp)) op val; \
|
||||
} while (0)
|
||||
|
||||
#define raw_cpu_generic_add_return(pcp, val) \
|
||||
({ \
|
||||
raw_cpu_add(pcp, val); \
|
||||
raw_cpu_read(pcp); \
|
||||
})
|
||||
|
||||
#define raw_cpu_generic_xchg(pcp, nval) \
|
||||
({ \
|
||||
typeof(pcp) __ret; \
|
||||
__ret = raw_cpu_read(pcp); \
|
||||
raw_cpu_write(pcp, nval); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
|
||||
({ \
|
||||
typeof(pcp) __ret; \
|
||||
__ret = raw_cpu_read(pcp); \
|
||||
if (__ret == (oval)) \
|
||||
raw_cpu_write(pcp, nval); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
|
||||
({ \
|
||||
int __ret = 0; \
|
||||
if (raw_cpu_read(pcp1) == (oval1) && \
|
||||
raw_cpu_read(pcp2) == (oval2)) { \
|
||||
raw_cpu_write(pcp1, nval1); \
|
||||
raw_cpu_write(pcp2, nval2); \
|
||||
__ret = 1; \
|
||||
} \
|
||||
(__ret); \
|
||||
})

#define this_cpu_generic_read(pcp) \
({ \
typeof(pcp) __ret; \
preempt_disable(); \
__ret = *this_cpu_ptr(&(pcp)); \
preempt_enable(); \
__ret; \
})

#define this_cpu_generic_to_op(pcp, val, op) \
do { \
unsigned long __flags; \
raw_local_irq_save(__flags); \
*raw_cpu_ptr(&(pcp)) op val; \
raw_local_irq_restore(__flags); \
} while (0)

#define this_cpu_generic_add_return(pcp, val) \
({ \
typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
raw_cpu_add(pcp, val); \
__ret = raw_cpu_read(pcp); \
raw_local_irq_restore(__flags); \
__ret; \
})

#define this_cpu_generic_xchg(pcp, nval) \
({ \
typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_read(pcp); \
raw_cpu_write(pcp, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})

#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_read(pcp); \
if (__ret == (oval)) \
raw_cpu_write(pcp, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})

#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
int __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
oval1, oval2, nval1, nval2); \
raw_local_irq_restore(__flags); \
__ret; \
})
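
The only difference between the raw_ and this_ families is interrupt safety: the this_cpu_generic_* forms bracket the access with raw_local_irq_save()/raw_local_irq_restore() so an interrupt handler on the same CPU cannot slip between the read and the write. As a sketch, this_cpu_add(counter, 1) on a hypothetical per-cpu counter expands under these generic definitions to roughly:

unsigned long __flags;
raw_local_irq_save(__flags);            /* block local IRQs around the RMW */
*raw_cpu_ptr(&(counter)) += 1;
raw_local_irq_restore(__flags);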

#ifndef raw_cpu_read_1
#define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp)))
#endif
#ifndef raw_cpu_read_2
#define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp)))
#endif
#ifndef raw_cpu_read_4
#define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp)))
#endif
#ifndef raw_cpu_read_8
#define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp)))
#endif

#ifndef raw_cpu_write_1
#define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef raw_cpu_write_2
#define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef raw_cpu_write_4
#define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef raw_cpu_write_8
#define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op(pcp, val, =)
#endif

#ifndef raw_cpu_add_1
#define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef raw_cpu_add_2
#define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef raw_cpu_add_4
#define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef raw_cpu_add_8
#define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op(pcp, val, +=)
#endif

#ifndef raw_cpu_and_1
#define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef raw_cpu_and_2
#define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef raw_cpu_and_4
#define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef raw_cpu_and_8
#define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op(pcp, val, &=)
#endif

#ifndef raw_cpu_or_1
#define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef raw_cpu_or_2
#define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef raw_cpu_or_4
#define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef raw_cpu_or_8
#define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op(pcp, val, |=)
#endif

#ifndef raw_cpu_add_return_1
#define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val)
#endif
#ifndef raw_cpu_add_return_2
#define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val)
#endif
#ifndef raw_cpu_add_return_4
#define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val)
#endif
#ifndef raw_cpu_add_return_8
#define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val)
#endif

#ifndef raw_cpu_xchg_1
#define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif
#ifndef raw_cpu_xchg_2
#define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif
#ifndef raw_cpu_xchg_4
#define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif
#ifndef raw_cpu_xchg_8
#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif

#ifndef raw_cpu_cmpxchg_1
#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef raw_cpu_cmpxchg_2
#define raw_cpu_cmpxchg_2(pcp, oval, nval) \
raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef raw_cpu_cmpxchg_4
#define raw_cpu_cmpxchg_4(pcp, oval, nval) \
raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef raw_cpu_cmpxchg_8
#define raw_cpu_cmpxchg_8(pcp, oval, nval) \
raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif

#ifndef raw_cpu_cmpxchg_double_1
#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef raw_cpu_cmpxchg_double_2
#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef raw_cpu_cmpxchg_double_4
#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef raw_cpu_cmpxchg_double_8
#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif

#ifndef this_cpu_read_1
#define this_cpu_read_1(pcp) this_cpu_generic_read(pcp)
#endif
#ifndef this_cpu_read_2
#define this_cpu_read_2(pcp) this_cpu_generic_read(pcp)
#endif
#ifndef this_cpu_read_4
#define this_cpu_read_4(pcp) this_cpu_generic_read(pcp)
#endif
#ifndef this_cpu_read_8
#define this_cpu_read_8(pcp) this_cpu_generic_read(pcp)
#endif

#ifndef this_cpu_write_1
#define this_cpu_write_1(pcp, val) this_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef this_cpu_write_2
#define this_cpu_write_2(pcp, val) this_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef this_cpu_write_4
#define this_cpu_write_4(pcp, val) this_cpu_generic_to_op(pcp, val, =)
#endif
#ifndef this_cpu_write_8
#define this_cpu_write_8(pcp, val) this_cpu_generic_to_op(pcp, val, =)
#endif

#ifndef this_cpu_add_1
#define this_cpu_add_1(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef this_cpu_add_2
#define this_cpu_add_2(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef this_cpu_add_4
#define this_cpu_add_4(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
#endif
#ifndef this_cpu_add_8
#define this_cpu_add_8(pcp, val) this_cpu_generic_to_op(pcp, val, +=)
#endif

#ifndef this_cpu_and_1
#define this_cpu_and_1(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef this_cpu_and_2
#define this_cpu_and_2(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef this_cpu_and_4
#define this_cpu_and_4(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
#endif
#ifndef this_cpu_and_8
#define this_cpu_and_8(pcp, val) this_cpu_generic_to_op(pcp, val, &=)
#endif

#ifndef this_cpu_or_1
#define this_cpu_or_1(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef this_cpu_or_2
#define this_cpu_or_2(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef this_cpu_or_4
#define this_cpu_or_4(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
#endif
#ifndef this_cpu_or_8
#define this_cpu_or_8(pcp, val) this_cpu_generic_to_op(pcp, val, |=)
#endif

#ifndef this_cpu_add_return_1
#define this_cpu_add_return_1(pcp, val) this_cpu_generic_add_return(pcp, val)
#endif
#ifndef this_cpu_add_return_2
#define this_cpu_add_return_2(pcp, val) this_cpu_generic_add_return(pcp, val)
#endif
#ifndef this_cpu_add_return_4
#define this_cpu_add_return_4(pcp, val) this_cpu_generic_add_return(pcp, val)
#endif
#ifndef this_cpu_add_return_8
#define this_cpu_add_return_8(pcp, val) this_cpu_generic_add_return(pcp, val)
#endif

#ifndef this_cpu_xchg_1
#define this_cpu_xchg_1(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif
#ifndef this_cpu_xchg_2
#define this_cpu_xchg_2(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif
#ifndef this_cpu_xchg_4
#define this_cpu_xchg_4(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif
#ifndef this_cpu_xchg_8
#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif

#ifndef this_cpu_cmpxchg_1
#define this_cpu_cmpxchg_1(pcp, oval, nval) \
this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef this_cpu_cmpxchg_2
#define this_cpu_cmpxchg_2(pcp, oval, nval) \
this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef this_cpu_cmpxchg_4
#define this_cpu_cmpxchg_4(pcp, oval, nval) \
this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef this_cpu_cmpxchg_8
#define this_cpu_cmpxchg_8(pcp, oval, nval) \
this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif

#ifndef this_cpu_cmpxchg_double_1
#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef this_cpu_cmpxchg_double_2
#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef this_cpu_cmpxchg_double_4
#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif
#ifndef this_cpu_cmpxchg_double_8
#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
#endif

#endif /* _ASM_GENERIC_PERCPU_H_ */

69 drivers/include/asm-generic/pgtable-nopmd.h Normal file
@ -0,0 +1,69 @@
#ifndef _PGTABLE_NOPMD_H
#define _PGTABLE_NOPMD_H

#ifndef __ASSEMBLY__

#include <asm-generic/pgtable-nopud.h>

struct mm_struct;

#define __PAGETABLE_PMD_FOLDED

/*
* Having the pmd type consist of a pud gets the size right, and allows
* us to conceptually access the pud entry that this pmd is folded into
* without casting.
*/
typedef struct { pud_t pud; } pmd_t;

#define PMD_SHIFT PUD_SHIFT
#define PTRS_PER_PMD 1
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))

/*
* The "pud_xxx()" functions here are trivial for a folded two-level
* setup: the pmd is never bad, and a pmd always exists (as it's folded
* into the pud entry)
*/
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
static inline void pud_clear(pud_t *pud) { }
#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))

#define pud_populate(mm, pmd, pte) do { } while (0)

/*
* (pmds are folded into puds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
*/
#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })

static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
{
return (pmd_t *)pud;
}

#define pmd_val(x) (pud_val((x).pud))
#define __pmd(x) ((pmd_t) { __pud(x) } )

#define pud_page(pud) (pmd_page((pmd_t){ pud }))
#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))

/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pud, so has no extra memory associated with it.
*/
#define pmd_alloc_one(mm, address) NULL
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
}
#define __pmd_free_tlb(tlb, x, a) do { } while (0)

#undef pmd_addr_end
#define pmd_addr_end(addr, end) (end)

#endif /* __ASSEMBLY__ */

#endif /* _PGTABLE_NOPMD_H */
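
With the pmd folded away like this, generic page-table walking code compiles unchanged; the middle level costs nothing because pmd_offset() is only a cast. A sketch of the walk, assuming pgd_offset() and an mm come from elsewhere in the port:

pgd_t *pgd = pgd_offset(mm, addr);     /* real top-level lookup */
pud_t *pud = pud_offset(pgd, addr);    /* just (pud_t *)pgd when puds are folded */
pmd_t *pmd = pmd_offset(pud, addr);    /* just (pmd_t *)pud, as defined above */
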
61 drivers/include/asm-generic/pgtable-nopud.h Normal file
@ -0,0 +1,61 @@
#ifndef _PGTABLE_NOPUD_H
#define _PGTABLE_NOPUD_H

#ifndef __ASSEMBLY__

#define __PAGETABLE_PUD_FOLDED

/*
* Having the pud type consist of a pgd gets the size right, and allows
* us to conceptually access the pgd entry that this pud is folded into
* without casting.
*/
typedef struct { pgd_t pgd; } pud_t;

#define PUD_SHIFT PGDIR_SHIFT
#define PTRS_PER_PUD 1
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))

/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
* setup: the pud is never bad, and a pud always exists (as it's folded
* into the pgd entry)
*/
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline void pgd_clear(pgd_t *pgd) { }
#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))

#define pgd_populate(mm, pgd, pud) do { } while (0)
/*
* (puds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
*/
#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })

static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
{
return (pud_t *)pgd;
}

#define pud_val(x) (pgd_val((x).pgd))
#define __pud(x) ((pud_t) { __pgd(x) } )

#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))

/*
* allocating and freeing a pud is trivial: the 1-entry pud is
* inside the pgd, so has no extra memory associated with it.
*/
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x, a) do { } while (0)

#undef pud_addr_end
#define pud_addr_end(addr, end) (end)

#endif /* __ASSEMBLY__ */
#endif /* _PGTABLE_NOPUD_H */

74 drivers/include/asm-generic/ptrace.h Normal file
@ -0,0 +1,74 @@
/*
* Common low level (register) ptrace helpers
*
* Copyright 2004-2011 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/

#ifndef __ASM_GENERIC_PTRACE_H__
#define __ASM_GENERIC_PTRACE_H__

#ifndef __ASSEMBLY__

/* Helpers for working with the instruction pointer */
#ifndef GET_IP
#define GET_IP(regs) ((regs)->pc)
#endif
#ifndef SET_IP
#define SET_IP(regs, val) (GET_IP(regs) = (val))
#endif

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
return GET_IP(regs);
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
SET_IP(regs, val);
}

#ifndef profile_pc
#define profile_pc(regs) instruction_pointer(regs)
#endif

/* Helpers for working with the user stack pointer */
#ifndef GET_USP
#define GET_USP(regs) ((regs)->usp)
#endif
#ifndef SET_USP
#define SET_USP(regs, val) (GET_USP(regs) = (val))
#endif

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
return GET_USP(regs);
}
static inline void user_stack_pointer_set(struct pt_regs *regs,
unsigned long val)
{
SET_USP(regs, val);
}

/* Helpers for working with the frame pointer */
#ifndef GET_FP
#define GET_FP(regs) ((regs)->fp)
#endif
#ifndef SET_FP
#define SET_FP(regs, val) (GET_FP(regs) = (val))
#endif

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
return GET_FP(regs);
}
static inline void frame_pointer_set(struct pt_regs *regs,
unsigned long val)
{
SET_FP(regs, val);
}

#endif /* __ASSEMBLY__ */

#endif
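
Every accessor here is guarded by #ifndef, so an architecture whose pt_regs uses different field names only has to predefine the GET_*/SET_* macros before this header is pulled in. A hypothetical override:

/* hypothetical arch whose pt_regs calls the instruction pointer 'eip' */
#define GET_IP(regs) ((regs)->eip)
#include <asm-generic/ptrace.h>        /* instruction_pointer() now reads eip */
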
31 drivers/include/asm/agp.h Normal file
@ -0,0 +1,31 @@
#ifndef _ASM_X86_AGP_H
#define _ASM_X86_AGP_H

#include <asm/pgtable.h>
#include <asm/cacheflush.h>

/*
* Functions to keep the agpgart mappings coherent with the MMU. The
* GART gives the CPU a physical alias of pages in memory. The alias
* region is mapped uncacheable. Make sure there are no conflicting
* mappings with different cachability attributes for the same
* page. This avoids data corruption on some CPUs.
*/

#define map_page_into_agp(page) set_pages_uc(page, 1)
#define unmap_page_from_agp(page) set_pages_wb(page, 1)

/*
* Could use CLFLUSH here if the cpu supports it. But then it would
* need to be called for each cacheline of the whole page so it may
* not be worth it. Would need a page for it.
*/
#define flush_agp_cache() wbinvd()

/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))

#endif /* _ASM_X86_AGP_H */

@ -28,30 +28,26 @@
*/

#ifdef CONFIG_SMP
#define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR "661f\n" /* address */ \
".previous\n" \
"661:\n\tlock; "
#define LOCK_PREFIX_HERE \
".pushsection .smp_locks,\"a\"\n" \
".balign 4\n" \
".long 671f - .\n" /* offset */ \
".popsection\n" \
"671:"

#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "

#else /* ! CONFIG_SMP */
#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
#endif

/* This must be included *after* the definition of LOCK_PREFIX */
#include <asm/cpufeature.h>

struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
u8 cpuid; /* cpuid bit set for replacement */
s32 instr_offset; /* original instruction */
s32 repl_offset; /* offset to replacement instruction */
u16 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */
u8 pad1;
#ifdef CONFIG_X86_64
u32 pad2;
#endif
};

extern void alternative_instructions(void);
@ -64,31 +60,75 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
extern void alternatives_enable_smp(void);
extern int alternatives_text_reserved(void *start, void *end);
extern bool skip_smp_alternatives;
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
static inline void alternatives_enable_smp(void) {}
static inline int alternatives_text_reserved(void *start, void *end)
{
return 0;
}
#endif /* CONFIG_SMP */

#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"

#define b_replacement(number) "663"#number
#define e_replacement(number) "664"#number

#define alt_slen "662b-661b"
#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"

#define ALTINSTR_ENTRY(feature, number) \
" .long 661b - .\n" /* label */ \
" .long " b_replacement(number)"f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \
" .byte " alt_slen "\n" /* source len */ \
" .byte " alt_rlen(number) "\n" /* replacement len */

#define DISCARD_ENTRY(number) /* rlen <= slen */ \
" .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"

#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"

/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
\
"661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR "661b\n" /* label */ \
_ASM_PTR "663f\n" /* new instruction */ \
" .byte " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
" .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
".previous\n" \
".section .altinstr_replacement, \"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous"
OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".popsection"

#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
OLDINSTR(oldinstr) \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
".pushsection .discard,\"aw\",@progbits\n" \
DISCARD_ENTRY(1) \
DISCARD_ENTRY(2) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".popsection"

/*
* This must be included *after* the definition of ALTERNATIVE due to
* <asm/arch_hweight.h>
*/
#include <asm/cpufeature.h>

/*
* Alternative instructions for different CPU types or capabilities.
@ -120,16 +160,54 @@ static inline void alternatives_smp_switch(int smp) {}
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
: : "i" (0), ## input)

/*
* This is similar to alternative_input. But it has two features and
* respective instructions.
*
* If CPU has feature2, newinstr2 is used.
* Otherwise, if CPU has feature1, newinstr1 is used.
* Otherwise, oldinstr is used.
*/
#define alternative_input_2(oldinstr, newinstr1, feature1, newinstr2, \
feature2, input...) \
asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, \
newinstr2, feature2) \
: : "i" (0), ## input)

/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...) \
asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) \
: output : "i" (0), ## input)

/* Like alternative_io, but for replacing a direct call with another one. */
#define alternative_call(oldfunc, newfunc, feature, output, input...) \
asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)

/*
* Like alternative_call, but there are two features and respective functions.
* If CPU has feature2, function2 is used.
* Otherwise, if CPU has feature1, function1 is used.
* Otherwise, old function is used.
*/
#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
output, input...) \
asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
"call %P[new2]", feature2) \
: output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
[new2] "i" (newfunc2), ## input)

/*
* use this macro(s) if you need more than one output parameter
* in alternative_io
*/
#define ASM_OUTPUT2(a) a
#define ASM_OUTPUT2(a...) a

/*
* use this macro if you need clobbers but no inputs in
* alternative_{input,io,call}()
*/
#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
@ -143,6 +221,8 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
#define __parainstructions_end NULL
#endif

extern void *text_poke_early(void *addr, const void *opcode, size_t len);

/*
* Clear and restore the kernel write-protection flag on the local CPU.
* Allows the kernel to edit read-only pages.
@ -154,11 +234,10 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
* no thread can be preempted in the instructions being modified (no iret to an
* invalid instruction possible) or if the instructions are changed from a
* consistent state to another consistent state atomically.
* More care must be taken when modifying code in the SMP case because of
* Intel's errata.
* On the local CPU you need to be protected against NMI or MCE handlers seeing an
* inconsistent instruction while you patch.
*/
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);

#endif /* _ASM_X86_ALTERNATIVE_H */
61 drivers/include/asm/arch_hweight.h Normal file
@ -0,0 +1,61 @@
#ifndef _ASM_X86_HWEIGHT_H
#define _ASM_X86_HWEIGHT_H

#ifdef CONFIG_64BIT
/* popcnt %edi, %eax -- redundant REX prefix for alignment */
#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
/* popcnt %rdi, %rax */
#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
#define REG_IN "D"
#define REG_OUT "a"
#else
/* popcnt %eax, %eax */
#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0"
#define REG_IN "a"
#define REG_OUT "a"
#endif

/*
* __sw_hweightXX are called from within the alternatives below
* and callee-clobbered registers need to be taken care of. See
* ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
* compiler switches.
*/
static inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res = 0;

asm ("call __sw_hweight32"
: "="REG_OUT (res)
: REG_IN (w));

return res;
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
return __arch_hweight32(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
return __arch_hweight32(w & 0xff);
}

static inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res = 0;

#ifdef CONFIG_X86_32
return __arch_hweight32((u32)w) +
__arch_hweight32((u32)(w >> 32));
#else
asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
: "="REG_OUT (res)
: REG_IN (w));
#endif /* CONFIG_X86_32 */

return res;
}

#endif
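
Usage is direct: these helpers return the population count ("hamming weight") of their argument, with the slow __sw_hweight call sites patchable to a single POPCNT where the CPU supports it. A small illustrative check:

unsigned int bits = __arch_hweight32(0xF0F0F0F0u);              /* 16 */
/* on CONFIG_X86_32 the 64-bit variant is two 32-bit counts added together */
unsigned long bits64 = __arch_hweight64(0xFFFFFFFF00000000ULL); /* 32 */
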
83 drivers/include/asm/asm.h Normal file
@ -0,0 +1,83 @@
#ifndef _ASM_X86_ASM_H
#define _ASM_X86_ASM_H

#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
# define __ASM_FORM_RAW(x) x
# define __ASM_FORM_COMMA(x) x,
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_FORM_RAW(x) #x
# define __ASM_FORM_COMMA(x) " " #x ","
#endif

#ifdef CONFIG_X86_32
# define __ASM_SEL(a,b) __ASM_FORM(a)
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
#else
# define __ASM_SEL(a,b) __ASM_FORM(b)
# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#endif

#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
inst##q##__VA_ARGS__)
#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg)

#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)

#define _ASM_MOV __ASM_SIZE(mov)
#define _ASM_INC __ASM_SIZE(inc)
#define _ASM_DEC __ASM_SIZE(dec)
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)

#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
#define _ASM_CX __ASM_REG(cx)
#define _ASM_DX __ASM_REG(dx)
#define _ASM_SP __ASM_REG(sp)
#define _ASM_BP __ASM_REG(bp)
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)

/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to) \
.pushsection "__ex_table","a" ; \
.balign 8 ; \
.long (from) - . ; \
.long (to) - . ; \
.popsection

# define _ASM_EXTABLE_EX(from,to) \
.pushsection "__ex_table","a" ; \
.balign 8 ; \
.long (from) - . ; \
.long (to) - . + 0x7ffffff0 ; \
.popsection

# define _ASM_NOKPROBE(entry) \
.pushsection "_kprobe_blacklist","aw" ; \
_ASM_ALIGN ; \
_ASM_PTR (entry); \
.popsection
#else
# define _ASM_EXTABLE(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 8\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - .\n" \
" .popsection\n"

# define _ASM_EXTABLE_EX(from,to) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 8\n" \
" .long (" #from ") - .\n" \
" .long (" #to ") - . + 0x7ffffff0\n" \
" .popsection\n"
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif

#endif /* _ASM_X86_ASM_H */
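
The point of these selectors is that one inline-asm line serves both word widths. As a sketch, on CONFIG_X86_32 the statement below emits "addl" (and _ASM_PTR would emit .long), while a 64-bit build would get "addq" and .quad; counter is a hypothetical variable:

static unsigned long counter;

asm volatile(_ASM_ADD " %1,%0" : "+m" (counter) : "ir" (1UL));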

238 drivers/include/asm/atomic.h Normal file
@ -0,0 +1,238 @@
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/

#define ATOMIC_INIT(i) { (i) }

/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return ACCESS_ONCE((v)->counter);
}

/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}

/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
static inline void atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}

/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
static inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}

/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
}

/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}

/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
}

/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}

/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_inc_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
}

/**
* atomic_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic_add_negative(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
}

/**
* atomic_add_return - add integer and return
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}

/**
* atomic_sub_return - subtract integer and return
* @v: pointer of type atomic_t
* @i: integer value to subtract
*
* Atomically subtracts @i from @v and returns @v - @i
*/
static inline int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
}

#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
}

static inline int atomic_xchg(atomic_t *v, int new)
{
return xchg(&v->counter, new);
}

/**
* __atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns the old value of @v.
*/
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c;
}
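
The loop above is the standard optimistic cmpxchg pattern: read the counter, compute the new value, try to publish it, and retry if another CPU got there first. Its usual consumer is a conditional reference grab; a sketch with a hypothetical refcounted object:

/* take a reference only if the object is not already dead (count 0) */
if (__atomic_add_unless(&obj->refs, 1, 0) == 0)
        return NULL;            /* old value was 0: object already freed */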

/**
* atomic_inc_short - increment of a short integer
* @v: pointer to type short int
*
* Atomically adds 1 to @v
* Returns the new value of @v
*/
static inline short int atomic_inc_short(short int *v)
{
asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
return *v;
}

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")

#define atomic_set_mask(mask, addr) \
asm volatile(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)(mask)), "m" (*(addr)) \
: "memory")

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */

315 drivers/include/asm/atomic64_32.h Normal file
@ -0,0 +1,315 @@
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
//#include <asm/cmpxchg.h>

/* A 64-bit atomic type */

typedef struct {
u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
ATOMIC64_EXPORT(atomic64_##sym)
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
asm volatile("call %P[func]" \
: out : [func] "i" (atomic64_##g##_cx8), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif

#define alternative_atomic64(f, out, in...) \
__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)

ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

/**
* atomic64_cmpxchg - cmpxchg atomic64 variable
* @v: pointer to type atomic64_t
* @o: expected value
* @n: new value
*
* Atomically sets @v to @n if it was equal to @o and returns
* the old value.
*/

static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
return cmpxchg64(&v->counter, o, n);
}

/**
* atomic64_xchg - xchg atomic64 variable
* @v: pointer to type atomic64_t
* @n: value to assign
*
* Atomically xchgs the value of @v to @n and returns
* the old value.
*/
static inline long long atomic64_xchg(atomic64_t *v, long long n)
{
long long o;
unsigned high = (unsigned)(n >> 32);
unsigned low = (unsigned)n;

asm volatile(
"1: \n\t"
"cmpxchg8b (%%esi) \n\t"
"jnz 1b \n\t"
:"=&A" (o)
:"S" (v), "b" (low), "c" (high)
: "memory", "cc");
return o;
}

/**
* atomic64_set - set atomic64 variable
* @v: pointer to type atomic64_t
* @i: value to assign
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic64_set(atomic64_t *v, long long i)
{
__sync_lock_test_and_set((long long *)&v->counter, i);
}

/**
* atomic64_read - read atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically reads the value of @v and returns it.
*/
static inline long long atomic64_read(const atomic64_t *v)
{
return __sync_fetch_and_add( (long long *)&v->counter, 0);
}

/**
* atomic64_add_return - add and return
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns @i + *@v
*/
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
alternative_atomic64(add_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}

/*
* Other variants with different arithmetic operators:
*/
static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
alternative_atomic64(sub_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}

static inline long long atomic64_inc_return(atomic64_t *v)
{
long long a;
alternative_atomic64(inc_return, "=&A" (a),
"S" (v) : "memory", "ecx");
return a;
}

static inline long long atomic64_dec_return(atomic64_t *v)
{
long long a;
alternative_atomic64(dec_return, "=&A" (a),
"S" (v) : "memory", "ecx");
return a;
}

/**
* atomic64_add - add integer to atomic64 variable
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v.
*/
static inline long long atomic64_add(long long i, atomic64_t *v)
{
__alternative_atomic64(add, add_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}

/**
* atomic64_sub - subtract the atomic64 variable
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v.
*/
static inline long long atomic64_sub(long long i, atomic64_t *v)
{
__alternative_atomic64(sub, sub_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}

/**
* atomic64_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic64_sub_and_test(long long i, atomic64_t *v)
{
return atomic64_sub_return(i, v) == 0;
}

/**
* atomic64_inc - increment atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1.
*/
static inline void atomic64_inc(atomic64_t *v)
{
__alternative_atomic64(inc, inc_return, /* no output */,
"S" (v) : "memory", "eax", "ecx", "edx");
}

/**
* atomic64_dec - decrement atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic64_dec(atomic64_t *v)
{
__alternative_atomic64(dec, dec_return, /* no output */,
"S" (v) : "memory", "eax", "ecx", "edx");
}

/**
* atomic64_dec_and_test - decrement and test
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic64_dec_and_test(atomic64_t *v)
{
return atomic64_dec_return(v) == 0;
}

/**
* atomic64_inc_and_test - increment and test
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic64_inc_and_test(atomic64_t *v)
{
return atomic64_inc_return(v) == 0;
}

/**
* atomic64_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic64_add_negative(long long i, atomic64_t *v)
{
return atomic64_add_return(i, v) < 0;
}

/**
* atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if the add was done, zero otherwise.
*/
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned low = (unsigned)u;
unsigned high = (unsigned)(u >> 32);
alternative_atomic64(add_unless,
ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
"S" (v) : "memory");
return (int)a;
}


static inline int atomic64_inc_not_zero(atomic64_t *v)
{
int r;
alternative_atomic64(inc_not_zero, "=&a" (r),
"S" (v) : "ecx", "edx", "memory");
return r;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long r;
alternative_atomic64(dec_if_positive, "=&A" (r),
"S" (v) : "ecx", "memory");
return r;
}

#undef alternative_atomic64
#undef __alternative_atomic64

#endif /* _ASM_X86_ATOMIC64_32_H */
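
On 32-bit x86 all of these funnel into cmpxchg8b-based helper routines, so a caller gets a tear-free 64-bit update without taking a lock. A usage sketch with a hypothetical byte counter:

static atomic64_t bytes_total = ATOMIC64_INIT(0);

void account_bytes(unsigned int len)
{
        atomic64_add(len, &bytes_total);        /* one atomic 64-bit add */
}
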

@ -3,7 +3,7 @@

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/processor.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>

/*
107 drivers/include/asm/barrier.h Normal file
@ -0,0 +1,107 @@
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/

#ifdef CONFIG_X86_32
/*
* Some non-Intel clones support out of order store. wmb() ceases to be a
* nop for these.
*/
#define mb() asm volatile ("lock; addl $0,0(%esp)")/*, "mfence", X86_FEATURE_XMM2) */
#define rmb() asm volatile("lock; addl $0,0(%esp)")/*, "lfence", X86_FEATURE_XMM2) */
#define wmb() asm volatile("lock; addl $0,0(%esp)")/*, "sfence", X86_FEATURE_XMM) */
#else
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else
#define dma_rmb() barrier()
#endif
#define dma_wmb() barrier()

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* SMP */

#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
* For this option x86 doesn't have a strong TSO memory
* model and we should fall back to full barriers.
*/

#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
ACCESS_ONCE(*p) = (v); \
} while (0)

#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
smp_mb(); \
___p1; \
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
barrier(); \
ACCESS_ONCE(*p) = (v); \
} while (0)

#define smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = ACCESS_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
barrier(); \
___p1; \
})

#endif

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()

/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
*
* (Could use an alternative three way for this if there was one.)
*/
static __always_inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}

#endif /* _ASM_X86_BARRIER_H */
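
smp_store_release()/smp_load_acquire() above are the usual publish/consume pair: on x86's TSO model they cost only a compiler barrier, yet they still keep the flag ordered against the payload. A minimal sketch with hypothetical shared variables data and ready (consume() is hypothetical too):

/* producer */
data = 42;                          /* write the payload first */
smp_store_release(&ready, 1);       /* then publish the flag */

/* consumer */
if (smp_load_acquire(&ready))       /* the flag read is ordered before... */
        consume(data);              /* ...the payload read */
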
@ -14,6 +14,16 @@

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n) (U64_C(1) << (n))

@ -59,7 +69,7 @@
* restricted to acting on a single-word quantity.
*/
static __always_inline void
set_bit(unsigned int nr, volatile unsigned long *addr)
set_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
@ -81,7 +91,7 @@ set_bit(unsigned int nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __set_bit(int nr, volatile unsigned long *addr)
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
@ -93,11 +103,11 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static __always_inline void
clear_bit(int nr, volatile unsigned long *addr)
clear_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
@ -118,13 +128,13 @@ clear_bit(int nr, volatile unsigned long *addr)
* clear_bit() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
@ -141,15 +151,12 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()

/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
@ -159,7 +166,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(int nr, volatile unsigned long *addr)
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
@ -173,7 +180,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(int nr, volatile unsigned long *addr)
static inline void change_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
@ -194,14 +201,9 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;

asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
"sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

return oldbit;
GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
}

/**
@ -212,7 +214,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
* This is the same as test_and_set_bit on x86.
*/
static __always_inline int
test_and_set_bit_lock(int nr, volatile unsigned long *addr)
test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
@ -226,7 +228,7 @@ test_and_set_bit_lock(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;

@ -245,15 +247,9 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;

asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

return oldbit;
GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
}

/**
@ -272,7 +268,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;

@ -284,7 +280,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
}

/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;

@ -304,24 +300,18 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;

asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR : "Ir" (nr) : "memory");

return oldbit;
GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
}
|
||||
|
||||
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
|
||||
static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
|
||||
{
|
||||
return ((1UL << (nr % BITS_PER_LONG)) &
|
||||
(addr[nr / BITS_PER_LONG])) != 0;
|
||||
return ((1UL << (nr & (BITS_PER_LONG-1))) &
|
||||
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
|
||||
}
|
||||
|
||||
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
|
||||
static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
|
||||
{
|
||||
int oldbit;
|
||||
|
||||
@ -406,7 +396,21 @@ static inline unsigned long __fls(unsigned long word)
|
||||
static inline int ffs(int x)
|
||||
{
|
||||
int r;
|
||||
#ifdef CONFIG_X86_CMOV
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
/*
|
||||
* AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
|
||||
* dest reg is undefined if x==0, but their CPU architect says its
|
||||
* value is written to set it to the same as before, except that the
|
||||
* top 32 bits will be cleared.
|
||||
*
|
||||
* We cannot do this on 32 bits because at the very least some
|
||||
* 486 CPUs did not behave this way.
|
||||
*/
|
||||
asm("bsfl %1,%0"
|
||||
: "=r" (r)
|
||||
: "rm" (x), "0" (-1));
|
||||
#elif defined(CONFIG_X86_CMOV)
|
||||
asm("bsfl %1,%0\n\t"
|
||||
"cmovzl %2,%0"
|
||||
: "=&r" (r) : "rm" (x), "r" (-1));
|
||||
@ -433,7 +437,21 @@ static inline int ffs(int x)
|
||||
static inline int fls(int x)
|
||||
{
|
||||
int r;
|
||||
#ifdef CONFIG_X86_CMOV
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
/*
|
||||
* AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
|
||||
* dest reg is undefined if x==0, but their CPU architect says its
|
||||
* value is written to set it to the same as before, except that the
|
||||
* top 32 bits will be cleared.
|
||||
*
|
||||
* We cannot do this on 32 bits because at the very least some
|
||||
* 486 CPUs did not behave this way.
|
||||
*/
|
||||
asm("bsrl %1,%0"
|
||||
: "=r" (r)
|
||||
: "rm" (x), "0" (-1));
|
||||
#elif defined(CONFIG_X86_CMOV)
|
||||
asm("bsrl %1,%0\n\t"
|
||||
"cmovzl %2,%0"
|
||||
: "=&r" (r) : "rm" (x), "rm" (-1));
|
||||
@ -445,32 +463,47 @@ static inline int fls(int x)
|
||||
#endif
|
||||
return r + 1;
|
||||
}
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#undef ADDR
|
||||
/**
|
||||
* fls64 - find last set bit in a 64-bit word
|
||||
* @x: the word to search
|
||||
*
|
||||
* This is defined in a similar way as the libc and compiler builtin
|
||||
* ffsll, but returns the position of the most significant set bit.
|
||||
*
|
||||
* fls64(value) returns 0 if value is 0 or the position of the last
|
||||
* set bit if value is nonzero. The last (most significant) bit is
|
||||
* at position 64.
|
||||
*/
|
||||
#ifdef CONFIG_X86_64
|
||||
static __always_inline int fls64(__u64 x)
|
||||
{
|
||||
int bitpos = -1;
|
||||
/*
|
||||
* AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
|
||||
* dest reg is undefined if x==0, but their CPU architect says its
|
||||
* value is written to set it to the same as before.
|
||||
*/
|
||||
asm("bsrq %1,%q0"
|
||||
: "+r" (bitpos)
|
||||
: "rm" (x));
|
||||
return bitpos + 1;
|
||||
}
|
||||
#else
|
||||
#include <asm-generic/bitops/fls64.h>
|
||||
#endif
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#include <asm-generic/bitops/find.h>
|
||||
|
||||
#include <asm-generic/bitops/sched.h>
|
||||
|
||||
#define ARCH_HAS_FAST_MULTIPLIER 1
|
||||
#include <asm/arch_hweight.h>
|
||||
|
||||
#include <asm-generic/bitops/hweight.h>
|
||||
#include <asm-generic/bitops/const_hweight.h>
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#include <asm-generic/bitops/le.h>
|
||||
|
||||
#include <asm-generic/bitops/fls64.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <asm-generic/bitops/ext2-non-atomic.h>
|
||||
|
||||
#define ext2_set_bit_atomic(lock, nr, addr) \
|
||||
test_and_set_bit((nr), (unsigned long *)(addr))
|
||||
#define ext2_clear_bit_atomic(lock, nr, addr) \
|
||||
test_and_clear_bit((nr), (unsigned long *)(addr))
|
||||
|
||||
#include <asm-generic/bitops/minix.h>
|
||||
#include <asm-generic/bitops/ext2-atomic-setbit.h>
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _ASM_X86_BITOPS_H */
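A minimal usage sketch of the two flavors above, assuming kernel context; the bitmap and the example_ helpers are hypothetical, not part of this commit. The locked forms are safe against concurrent CPUs, the double-underscore forms are only valid when the caller already serializes access.

    #include <linux/bitops.h>

    static unsigned long example_pending[BITS_TO_LONGS(64)];

    /* Safe from any context: LOCK-prefixed read-modify-write. */
    static int example_claim(int nr)
    {
        return !test_and_set_bit(nr, example_pending); /* 1 if we won the race */
    }

    /* Only valid under an external lock: plain BTR, no LOCK prefix. */
    static void example_release(int nr)
    {
        __clear_bit(nr, example_pending);
    }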
23 drivers/include/asm/cache.h Normal file
@@ -0,0 +1,23 @@
#ifndef _ASM_X86_CACHE_H
#define _ASM_X86_CACHE_H

#include <linux/linkage.h>

/* L1 cache line size */
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

#define __read_mostly __attribute__((__section__(".data..read_mostly")))

#define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)

#ifdef CONFIG_X86_VSMP
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp  \
    __attribute__((__aligned__(INTERNODE_CACHE_BYTES)))  \
    __page_aligned_data
#endif
#endif

#endif /* _ASM_X86_CACHE_H */
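An illustrative sketch, not from the commit: with CONFIG_X86_L1_CACHE_SHIFT=6 (as the Makefile above defines), L1_CACHE_BYTES is 64, so a structure padded to that size occupies one cache line per instance, and a rarely-written global can be moved into .data..read_mostly.

    #include <asm/cache.h>

    /* Read often, written once at init: keep it off hot written lines. */
    static int example_tuning_knob __read_mostly = 1;

    /* One cache line per instance avoids false sharing between CPUs. */
    struct example_stat {
        unsigned long counter;
    } __attribute__((__aligned__(L1_CACHE_BYTES)));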
131 drivers/include/asm/cacheflush.h Normal file
@@ -0,0 +1,131 @@
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability   : UnCached, WriteCombining, WriteBack
 * Executability  : eXecutable, NoteXecutable
 * Read/Write     : ReadOnly, ReadWrite
 * Presence       : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are
 *   in any state, other than that these do not violate rules for
 *   the CPU you have. Do not depend on any effects on other mappings,
 *   CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* function. See the description of the
 * set_memory_* function for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * making this not a generally useful API.
 *
 * Specifically, many users of the old APIs had a virtual address,
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get a struct page* that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address, do not use these functions.
 */

static int set_pages_uc(struct page *page, int numpages)
{
    return 0;
};

static int set_pages_wb(struct page *page, int numpages)
{
    return 0;
};

static int set_pages_x(struct page *page, int numpages)
{
    return 0;
};

static int set_pages_nx(struct page *page, int numpages)
{
    return 0;
};

static int set_pages_ro(struct page *page, int numpages)
{
    return 0;
};

static int set_pages_rw(struct page *page, int numpages)
{
    return 0;
};

void clflush_cache_range(void *addr, unsigned int size);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
    return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */
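A hedged sketch of how the set_memory_* declarations above are typically used; the framebuffer mapping and page count are invented for illustration. The attribute is changed on a virtual range and restored to write-back before the mapping is reused.

    #include <asm/cacheflush.h>

    static int example_map_framebuffer(unsigned long vaddr, int numpages)
    {
        int err = set_memory_wc(vaddr, numpages);  /* write-combining */
        if (err)
            return err;
        /* ... stream pixels into the range ... */
        return set_memory_wb(vaddr, numpages);     /* back to write-back */
    }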
233 drivers/include/asm/cmpxchg.h Normal file
@@ -0,0 +1,233 @@
#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
    __compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
    __compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
    __compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
    __compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B 1
#define __X86_CASE_W 2
#define __X86_CASE_L 4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q 8
#else
#define __X86_CASE_Q -1 /* sizeof will never return -1 */
#endif

/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)  \
({  \
    __typeof__ (*(ptr)) __ret = (arg);  \
    switch (sizeof(*(ptr))) {  \
    case __X86_CASE_B:  \
        asm volatile (lock #op "b %b0, %1\n"  \
                  : "+q" (__ret), "+m" (*(ptr))  \
                  : : "memory", "cc");  \
        break;  \
    case __X86_CASE_W:  \
        asm volatile (lock #op "w %w0, %1\n"  \
                  : "+r" (__ret), "+m" (*(ptr))  \
                  : : "memory", "cc");  \
        break;  \
    case __X86_CASE_L:  \
        asm volatile (lock #op "l %0, %1\n"  \
                  : "+r" (__ret), "+m" (*(ptr))  \
                  : : "memory", "cc");  \
        break;  \
    case __X86_CASE_Q:  \
        asm volatile (lock #op "q %q0, %1\n"  \
                  : "+r" (__ret), "+m" (*(ptr))  \
                  : : "memory", "cc");  \
        break;  \
    default:  \
        __ ## op ## _wrong_size();  \
    }  \
    __ret;  \
})

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)  \
({  \
    __typeof__(*(ptr)) __ret;  \
    __typeof__(*(ptr)) __old = (old);  \
    __typeof__(*(ptr)) __new = (new);  \
    switch (size) {  \
    case __X86_CASE_B:  \
    {  \
        volatile u8 *__ptr = (volatile u8 *)(ptr);  \
        asm volatile(lock "cmpxchgb %2,%1"  \
                 : "=a" (__ret), "+m" (*__ptr)  \
                 : "q" (__new), "0" (__old)  \
                 : "memory");  \
        break;  \
    }  \
    case __X86_CASE_W:  \
    {  \
        volatile u16 *__ptr = (volatile u16 *)(ptr);  \
        asm volatile(lock "cmpxchgw %2,%1"  \
                 : "=a" (__ret), "+m" (*__ptr)  \
                 : "r" (__new), "0" (__old)  \
                 : "memory");  \
        break;  \
    }  \
    case __X86_CASE_L:  \
    {  \
        volatile u32 *__ptr = (volatile u32 *)(ptr);  \
        asm volatile(lock "cmpxchgl %2,%1"  \
                 : "=a" (__ret), "+m" (*__ptr)  \
                 : "r" (__new), "0" (__old)  \
                 : "memory");  \
        break;  \
    }  \
    case __X86_CASE_Q:  \
    {  \
        volatile u64 *__ptr = (volatile u64 *)(ptr);  \
        asm volatile(lock "cmpxchgq %2,%1"  \
                 : "=a" (__ret), "+m" (*__ptr)  \
                 : "r" (__new), "0" (__old)  \
                 : "memory");  \
        break;  \
    }  \
    default:  \
        __cmpxchg_wrong_size();  \
    }  \
    __ret;  \
})

#define __cmpxchg(ptr, old, new, size)  \
    __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)  \
    __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)  \
    __raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define cmpxchg(ptr, old, new)  \
    __cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)  \
    __sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)  \
    __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))

/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)  __xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)          __xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)     __xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)    __xadd((ptr), (inc), "")

#define __add(ptr, inc, lock)  \
({  \
    __typeof__ (*(ptr)) __ret = (inc);  \
    switch (sizeof(*(ptr))) {  \
    case __X86_CASE_B:  \
        asm volatile (lock "addb %b1, %0\n"  \
                  : "+m" (*(ptr)) : "qi" (inc)  \
                  : "memory", "cc");  \
        break;  \
    case __X86_CASE_W:  \
        asm volatile (lock "addw %w1, %0\n"  \
                  : "+m" (*(ptr)) : "ri" (inc)  \
                  : "memory", "cc");  \
        break;  \
    case __X86_CASE_L:  \
        asm volatile (lock "addl %1, %0\n"  \
                  : "+m" (*(ptr)) : "ri" (inc)  \
                  : "memory", "cc");  \
        break;  \
    case __X86_CASE_Q:  \
        asm volatile (lock "addq %1, %0\n"  \
                  : "+m" (*(ptr)) : "ri" (inc)  \
                  : "memory", "cc");  \
        break;  \
    default:  \
        __add_wrong_size();  \
    }  \
    __ret;  \
})

/*
 * add_*() adds "inc" to "*ptr"
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)   __add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)  __add((ptr), (inc), "lock; ")

#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)  \
({  \
    bool __ret;  \
    __typeof__(*(p1)) __old1 = (o1), __new1 = (n1);  \
    __typeof__(*(p2)) __old2 = (o2), __new2 = (n2);  \
    BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));  \
    BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));  \
    VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));  \
    VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));  \
    asm volatile(pfx "cmpxchg%c4b %2; sete %0"  \
             : "=a" (__ret), "+d" (__old2),  \
               "+m" (*(p1)), "+m" (*(p2))  \
             : "i" (2 * sizeof(long)), "a" (__old1),  \
               "b" (__new1), "c" (__new2));  \
    __ret;  \
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2)  \
    __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)  \
    __cmpxchg_double(, p1, p2, o1, o2, n1, n2)

#endif /* ASM_X86_CMPXCHG_H */
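A classic compare-and-swap retry loop built on the cmpxchg() defined above; the function and its cap semantics are illustrative, not from the commit. The loop atomically adds "delta" only while the counter stays at or below a cap, retrying whenever another CPU changed the value between the read and the CAS.

    static int example_add_capped(unsigned int *ctr, unsigned int delta,
                                  unsigned int cap)
    {
        unsigned int old, new;

        do {
            old = *ctr;
            if (old + delta > cap)
                return -1;              /* would exceed the cap */
            new = old + delta;
            /* cmpxchg() returns the prior value; retry on interference */
        } while (cmpxchg(ctr, old, new) != old);

        return 0;
    }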
114 drivers/include/asm/cmpxchg_32.h Normal file
@@ -0,0 +1,114 @@
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set64_bit(), __cmpxchg64(), or their variants,
 * you need to test for the feature in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need to have the reader
 * side to see the coherent 64bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
    u32 low  = value;
    u32 high = value >> 32;
    u64 prev = *ptr;

    asm volatile("\n1:\t"
             LOCK_PREFIX "cmpxchg8b %0\n\t"
             "jnz 1b"
             : "=m" (*ptr), "+A" (prev)
             : "b" (low), "c" (high)
             : "memory");
}

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)  \
    ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o),  \
                     (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)  \
    ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),  \
                           (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
    u64 prev;
    asm volatile(LOCK_PREFIX "cmpxchg8b %1"
             : "=A" (prev),
               "+m" (*ptr)
             : "b" ((u32)new),
               "c" ((u32)(new >> 32)),
               "0" (old)
             : "memory");
    return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
    u64 prev;
    asm volatile("cmpxchg8b %1"
             : "=A" (prev),
               "+m" (*ptr)
             : "b" ((u32)new),
               "c" ((u32)(new >> 32)),
               "0" (old)
             : "memory");
    return prev;
}

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on 80386 and 80486. It may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

#define cmpxchg64(ptr, o, n)  \
({  \
    __typeof__(*(ptr)) __ret;  \
    __typeof__(*(ptr)) __old = (o);  \
    __typeof__(*(ptr)) __new = (n);  \
    alternative_io(LOCK_PREFIX_HERE  \
            "call cmpxchg8b_emu",  \
            "lock; cmpxchg8b (%%esi)" ,  \
           X86_FEATURE_CX8,  \
           "=A" (__ret),  \
           "S" ((ptr)), "0" (__old),  \
           "b" ((unsigned int)__new),  \
           "c" ((unsigned int)(__new>>32))  \
           : "memory");  \
    __ret; })


#define cmpxchg64_local(ptr, o, n)  \
({  \
    __typeof__(*(ptr)) __ret;  \
    __typeof__(*(ptr)) __old = (o);  \
    __typeof__(*(ptr)) __new = (n);  \
    alternative_io("call cmpxchg8b_emu",  \
           "cmpxchg8b (%%esi)" ,  \
           X86_FEATURE_CX8,  \
           "=A" (__ret),  \
           "S" ((ptr)), "0" (__old),  \
           "b" ((unsigned int)__new),  \
           "c" ((unsigned int)(__new>>32))  \
           : "memory");  \
    __ret; })

#endif

#define system_has_cmpxchg_double() cpu_has_cx8

#endif /* _ASM_X86_CMPXCHG_32_H */
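A sketch only, assuming the helpers above: a 64-bit timestamp shared with readers on 32-bit x86. set_64bit() produces a single atomic 8-byte store via CMPXCHG8B, so a concurrent reader never observes a torn half-written value; the example_ names are hypothetical.

    static volatile u64 example_last_seen;

    static void example_publish(u64 now)
    {
        set_64bit(&example_last_seen, now);     /* atomic 8-byte store */
    }

    static void example_advance(u64 now)
    {
        u64 old = example_last_seen;
        /* only move forward; cmpxchg64() returns the prior value */
        while (old < now && cmpxchg64(&example_last_seen, old, now) != old)
            old = example_last_seen;
    }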
586 drivers/include/asm/cpufeature.h Normal file
@@ -0,0 +1,586 @@
/*
 * Defines x86 CPU feature bits
 */
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H

#ifndef _ASM_X86_REQUIRED_FEATURES_H
#include <asm/required-features.h>
#endif

#ifndef _ASM_X86_DISABLED_FEATURES_H
#include <asm/disabled-features.h>
#endif

#define NCAPINTS 11 /* N 32-bit words worth of info */
#define NBUGINTS 1  /* N 32-bit bug flags */

/*
 * Note: If the comment begins with a quoted string, that string is used
 * in /proc/cpuinfo instead of the macro name.  If the string is "",
 * this feature bit is not displayed in /proc/cpuinfo at all.
 */

/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU         ( 0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME         ( 0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE          ( 0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE         ( 0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC         ( 0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR         ( 0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE         ( 0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE         ( 0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8         ( 0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC        ( 0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP         ( 0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR        ( 0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE         ( 0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA         ( 0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV        ( 0*32+15) /* CMOV instructions */
                                           /* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT         ( 0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36       ( 0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN          ( 0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH     ( 0*32+19) /* CLFLUSH instruction */
#define X86_FEATURE_DS          ( 0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI        ( 0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX         ( 0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR        ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM         ( 0*32+25) /* "sse" */
#define X86_FEATURE_XMM2        ( 0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP   ( 0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT          ( 0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC         ( 0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64        ( 0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE         ( 0*32+31) /* Pending Break Enable */

/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL     ( 1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP          ( 1*32+19) /* MP Capable. */
#define X86_FEATURE_NX          ( 1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT      ( 1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT    ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES     ( 1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP      ( 1*32+27) /* RDTSCP */
#define X86_FEATURE_LM          ( 1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT    ( 1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW       ( 1*32+31) /* 3DNow! */

/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY    ( 2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN     ( 2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI        ( 2*32+ 3) /* LongRun table interface */

/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX       ( 3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR     ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR   ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8          ( 3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7          ( 3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3          ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4          ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP          ( 3*32+ 9) /* smp kernel running on up */
/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS        ( 3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS         ( 3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32   ( 3*32+14) /* "" syscall in ia32 userspace */
#define X86_FEATURE_SYSENTER32  ( 3*32+15) /* "" sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD    ( 3*32+16) /* rep microcode works well */
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL        ( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS      ( 3*32+21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY   ( 3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF  ( 3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU   ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3        ( 4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ   ( 4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64      ( 4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT       ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
#define X86_FEATURE_DSCPL       ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
#define X86_FEATURE_VMX         ( 4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX         ( 4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST         ( 4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2         ( 4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3       ( 4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID         ( 4*32+10) /* Context ID */
#define X86_FEATURE_FMA         ( 4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16        ( 4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR        ( 4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM        ( 4*32+15) /* Performance Capabilities */
#define X86_FEATURE_PCID        ( 4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA         ( 4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1      ( 4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2      ( 4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC      ( 4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE       ( 4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT      ( 4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
#define X86_FEATURE_AES         ( 4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE       ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE     ( 4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX         ( 4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C        ( 4*32+29) /* 16-bit fp conversions */
#define X86_FEATURE_RDRAND      ( 4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR  ( 4*32+31) /* Running on a hypervisor */

/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE      ( 5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN   ( 5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT      ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN   ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2        ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN     ( 5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE         ( 5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN      ( 5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM         ( 5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN      ( 5*32+13) /* PMM enabled */

/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM     ( 6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY  ( 6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM         ( 6*32+ 2) /* Secure virtual machine */
#define X86_FEATURE_EXTAPIC     ( 6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY  ( 6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM         ( 6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A       ( 6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW        ( 6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS         ( 6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP         ( 6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT      ( 6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT         ( 6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP         ( 6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4        ( 6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE         ( 6*32+17) /* translation cache extension */
#define X86_FEATURE_NODEID_MSR  ( 6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM         ( 6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT     ( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_PERFCTR_L2  ( 6*32+28) /* L2 performance counter extensions */

/*
 * Auxiliary flags: Linux defined - For features scattered in various
 * CPUID levels like 0x6, 0xA etc, word 7
 */
#define X86_FEATURE_IDA         ( 7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT        ( 7*32+ 1) /* Always Running APIC Timer */
#define X86_FEATURE_CPB         ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB         ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
#define X86_FEATURE_PLN         ( 7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS         ( 7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTHERM      ( 7*32+ 7) /* Digital Thermal Sensor */
#define X86_FEATURE_HW_PSTATE   ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_HWP         ( 7*32+ 10) /* "hwp" Intel HWP */
#define X86_FEATURE_HWP_NOITFY  ( 7*32+ 11) /* Intel HWP_NOTIFY */
#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
#define X86_FEATURE_HWP_EPP     ( 7*32+13) /* Intel HWP_EPP */
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT         ( 8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID        ( 8*32+ 4) /* Intel Virtual Processor ID */
#define X86_FEATURE_NPT         ( 8*32+ 5) /* AMD Nested Page Table support */
#define X86_FEATURE_LBRV        ( 8*32+ 6) /* AMD LBR Virtualization support */
#define X86_FEATURE_SVML        ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
#define X86_FEATURE_NRIPS       ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR  ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
#define X86_FEATURE_VMCBCLEAN   ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
#define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */


/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE    ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
#define X86_FEATURE_TSC_ADJUST  ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
#define X86_FEATURE_BMI1        ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE         ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2        ( 9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP        ( 9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2        ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS        ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID     ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM         ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_MPX         ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F     ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_RDSEED      ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX         ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP        ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_CLFLUSHOPT  ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_AVX512PF    ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER    ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD    ( 9*32+28) /* AVX-512 Conflict Detection */

/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT    (10*32+ 0) /* XSAVEOPT */
#define X86_FEATURE_XSAVEC      (10*32+ 1) /* XSAVEC */
#define X86_FEATURE_XGETBV1     (10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES      (10*32+ 3) /* XSAVES/XRSTORS */

/*
 * BUG word(s)
 */
#define X86_BUG(x)              (NCAPINTS*32 + (x))

#define X86_BUG_F00F            X86_BUG(0) /* Intel F00F */
#define X86_BUG_FDIV            X86_BUG(1) /* FPU FDIV */
#define X86_BUG_COMA            X86_BUG(2) /* Cyrix 6x86 coma */
#define X86_BUG_AMD_TLB_MMATCH  X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
#define X86_BUG_AMD_APIC_C1E    X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
#define X86_BUG_11AP            X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK     X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */

#if defined(__KERNEL__) && !defined(__ASSEMBLY__)

#include <asm/asm.h>
#include <linux/bitops.h>

#ifdef CONFIG_X86_FEATURE_NAMES
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
#define X86_CAP_FMT "%s"
#define x86_cap_flag(flag) x86_cap_flags[flag]
#else
#define X86_CAP_FMT "%d:%d"
#define x86_cap_flag(flag) ((flag) >> 5), ((flag) & 31)
#endif

/*
 * In order to save room, we index into this array by doing
 * X86_BUG_<name> - NCAPINTS*32.
 */
extern const char * const x86_bug_flags[NBUGINTS*32];

#define test_cpu_cap(c, bit)  \
     test_bit(bit, (unsigned long *)((c)->x86_capability))

#define REQUIRED_MASK_BIT_SET(bit)  \
     ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) ||  \
       (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) ||  \
       (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) ||  \
       (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) ||  \
       (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) ||  \
       (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) ||  \
       (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) ||  \
       (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ||  \
       (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) ||  \
       (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )

#define DISABLED_MASK_BIT_SET(bit)  \
     ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) ||  \
       (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) ||  \
       (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) ||  \
       (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) ||  \
       (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) ||  \
       (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) ||  \
       (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) ||  \
       (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) ||  \
       (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) ||  \
       (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )

#define cpu_has(c, bit)  \
    (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :  \
     test_cpu_cap(c, bit))

#define this_cpu_has(bit)  \
    (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :  \
     x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))

/*
 * This macro is for detection of features which need kernel
 * infrastructure to be used.  It may *not* directly test the CPU
 * itself.  Use the cpu_has() family if you want true runtime
 * testing of CPU features, like in hypervisor code where you are
 * supporting a possible guest feature where host support for it
 * is not relevant.
 */
#define cpu_feature_enabled(bit)  \
    (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 :  \
     cpu_has(&boot_cpu_data, bit))

#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)

#define set_cpu_cap(c, bit)   set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
#define setup_clear_cpu_cap(bit) do {  \
    clear_cpu_cap(&boot_cpu_data, bit);  \
    set_bit(bit, (unsigned long *)cpu_caps_cleared);  \
} while (0)
#define setup_force_cpu_cap(bit) do {  \
    set_cpu_cap(&boot_cpu_data, bit);  \
    set_bit(bit, (unsigned long *)cpu_caps_set);  \
} while (0)

#define cpu_has_fpu             boot_cpu_has(X86_FEATURE_FPU)
#define cpu_has_de              boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_pse             boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_tsc             boot_cpu_has(X86_FEATURE_TSC)
#define cpu_has_pge             boot_cpu_has(X86_FEATURE_PGE)
#define cpu_has_apic            boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_sep             boot_cpu_has(X86_FEATURE_SEP)
#define cpu_has_mtrr            boot_cpu_has(X86_FEATURE_MTRR)
#define cpu_has_mmx             boot_cpu_has(X86_FEATURE_MMX)
#define cpu_has_fxsr            boot_cpu_has(X86_FEATURE_FXSR)
#define cpu_has_xmm             boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2            boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3            boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ssse3           boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes             boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx             boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_avx2            boot_cpu_has(X86_FEATURE_AVX2)
#define cpu_has_ht              boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_nx              boot_cpu_has(X86_FEATURE_NX)
#define cpu_has_xstore          boot_cpu_has(X86_FEATURE_XSTORE)
#define cpu_has_xstore_enabled  boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt          boot_cpu_has(X86_FEATURE_XCRYPT)
#define cpu_has_xcrypt_enabled  boot_cpu_has(X86_FEATURE_XCRYPT_EN)
#define cpu_has_ace2            boot_cpu_has(X86_FEATURE_ACE2)
#define cpu_has_ace2_enabled    boot_cpu_has(X86_FEATURE_ACE2_EN)
#define cpu_has_phe             boot_cpu_has(X86_FEATURE_PHE)
#define cpu_has_phe_enabled     boot_cpu_has(X86_FEATURE_PHE_EN)
#define cpu_has_pmm             boot_cpu_has(X86_FEATURE_PMM)
#define cpu_has_pmm_enabled     boot_cpu_has(X86_FEATURE_PMM_EN)
#define cpu_has_ds              boot_cpu_has(X86_FEATURE_DS)
#define cpu_has_pebs            boot_cpu_has(X86_FEATURE_PEBS)
#define cpu_has_clflush         boot_cpu_has(X86_FEATURE_CLFLUSH)
#define cpu_has_bts             boot_cpu_has(X86_FEATURE_BTS)
#define cpu_has_gbpages         boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon    boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat             boot_cpu_has(X86_FEATURE_PAT)
#define cpu_has_xmm4_1          boot_cpu_has(X86_FEATURE_XMM4_1)
#define cpu_has_xmm4_2          boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic          boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave           boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_xsaveopt        boot_cpu_has(X86_FEATURE_XSAVEOPT)
#define cpu_has_xsaves          boot_cpu_has(X86_FEATURE_XSAVES)
#define cpu_has_osxsave         boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor      boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq       boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core    boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
#define cpu_has_perfctr_nb      boot_cpu_has(X86_FEATURE_PERFCTR_NB)
#define cpu_has_perfctr_l2      boot_cpu_has(X86_FEATURE_PERFCTR_L2)
#define cpu_has_cx8             boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16            boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu       boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext         boot_cpu_has(X86_FEATURE_TOPOEXT)

#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);
extern bool __static_cpu_has_safe(u16 bit);

/*
 * Static testing of CPU features.  Used the same as boot_cpu_has().
 * These are only valid after alternatives have run, but will statically
 * patch the target code for additional performance.
 */
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO

#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS

    /*
     * Catch too early usage of this before alternatives
     * have run.
     */
    asm_volatile_goto("1: jmp %l[t_warn]\n"
             "2:\n"
             ".section .altinstructions,\"a\"\n"
             " .long 1b - .\n"
             " .long 0\n"           /* no replacement */
             " .word %P0\n"         /* 1: do replace */
             " .byte 2b - 1b\n"     /* source len */
             " .byte 0\n"           /* replacement len */
             ".previous\n"
             /* skipping size check since replacement size = 0 */
             : : "i" (X86_FEATURE_ALWAYS) : : t_warn);

#endif

    asm_volatile_goto("1: jmp %l[t_no]\n"
             "2:\n"
             ".section .altinstructions,\"a\"\n"
             " .long 1b - .\n"
             " .long 0\n"           /* no replacement */
             " .word %P0\n"         /* feature bit */
             " .byte 2b - 1b\n"     /* source len */
             " .byte 0\n"           /* replacement len */
             ".previous\n"
             /* skipping size check since replacement size = 0 */
             : : "i" (bit) : : t_no);
    return true;
t_no:
    return false;

#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
t_warn:
    warn_pre_alternatives();
    return false;
#endif

#else /* CC_HAVE_ASM_GOTO */

    u8 flag;
    /* Open-coded due to __stringify() in ALTERNATIVE() */
    asm volatile("1: movb $0,%0\n"
             "2:\n"
             ".section .altinstructions,\"a\"\n"
             " .long 1b - .\n"
             " .long 3f - .\n"
             " .word %P1\n"         /* feature bit */
             " .byte 2b - 1b\n"     /* source len */
             " .byte 4f - 3f\n"     /* replacement len */
             ".previous\n"
             ".section .discard,\"aw\",@progbits\n"
             " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
             ".previous\n"
             ".section .altinstr_replacement,\"ax\"\n"
             "3: movb $1,%0\n"
             "4:\n"
             ".previous\n"
             : "=qm" (flag) : "i" (bit));
    return flag;

#endif /* CC_HAVE_ASM_GOTO */
}

#define static_cpu_has(bit)  \
(  \
    __builtin_constant_p(boot_cpu_has(bit)) ?  \
        boot_cpu_has(bit) :  \
    __builtin_constant_p(bit) ?  \
        __static_cpu_has(bit) :  \
        boot_cpu_has(bit)  \
)

static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{
#ifdef CC_HAVE_ASM_GOTO
/*
 * We need to spell the jumps to the compiler because, depending on the offset,
 * the replacement jump can be bigger than the original jump, and this we cannot
 * have. Thus, we force the jump to the widest, 4-byte, signed relative
 * offset even though the last would often fit in less bytes.
 */
    asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
             "2:\n"
             ".section .altinstructions,\"a\"\n"
             " .long 1b - .\n"      /* src offset */
             " .long 3f - .\n"      /* repl offset */
             " .word %P1\n"         /* always replace */
             " .byte 2b - 1b\n"     /* src len */
             " .byte 4f - 3f\n"     /* repl len */
             ".previous\n"
             ".section .altinstr_replacement,\"ax\"\n"
             "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
             "4:\n"
             ".previous\n"
             ".section .altinstructions,\"a\"\n"
             " .long 1b - .\n"      /* src offset */
             " .long 0\n"           /* no replacement */
             " .word %P0\n"         /* feature bit */
             " .byte 2b - 1b\n"     /* src len */
             " .byte 0\n"           /* repl len */
             ".previous\n"
             : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
             : : t_dynamic, t_no);
    return true;
t_no:
    return false;
t_dynamic:
    return __static_cpu_has_safe(bit);
#else
    u8 flag;
    /* Open-coded due to __stringify() in ALTERNATIVE() */
    asm volatile("1: movb $2,%0\n"
             "2:\n"
             ".section .altinstructions,\"a\"\n"
             " .long 1b - .\n"      /* src offset */
             " .long 3f - .\n"      /* repl offset */
             " .word %P2\n"         /* always replace */
             " .byte 2b - 1b\n"     /* source len */
             " .byte 4f - 3f\n"     /* replacement len */
             ".previous\n"
             ".section .discard,\"aw\",@progbits\n"
             " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
             ".previous\n"
             ".section .altinstr_replacement,\"ax\"\n"
             "3: movb $0,%0\n"
             "4:\n"
             ".previous\n"
             ".section .altinstructions,\"a\"\n"
             " .long 1b - .\n"      /* src offset */
             " .long 5f - .\n"      /* repl offset */
             " .word %P1\n"         /* feature bit */
             " .byte 4b - 3b\n"     /* src len */
             " .byte 6f - 5f\n"     /* repl len */
             ".previous\n"
             ".section .discard,\"aw\",@progbits\n"
             " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
             ".previous\n"
             ".section .altinstr_replacement,\"ax\"\n"
             "5: movb $1,%0\n"
             "6:\n"
             ".previous\n"
             : "=qm" (flag)
             : "i" (bit), "i" (X86_FEATURE_ALWAYS));
    return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
#endif /* CC_HAVE_ASM_GOTO */
}

#define static_cpu_has_safe(bit)  \
(  \
    __builtin_constant_p(boot_cpu_has(bit)) ?  \
        boot_cpu_has(bit) :  \
        _static_cpu_has_safe(bit)  \
)
#else
/*
 * gcc 3.x is too stupid to do the static test; fall back to dynamic.
 */
#define static_cpu_has(bit)      boot_cpu_has(bit)
#define static_cpu_has_safe(bit) boot_cpu_has(bit)
#endif

#define cpu_has_bug(c, bit)   cpu_has(c, (bit))
#define set_cpu_bug(c, bit)   set_cpu_cap(c, (bit))
#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit))

#define static_cpu_has_bug(bit)      static_cpu_has((bit))
#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit))
#define boot_cpu_has_bug(bit)        cpu_has_bug(&boot_cpu_data, (bit))

#define MAX_CPU_FEATURES (NCAPINTS * 32)
#define cpu_have_feature boot_cpu_has

#define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X"
#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86,  \
                boot_cpu_data.x86_model

#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */
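A usage sketch, not part of the header: the example_ helpers are hypothetical. boot_cpu_has() reads the boot CPU's capability words at runtime; static_cpu_has() compiles to the same test until alternatives run, after which the branch is patched away.

    #include <asm/cpufeature.h>

    /* Plain runtime check against the capability bitmap. */
    static bool example_have_sse2(void)
    {
        return boot_cpu_has(X86_FEATURE_XMM2);
    }

    /* Hot-path variant: statically patched once alternatives have run. */
    static bool example_have_sse2_fast(void)
    {
        return static_cpu_has(X86_FEATURE_XMM2);
    }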
14 drivers/include/asm/cpumask.h Normal file
@@ -0,0 +1,14 @@
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

extern cpumask_var_t cpu_callin_mask;
extern cpumask_var_t cpu_callout_mask;
extern cpumask_var_t cpu_initialized_mask;
extern cpumask_var_t cpu_sibling_setup_mask;

extern void setup_cpu_local_masks(void);

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_CPUMASK_H */
21 drivers/include/asm/current.h Normal file
@@ -0,0 +1,21 @@
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H

#include <linux/compiler.h>
#include <asm/percpu.h>

#ifndef __ASSEMBLY__
struct task_struct;

DECLARE_PER_CPU(struct task_struct *, current_task);

static __always_inline struct task_struct *get_current(void)
{
    return this_cpu_read_stable(current_task);
}

#define current (void*)GetPid()

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_CURRENT_H */
|
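Note the port-specific twist here: the generic per-cpu get_current() is kept for reference, but the `current` macro is routed to the KolibriOS GetPid() wrapper from syscall.h instead. A hedged sketch of what that means for driver code, assuming (as the cast suggests) that GetPid() returns a pointer-sized identifier for the calling task:

static void owner_example(void)
{
	/* "current" is the calling task's id in this port, not a
	 * struct task_struct pointer as in mainline Linux, so it
	 * must not be dereferenced as one. */
	void *task = current;
	(void)task;
}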
8
drivers/include/asm/delay.h
Normal file
@ -0,0 +1,8 @@
#ifndef _ASM_X86_DELAY_H
#define _ASM_X86_DELAY_H

#include <asm-generic/delay.h>

void use_tsc_delay(void);

#endif /* _ASM_X86_DELAY_H */
101
drivers/include/asm/desc_defs.h
Normal file
@ -0,0 +1,101 @@
/* Written 2000 by Andi Kleen */
#ifndef _ASM_X86_DESC_DEFS_H
#define _ASM_X86_DESC_DEFS_H

/*
 * Segment descriptor structure definitions, usable from both x86_64 and i386
 * archs.
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>

/*
 * FIXME: Accessing the desc_struct through its fields is more elegant,
 * and should be the one valid thing to do. However, a lot of open code
 * still touches the a and b accessors, and doing this allow us to do it
 * incrementally. We keep the signature as a struct, rather than an union,
 * so we can get rid of it transparently in the future -- glommer
 */
/* 8 byte segment descriptor */
struct desc_struct {
	union {
		struct {
			unsigned int a;
			unsigned int b;
		};
		struct {
			u16 limit0;
			u16 base0;
			unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
			unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
		};
	};
} __attribute__((packed));

#define GDT_ENTRY_INIT(flags, base, limit) { { { \
		.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
		.b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
			((limit) & 0xf0000) | ((base) & 0xff000000), \
	} } }

enum {
	GATE_INTERRUPT = 0xE,
	GATE_TRAP = 0xF,
	GATE_CALL = 0xC,
	GATE_TASK = 0x5,
};

/* 16byte gate */
struct gate_struct64 {
	u16 offset_low;
	u16 segment;
	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
	u16 offset_middle;
	u32 offset_high;
	u32 zero1;
} __attribute__((packed));

#define PTR_LOW(x) ((unsigned long long)(x) & 0xFFFF)
#define PTR_MIDDLE(x) (((unsigned long long)(x) >> 16) & 0xFFFF)
#define PTR_HIGH(x) ((unsigned long long)(x) >> 32)

enum {
	DESC_TSS = 0x9,
	DESC_LDT = 0x2,
	DESCTYPE_S = 0x10,	/* !system */
};

/* LDT or TSS descriptor in the GDT. 16 bytes. */
struct ldttss_desc64 {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

#ifdef CONFIG_X86_64
typedef struct gate_struct64 gate_desc;
typedef struct ldttss_desc64 ldt_desc;
typedef struct ldttss_desc64 tss_desc;
#define gate_offset(g) ((g).offset_low | ((unsigned long)(g).offset_middle << 16) | ((unsigned long)(g).offset_high << 32))
#define gate_segment(g) ((g).segment)
#else
typedef struct desc_struct gate_desc;
typedef struct desc_struct ldt_desc;
typedef struct desc_struct tss_desc;
#define gate_offset(g)	(((g).b & 0xffff0000) | ((g).a & 0x0000ffff))
#define gate_segment(g)	((g).a >> 16)
#endif

struct desc_ptr {
	unsigned short size;
	unsigned long address;
} __attribute__((packed)) ;

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_DESC_DEFS_H */
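GDT_ENTRY_INIT packs the scrambled i386 descriptor layout from plain flags/base/limit arguments, so call sites never touch the bit-fields directly. An illustrative sketch of the classic use, building a flat 4 GiB ring-0 code segment (the variable name is hypothetical; the 0xc09a/0xfffff encoding is the standard one):

/* flags 0xc09a: present, DPL 0, code, read/exec, 4 KiB granularity,
 * 32-bit default size; limit 0xfffff pages covers the full 4 GiB. */
static struct desc_struct kernel_cs_example =
	GDT_ENTRY_INIT(0xc09a, 0, 0xfffff);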
45
drivers/include/asm/disabled-features.h
Normal file
@ -0,0 +1,45 @@
#ifndef _ASM_X86_DISABLED_FEATURES_H
#define _ASM_X86_DISABLED_FEATURES_H

/* These features, although they might be available in a CPU
 * will not be used because the compile options to support
 * them are not present.
 *
 * This code allows them to be checked and disabled at
 * compile time without an explicit #ifdef.  Use
 * cpu_feature_enabled().
 */

#ifdef CONFIG_X86_INTEL_MPX
# define DISABLE_MPX	0
#else
# define DISABLE_MPX	(1<<(X86_FEATURE_MPX & 31))
#endif

#ifdef CONFIG_X86_64
# define DISABLE_VME		(1<<(X86_FEATURE_VME & 31))
# define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31))
# define DISABLE_CYRIX_ARR	(1<<(X86_FEATURE_CYRIX_ARR & 31))
# define DISABLE_CENTAUR_MCR	(1<<(X86_FEATURE_CENTAUR_MCR & 31))
#else
# define DISABLE_VME		0
# define DISABLE_K6_MTRR	0
# define DISABLE_CYRIX_ARR	0
# define DISABLE_CENTAUR_MCR	0
#endif /* CONFIG_X86_64 */

/*
 * Make sure to add features to the correct mask
 */
#define DISABLED_MASK0	(DISABLE_VME)
#define DISABLED_MASK1	0
#define DISABLED_MASK2	0
#define DISABLED_MASK3	(DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
#define DISABLED_MASK4	0
#define DISABLED_MASK5	0
#define DISABLED_MASK6	0
#define DISABLED_MASK7	0
#define DISABLED_MASK8	0
#define DISABLED_MASK9	(DISABLE_MPX)

#endif /* _ASM_X86_DISABLED_FEATURES_H */
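These masks exist so a feature test can fold to constant zero whenever kernel support is compiled out. A sketch of the intended call site, assuming the cpu_feature_enabled() helper from cpufeature.h; the feature name is just an example:

static void setup_mpx_example(void)
{
	/* With CONFIG_X86_INTEL_MPX unset, DISABLED_MASK9 carries the
	 * MPX bit, the test is a compile-time 0, and this whole branch
	 * is discarded without any #ifdef at the call site. */
	if (cpu_feature_enabled(X86_FEATURE_MPX))
		; /* enable bound-range checking here */
}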
77
drivers/include/asm/e820.h
Normal file
@ -0,0 +1,77 @@
#ifndef _ASM_X86_E820_H
#define _ASM_X86_E820_H

#ifdef CONFIG_EFI
#include <linux/numa.h>
#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
#else	/* ! CONFIG_EFI */
#define E820_X_MAX E820MAX
#endif
#include <uapi/asm/e820.h>
#ifndef __ASSEMBLY__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map e820;
extern struct e820map e820_saved;

extern unsigned long pci_mem_start;
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
extern int
sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
			     unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
			     int checktype);
extern void update_e820(void);
extern void e820_setup_gap(void);
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
			   unsigned long start_addr, unsigned long long end_addr);
struct setup_data;
extern void parse_e820_ext(u64 phys_addr, u32 data_len);

#if defined(CONFIG_X86_64) || \
	(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
extern void e820_mark_nosave_regions(unsigned long limit_pfn);
#else
static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
{
}
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(unsigned long start, unsigned long end);
#else
static inline void early_memtest(unsigned long start, unsigned long end)
{
}
#endif

extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern u64 early_reserve_e820(u64 sizet, u64 align);

void memblock_x86_fill(void);
void memblock_find_dma_reserve(void);

extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
extern void setup_memory_map(void);
extern char *default_machine_specific_memory_setup(void);

/*
 * Returns true iff the specified range [s,e) is completely contained inside
 * the ISA region.
 */
static inline bool is_ISA_range(u64 s, u64 e)
{
	return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS;
}

#endif /* __ASSEMBLY__ */
#include <linux/ioport.h>

#define HIGH_MEMORY	(1024*1024)
#endif /* _ASM_X86_E820_H */
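A short sketch of how the query helpers above are typically used, assuming the E820_RAM and E820_RESERVED type constants that uapi/asm/e820.h provides:

/* True only if every byte of [start, end) is usable RAM. */
static int range_is_ram_example(u64 start, u64 end)
{
	return e820_all_mapped(start, end, E820_RAM);
}

/* True if any part of [start, end) overlaps firmware-reserved memory. */
static int range_touches_reserved_example(u64 start, u64 end)
{
	return e820_any_mapped(start, end, E820_RESERVED);
}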
209
drivers/include/asm/irqflags.h
Normal file
@ -0,0 +1,209 @@
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}

static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}

static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

static inline void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}

static inline void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}

#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
#else

#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	jmp native_iret
#define USERGS_SYSRET64 \
	swapgs; \
	sysretq;
#define USERGS_SYSRET32 \
	swapgs; \
	sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32 \
	swapgs; \
	sti; \
	sysexit

#else
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}

#else

#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ \
	TRACE_IRQS_ON; \
	sti; \
	SAVE_REST; \
	LOCKDEP_SYS_EXIT; \
	RESTORE_REST; \
	cli; \
	TRACE_IRQS_OFF;

#else
#define ARCH_LOCKDEP_SYS_EXIT \
	pushl %eax; \
	pushl %ecx; \
	pushl %edx; \
	call lockdep_sys_exit; \
	popl %edx; \
	popl %ecx; \
	popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF		call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT	ARCH_LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ	ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
# endif

#endif /* __ASSEMBLY__ */
#endif
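The canonical save/disable/restore pattern built from these primitives, as driver code normally uses it; a minimal sketch with an illustrative shared counter:

static unsigned long counter_example;

static void bump_counter_example(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* pushf ; pop ; cli */
	counter_example++;		/* short critical section */
	arch_local_irq_restore(flags);	/* re-enables only if IF was set */
}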
61
drivers/include/asm/linkage.h
Normal file
@ -0,0 +1,61 @@
#ifndef _ASM_X86_LINKAGE_H
#define _ASM_X86_LINKAGE_H

#include <linux/stringify.h>

#undef notrace
#define notrace __attribute__((no_instrument_function))

#ifdef CONFIG_X86_32
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))

/*
 * Make sure the compiler doesn't do anything stupid with the
 * arguments on the stack - they are owned by the *caller*, not
 * the callee. This just fools gcc into not spilling into them,
 * and keeps it from doing tailcall recursion and/or using the
 * stack slots for temporaries, since they are live and "used"
 * all the way to the end of the function.
 *
 * NOTE! On x86-64, all the arguments are in registers, so this
 * only matters on a 32-bit kernel.
 */
#define asmlinkage_protect(n, ret, args...) \
	__asmlinkage_protect##n(ret, ##args)
#define __asmlinkage_protect_n(ret, args...) \
	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
#define __asmlinkage_protect0(ret) \
	__asmlinkage_protect_n(ret)
#define __asmlinkage_protect1(ret, arg1) \
	__asmlinkage_protect_n(ret, "m" (arg1))
#define __asmlinkage_protect2(ret, arg1, arg2) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
			       "m" (arg4))
#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
			       "m" (arg4), "m" (arg5))
#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
			       "m" (arg4), "m" (arg5), "m" (arg6))

#endif /* CONFIG_X86_32 */

#ifdef __ASSEMBLY__

#define GLOBAL(name) \
	.globl name; \
	name:

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16)
#define __ALIGN		.p2align 4, 0x90
#define __ALIGN_STR	__stringify(__ALIGN)
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_LINKAGE_H */
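What asmlinkage_protect() looks like at a call site: on 32-bit, stack-passed arguments belong to the caller, so the wrapper pins them with dummy "m" constraints until the function returns. An illustrative sketch only; sys_example/do_example are hypothetical names:

asmlinkage long sys_example(int fd, unsigned long len)
{
	long ret = do_example(fd, len);

	/* Keep fd and len "live" so gcc never reuses their stack slots
	 * or turns the call above into a tail call. */
	asmlinkage_protect(2, ret, fd, len);
	return ret;
}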
18
drivers/include/asm/math_emu.h
Normal file
@ -0,0 +1,18 @@
#ifndef _ASM_X86_MATH_EMU_H
#define _ASM_X86_MATH_EMU_H

#include <asm/ptrace.h>
#include <asm/vm86.h>

/* This structure matches the layout of the data saved to the stack
   following a device-not-present interrupt, part of it saved
   automatically by the 80386/80486.
   */
struct math_emu_info {
	long ___orig_eip;
	union {
		struct pt_regs *regs;
		struct kernel_vm86_regs *vm86;
	};
};
#endif /* _ASM_X86_MATH_EMU_H */
291
drivers/include/asm/msr.h
Normal file
@ -0,0 +1,291 @@
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include <uapi/asm/msr.h>

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}

/*
 * both i386 and x86_64 returns 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
#define DECLARE_ARGS(val, low, high)	unsigned low, high
#define EAX_EDX_VAL(val, low, high)	((low) | ((u64)(high) << 32))
#define EAX_EDX_ARGS(val, low, high)	"a" (low), "d" (high)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_ARGS(val, low, high)	"A" (val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif

static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	return EAX_EDX_VAL(val, low, high);
}

static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;
	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	return err;
}

extern unsigned long long native_read_tsc(void);

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

static __always_inline unsigned long long __native_read_tsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), this allows gcc to optimize better
 */

#define rdmsr(msr, low, high) \
do { \
	u64 __val = native_read_msr((msr)); \
	(void)((low) = (u32)__val); \
	(void)((high) = (u32)(__val >> 32)); \
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val) \
	((val) = native_read_msr((msr)))

#define wrmsrl(msr, val) \
	native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high) \
({ \
	int __err; \
	u64 __val = native_read_msr_safe((msr), &__err); \
	(*low) = (u32)__val; \
	(*high) = (u32)(__val >> 32); \
	__err; \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

#define rdtscl(low) \
	((low) = (u32)__native_read_tsc())

#define rdtscll(val) \
	((val) = __native_read_tsc())

#define rdpmc(counter, low, high) \
do { \
	u64 _l = native_read_pmc((counter)); \
	(low) = (u32)_l; \
	(high) = (u32)(_l >> 32); \
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#define rdtscp(low, high, aux) \
do { \
	unsigned long long _val = native_read_tscp(&(aux)); \
	(low) = (u32)_val; \
	(high) = (u32)(_val >> 32); \
} while (0)

#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))

#endif	/* !CONFIG_PARAVIRT */

#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
					 (u32)((val) >> 32))

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */
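Typical use of the faulting and non-faulting accessors above; a sketch, with MSR_IA32_TSC coming from the uapi msr-index definitions:

static u64 read_tsc_msr_example(void)
{
	u64 val;

	rdmsrl(MSR_IA32_TSC, val);	/* #GP-faults on a bogus MSR */
	return val;
}

static int probe_msr_example(u32 msr)
{
	u64 val;

	/* Returns -EIO via the fixup section instead of faulting
	 * when the MSR does not exist on this CPU. */
	return rdmsrl_safe(msr, &val);
}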
146
drivers/include/asm/nops.h
Normal file
@ -0,0 +1,146 @@
#ifndef _ASM_X86_NOPS_H
#define _ASM_X86_NOPS_H

/*
 * Define nops for use with alternative() and for tracing.
 *
 * *_NOP5_ATOMIC must be a single instruction.
 */

#define NOP_DS_PREFIX 0x3e

/* generic versions from gas
   1: nop
   the following instructions are NOT nops in 64-bit mode,
   for 64-bit mode use K8 or P6 nops instead
   2: movl %esi,%esi
   3: leal 0x00(%esi),%esi
   4: leal 0x00(,%esi,1),%esi
   6: leal 0x00000000(%esi),%esi
   7: leal 0x00000000(,%esi,1),%esi
*/
#define GENERIC_NOP1 0x90
#define GENERIC_NOP2 0x89,0xf6
#define GENERIC_NOP3 0x8d,0x76,0x00
#define GENERIC_NOP4 0x8d,0x74,0x26,0x00
#define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4
#define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00
#define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
#define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7
#define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4

/* Opteron 64bit nops
   1: nop
   2: osp nop
   3: osp osp nop
   4: osp osp osp nop
*/
#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2	0x66,K8_NOP1
#define K8_NOP3	0x66,K8_NOP2
#define K8_NOP4	0x66,K8_NOP3
#define K8_NOP5	K8_NOP3,K8_NOP2
#define K8_NOP6	K8_NOP3,K8_NOP3
#define K8_NOP7	K8_NOP4,K8_NOP3
#define K8_NOP8	K8_NOP4,K8_NOP4
#define K8_NOP5_ATOMIC 0x66,K8_NOP4

/* K7 nops
   uses eax dependencies (arbitrary choice)
   1: nop
   2: movl %eax,%eax
   3: leal (,%eax,1),%eax
   4: leal 0x00(,%eax,1),%eax
   6: leal 0x00000000(%eax),%eax
   7: leal 0x00000000(,%eax,1),%eax
*/
#define K7_NOP1	GENERIC_NOP1
#define K7_NOP2	0x8b,0xc0
#define K7_NOP3	0x8d,0x04,0x20
#define K7_NOP4	0x8d,0x44,0x20,0x00
#define K7_NOP5	K7_NOP4,K7_NOP1
#define K7_NOP6	0x8d,0x80,0,0,0,0
#define K7_NOP7	0x8D,0x04,0x05,0,0,0,0
#define K7_NOP8	K7_NOP7,K7_NOP1
#define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4

/* P6 nops
   uses eax dependencies (Intel-recommended choice)
   1: nop
   2: osp nop
   3: nopl (%eax)
   4: nopl 0x00(%eax)
   5: nopl 0x00(%eax,%eax,1)
   6: osp nopl 0x00(%eax,%eax,1)
   7: nopl 0x00000000(%eax)
   8: nopl 0x00000000(%eax,%eax,1)
   Note: All the above are assumed to be a single instruction.
	 There is kernel code that depends on this.
*/
#define P6_NOP1	GENERIC_NOP1
#define P6_NOP2	0x66,0x90
#define P6_NOP3	0x0f,0x1f,0x00
#define P6_NOP4	0x0f,0x1f,0x40,0
#define P6_NOP5	0x0f,0x1f,0x44,0x00,0
#define P6_NOP6	0x66,0x0f,0x1f,0x44,0x00,0
#define P6_NOP7	0x0f,0x1f,0x80,0,0,0,0
#define P6_NOP8	0x0f,0x1f,0x84,0x00,0,0,0,0
#define P6_NOP5_ATOMIC P6_NOP5

#ifdef __ASSEMBLY__
#define _ASM_MK_NOP(x) .byte x
#else
#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
#endif

#if defined(CONFIG_MK7)
#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
#define ASM_NOP2 _ASM_MK_NOP(K7_NOP2)
#define ASM_NOP3 _ASM_MK_NOP(K7_NOP3)
#define ASM_NOP4 _ASM_MK_NOP(K7_NOP4)
#define ASM_NOP5 _ASM_MK_NOP(K7_NOP5)
#define ASM_NOP6 _ASM_MK_NOP(K7_NOP6)
#define ASM_NOP7 _ASM_MK_NOP(K7_NOP7)
#define ASM_NOP8 _ASM_MK_NOP(K7_NOP8)
#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC)
#elif defined(CONFIG_X86_P6_NOP)
#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1)
#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2)
#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3)
#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4)
#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5)
#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6)
#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7)
#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8)
#define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC)
#elif defined(CONFIG_X86_64)
#define ASM_NOP1 _ASM_MK_NOP(K8_NOP1)
#define ASM_NOP2 _ASM_MK_NOP(K8_NOP2)
#define ASM_NOP3 _ASM_MK_NOP(K8_NOP3)
#define ASM_NOP4 _ASM_MK_NOP(K8_NOP4)
#define ASM_NOP5 _ASM_MK_NOP(K8_NOP5)
#define ASM_NOP6 _ASM_MK_NOP(K8_NOP6)
#define ASM_NOP7 _ASM_MK_NOP(K8_NOP7)
#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8)
#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC)
#else
#define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1)
#define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2)
#define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3)
#define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4)
#define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5)
#define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6)
#define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7)
#define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8)
#define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC)
#endif

#define ASM_NOP_MAX 8
#define NOP_ATOMIC5 (ASM_NOP_MAX+1)	/* Entry for the 5-byte atomic NOP */

#ifndef __ASSEMBLY__
extern const unsigned char * const *ideal_nops;
extern void arch_init_ideal_nops(void);
#endif

#endif /* _ASM_X86_NOPS_H */
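The ASM_NOP* strings are meant to be pasted into inline assembly wherever padding of an exact byte length is needed, for instance as alternative() filler. A minimal sketch:

static inline void five_byte_pad_example(void)
{
	/* Emits exactly five bytes of the flavour selected above;
	 * a single instruction on families that support long NOPs. */
	asm volatile(ASM_NOP5);
}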
76
drivers/include/asm/page.h
Normal file
@ -0,0 +1,76 @@
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/page_types.h>

#ifdef CONFIG_X86_64
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif	/* CONFIG_X86_64 */

#ifndef __ASSEMBLY__

struct page;

#include <linux/range.h>
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;

static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

#define __pa(x)		__phys_addr((unsigned long)(x))
#define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))
/* __pa_symbol should be used for C visible symbols.
   This seems to be the official gcc blessed way to do such arithmetic. */
/*
 * We need __phys_reloc_hide() here because gcc may assume that there is no
 * overflow during __pa() calculation and can optimize it unexpectedly.
 * Newer versions of gcc provide -fno-strict-overflow switch to handle this
 * case properly. Once all supported versions of gcc understand it, we can
 * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
 */
#define __pa_symbol(x) \
	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))

#define __va(x)		((void *)((unsigned long)(x)+PAGE_OFFSET))

#define __boot_va(x)	__va(x)
#define __boot_pa(x)	__pa(x)

/*
 * virt_to_page(kaddr) returns a valid pointer if and only if
 * virt_addr_valid(kaddr) returns true.
 */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))

#endif	/* __ASSEMBLY__ */

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#endif	/* __KERNEL__ */
#endif /* _ASM_X86_PAGE_H */
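On this 32-bit configuration __pa()/__va() are plain offset conversions against PAGE_OFFSET (see page_32.h below); a round-trip sketch:

static void pa_va_roundtrip_example(void *kaddr)
{
	unsigned long phys = __pa(kaddr);	/* kaddr - PAGE_OFFSET */
	void *virt = __va(phys);		/* phys + PAGE_OFFSET  */

	/* virt == kaddr for any directly mapped kernel address. */
	(void)virt;
}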
48
drivers/include/asm/page_32.h
Normal file
@ -0,0 +1,48 @@
#ifndef _ASM_X86_PAGE_32_H
#define _ASM_X86_PAGE_32_H

#include <asm/page_32_types.h>

#ifndef __ASSEMBLY__

#define __phys_addr_nodebug(x)	((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
#endif
#define __phys_addr_symbol(x)	__phys_addr(x)
#define __phys_reloc_hide(x)	RELOC_HIDE((x), 0)

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */

#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>

static inline void clear_page(void *page)
{
	mmx_clear_page(page);
}

static inline void copy_page(void *to, void *from)
{
	mmx_copy_page(to, from);
}
#else  /* !CONFIG_X86_USE_3DNOW */
#include <linux/string.h>

static inline void clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}
#endif	/* CONFIG_X86_USE_3DNOW */
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PAGE_32_H */
620
drivers/include/asm/percpu.h
Normal file
620
drivers/include/asm/percpu.h
Normal file
@ -0,0 +1,620 @@
|
||||
#ifndef _ASM_X86_PERCPU_H
|
||||
#define _ASM_X86_PERCPU_H
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
#define __percpu_seg gs
|
||||
#define __percpu_mov_op movq
|
||||
#else
|
||||
#define __percpu_seg fs
|
||||
#define __percpu_mov_op movl
|
||||
#endif
|
||||
|
||||
#ifdef __ASSEMBLY__
|
||||
|
||||
/*
|
||||
* PER_CPU finds an address of a per-cpu variable.
|
||||
*
|
||||
* Args:
|
||||
* var - variable name
|
||||
* reg - 32bit register
|
||||
*
|
||||
* The resulting address is stored in the "reg" argument.
|
||||
*
|
||||
* Example:
|
||||
* PER_CPU(cpu_gdt_descr, %ebx)
|
||||
*/
|
||||
#ifdef CONFIG_SMP
|
||||
#define PER_CPU(var, reg) \
|
||||
__percpu_mov_op %__percpu_seg:this_cpu_off, reg; \
|
||||
lea var(reg), reg
|
||||
#define PER_CPU_VAR(var) %__percpu_seg:var
|
||||
#else /* ! SMP */
|
||||
#define PER_CPU(var, reg) __percpu_mov_op $var, reg
|
||||
#define PER_CPU_VAR(var) var
|
||||
#endif /* SMP */
|
||||
|
||||
#ifdef CONFIG_X86_64_SMP
|
||||
#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
|
||||
#else
|
||||
#define INIT_PER_CPU_VAR(var) var
|
||||
#endif
|
||||
|
||||
#else /* ...!ASSEMBLY */
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/stringify.h>
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
#define __percpu_prefix "%%"__stringify(__percpu_seg)":"
|
||||
#define __my_cpu_offset this_cpu_read(this_cpu_off)
|
||||
|
||||
/*
|
||||
* Compared to the generic __my_cpu_offset version, the following
|
||||
* saves one instruction and avoids clobbering a temp register.
|
||||
*/
|
||||
#define arch_raw_cpu_ptr(ptr) \
|
||||
({ \
|
||||
unsigned long tcp_ptr__; \
|
||||
asm volatile("add " __percpu_arg(1) ", %0" \
|
||||
: "=r" (tcp_ptr__) \
|
||||
: "m" (this_cpu_off), "0" (ptr)); \
|
||||
(typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
|
||||
})
|
||||
#else
|
||||
#define __percpu_prefix ""
|
||||
#endif
|
||||
|
||||
#define __percpu_arg(x) __percpu_prefix "%" #x
|
||||
|
||||
/*
|
||||
* Initialized pointers to per-cpu variables needed for the boot
|
||||
* processor need to use these macros to get the proper address
|
||||
* offset from __per_cpu_load on SMP.
|
||||
*
|
||||
* There also must be an entry in vmlinux_64.lds.S
|
||||
*/
|
||||
#define DECLARE_INIT_PER_CPU(var) \
|
||||
extern typeof(var) init_per_cpu_var(var)
|
||||
|
||||
#ifdef CONFIG_X86_64_SMP
|
||||
#define init_per_cpu_var(var) init_per_cpu__##var
|
||||
#else
|
||||
#define init_per_cpu_var(var) var
|
||||
#endif
|
||||
|
||||
/* For arch-specific code, we can use direct single-insn ops (they
|
||||
* don't give an lvalue though). */
|
||||
extern void __bad_percpu_size(void);
|
||||
|
||||
#define percpu_to_op(op, var, val) \
|
||||
do { \
|
||||
typedef typeof(var) pto_T__; \
|
||||
if (0) { \
|
||||
pto_T__ pto_tmp__; \
|
||||
pto_tmp__ = (val); \
|
||||
(void)pto_tmp__; \
|
||||
} \
|
||||
switch (sizeof(var)) { \
|
||||
case 1: \
|
||||
asm(op "b %1,"__percpu_arg(0) \
|
||||
: "+m" (var) \
|
||||
: "qi" ((pto_T__)(val))); \
|
||||
break; \
|
||||
case 2: \
|
||||
asm(op "w %1,"__percpu_arg(0) \
|
||||
: "+m" (var) \
|
||||
: "ri" ((pto_T__)(val))); \
|
||||
break; \
|
||||
case 4: \
|
||||
asm(op "l %1,"__percpu_arg(0) \
|
||||
: "+m" (var) \
|
||||
: "ri" ((pto_T__)(val))); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm(op "q %1,"__percpu_arg(0) \
|
||||
: "+m" (var) \
|
||||
: "re" ((pto_T__)(val))); \
|
||||
break; \
|
||||
default: __bad_percpu_size(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Generate a percpu add to memory instruction and optimize code
|
||||
* if one is added or subtracted.
|
||||
*/
|
||||
#define percpu_add_op(var, val) \
|
||||
do { \
|
||||
typedef typeof(var) pao_T__; \
|
||||
const int pao_ID__ = (__builtin_constant_p(val) && \
|
||||
((val) == 1 || (val) == -1)) ? \
|
||||
(int)(val) : 0; \
|
||||
if (0) { \
|
||||
pao_T__ pao_tmp__; \
|
||||
pao_tmp__ = (val); \
|
||||
(void)pao_tmp__; \
|
||||
} \
|
||||
switch (sizeof(var)) { \
|
||||
case 1: \
|
||||
if (pao_ID__ == 1) \
|
||||
asm("incb "__percpu_arg(0) : "+m" (var)); \
|
||||
else if (pao_ID__ == -1) \
|
||||
asm("decb "__percpu_arg(0) : "+m" (var)); \
|
||||
else \
|
||||
asm("addb %1, "__percpu_arg(0) \
|
||||
: "+m" (var) \
|
||||
: "qi" ((pao_T__)(val))); \
|
||||
break; \
|
||||
case 2: \
|
||||
if (pao_ID__ == 1) \
|
||||
asm("incw "__percpu_arg(0) : "+m" (var)); \
|
||||
else if (pao_ID__ == -1) \
|
||||
asm("decw "__percpu_arg(0) : "+m" (var)); \
|
||||
else \
|
||||
asm("addw %1, "__percpu_arg(0) \
|
||||
: "+m" (var) \
|
||||
: "ri" ((pao_T__)(val))); \
|
||||
break; \
|
||||
case 4: \
|
||||
if (pao_ID__ == 1) \
|
||||
asm("incl "__percpu_arg(0) : "+m" (var)); \
|
||||
else if (pao_ID__ == -1) \
|
||||
asm("decl "__percpu_arg(0) : "+m" (var)); \
|
||||
else \
|
||||
asm("addl %1, "__percpu_arg(0) \
|
||||
: "+m" (var) \
|
||||
: "ri" ((pao_T__)(val))); \
|
||||
break; \
|
||||
case 8: \
|
||||
if (pao_ID__ == 1) \
|
||||
asm("incq "__percpu_arg(0) : "+m" (var)); \
|
||||
else if (pao_ID__ == -1) \
|
||||
asm("decq "__percpu_arg(0) : "+m" (var)); \
|
||||
else \
|
||||
asm("addq %1, "__percpu_arg(0) \
|
||||
: "+m" (var) \
|
||||
: "re" ((pao_T__)(val))); \
|
||||
break; \
|
||||
default: __bad_percpu_size(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define percpu_from_op(op, var) \
|
||||
({ \
|
||||
typeof(var) pfo_ret__; \
|
||||
switch (sizeof(var)) { \
|
||||
case 1: \
|
||||
asm(op "b "__percpu_arg(1)",%0" \
|
||||
: "=q" (pfo_ret__) \
|
||||
: "m" (var)); \
|
||||
break; \
|
||||
case 2: \
|
||||
asm(op "w "__percpu_arg(1)",%0" \
|
||||
: "=r" (pfo_ret__) \
|
||||
: "m" (var)); \
|
||||
break; \
|
||||
case 4: \
|
||||
asm(op "l "__percpu_arg(1)",%0" \
|
||||
: "=r" (pfo_ret__) \
|
||||
: "m" (var)); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm(op "q "__percpu_arg(1)",%0" \
|
||||
: "=r" (pfo_ret__) \
|
||||
: "m" (var)); \
|
||||
break; \
|
||||
default: __bad_percpu_size(); \
|
||||
} \
|
||||
pfo_ret__; \
|
||||
})
|
||||
|
||||
#define percpu_stable_op(op, var) \
|
||||
({ \
|
||||
typeof(var) pfo_ret__; \
|
||||
switch (sizeof(var)) { \
|
||||
case 1: \
|
||||
asm(op "b "__percpu_arg(P1)",%0" \
|
||||
: "=q" (pfo_ret__) \
|
||||
: "p" (&(var))); \
|
||||
break; \
|
||||
case 2: \
|
||||
asm(op "w "__percpu_arg(P1)",%0" \
|
||||
: "=r" (pfo_ret__) \
|
||||
: "p" (&(var))); \
|
||||
break; \
|
||||
case 4: \
|
||||
asm(op "l "__percpu_arg(P1)",%0" \
|
||||
: "=r" (pfo_ret__) \
|
||||
: "p" (&(var))); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm(op "q "__percpu_arg(P1)",%0" \
|
||||
: "=r" (pfo_ret__) \
|
||||
: "p" (&(var))); \
|
||||
break; \
|
||||
default: __bad_percpu_size(); \
|
||||
} \
|
||||
pfo_ret__; \
|
||||
})
|
||||
|
||||
#define percpu_unary_op(op, var) \
|
||||
({ \
|
||||
switch (sizeof(var)) { \
|
||||
case 1: \
|
||||
asm(op "b "__percpu_arg(0) \
|
||||
: "+m" (var)); \
|
||||
break; \
|
||||
case 2: \
|
||||
asm(op "w "__percpu_arg(0) \
|
||||
: "+m" (var)); \
|
||||
break; \
|
||||
case 4: \
|
||||
asm(op "l "__percpu_arg(0) \
|
||||
: "+m" (var)); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm(op "q "__percpu_arg(0) \
|
||||
: "+m" (var)); \
|
||||
break; \
|
||||
default: __bad_percpu_size(); \
|
||||
} \
|
||||
})
|
||||
|
||||
/*
|
||||
* Add return operation
|
||||
*/
|
||||
#define percpu_add_return_op(var, val) \
|
||||
({ \
|
||||
typeof(var) paro_ret__ = val; \
|
||||
switch (sizeof(var)) { \
|
||||
case 1: \
|
||||
asm("xaddb %0, "__percpu_arg(1) \
|
||||
: "+q" (paro_ret__), "+m" (var) \
|
||||
: : "memory"); \
|
||||
break; \
|
||||
case 2: \
|
||||
asm("xaddw %0, "__percpu_arg(1) \
|
||||
: "+r" (paro_ret__), "+m" (var) \
|
||||
: : "memory"); \
|
||||
break; \
|
||||
case 4: \
|
||||
asm("xaddl %0, "__percpu_arg(1) \
|
||||
: "+r" (paro_ret__), "+m" (var) \
|
||||
: : "memory"); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm("xaddq %0, "__percpu_arg(1) \
|
||||
: "+re" (paro_ret__), "+m" (var) \
|
||||
: : "memory"); \
|
||||
break; \
|
||||
default: __bad_percpu_size(); \
|
||||
} \
|
||||
paro_ret__ += val; \
|
||||
paro_ret__; \
|
||||
})
|
||||
|
||||
/*
|
||||
* xchg is implemented using cmpxchg without a lock prefix. xchg is
|
||||
* expensive due to the implied lock prefix. The processor cannot prefetch
|
||||
* cachelines if xchg is used.
|
||||
*/
|
||||
#define percpu_xchg_op(var, nval) \
|
||||
({ \
|
||||
typeof(var) pxo_ret__; \
|
||||
typeof(var) pxo_new__ = (nval); \
|
||||
switch (sizeof(var)) { \
|
||||
case 1: \
|
||||
asm("\n\tmov "__percpu_arg(1)",%%al" \
|
||||
"\n1:\tcmpxchgb %2, "__percpu_arg(1) \
|
||||
"\n\tjnz 1b" \
|
||||
: "=&a" (pxo_ret__), "+m" (var) \
|
||||
: "q" (pxo_new__) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
case 2: \
|
||||
asm("\n\tmov "__percpu_arg(1)",%%ax" \
|
||||
"\n1:\tcmpxchgw %2, "__percpu_arg(1) \
|
||||
"\n\tjnz 1b" \
|
||||
: "=&a" (pxo_ret__), "+m" (var) \
|
||||
: "r" (pxo_new__) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
case 4: \
|
||||
asm("\n\tmov "__percpu_arg(1)",%%eax" \
|
||||
"\n1:\tcmpxchgl %2, "__percpu_arg(1) \
|
||||
"\n\tjnz 1b" \
|
||||
: "=&a" (pxo_ret__), "+m" (var) \
|
||||
: "r" (pxo_new__) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm("\n\tmov "__percpu_arg(1)",%%rax" \
|
||||
"\n1:\tcmpxchgq %2, "__percpu_arg(1) \
|
||||
"\n\tjnz 1b" \
|
||||
: "=&a" (pxo_ret__), "+m" (var) \
|
||||
: "r" (pxo_new__) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
default: __bad_percpu_size(); \
|
||||
} \
|
||||
pxo_ret__; \
|
||||
})
|
||||
|
||||
/*
|
||||
* cmpxchg has no such implied lock semantics as a result it is much
|
||||
* more efficient for cpu local operations.
|
||||
*/
|
||||
#define percpu_cmpxchg_op(var, oval, nval) \
|
||||
({ \
|
||||
typeof(var) pco_ret__; \
|
||||
typeof(var) pco_old__ = (oval); \
|
||||
typeof(var) pco_new__ = (nval); \
|
||||
switch (sizeof(var)) { \
|
||||
case 1: \
|
||||
asm("cmpxchgb %2, "__percpu_arg(1) \
|
||||
: "=a" (pco_ret__), "+m" (var) \
|
||||
: "q" (pco_new__), "0" (pco_old__) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
case 2: \
|
||||
asm("cmpxchgw %2, "__percpu_arg(1) \
|
||||
: "=a" (pco_ret__), "+m" (var) \
|
||||
: "r" (pco_new__), "0" (pco_old__) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
case 4: \
|
||||
asm("cmpxchgl %2, "__percpu_arg(1) \
|
||||
: "=a" (pco_ret__), "+m" (var) \
|
||||
: "r" (pco_new__), "0" (pco_old__) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm("cmpxchgq %2, "__percpu_arg(1) \
|
||||
: "=a" (pco_ret__), "+m" (var) \
|
||||
: "r" (pco_new__), "0" (pco_old__) \
|
||||
: "memory"); \
|
||||
break; \
|
||||
default: __bad_percpu_size(); \
|
||||
} \
|
||||
pco_ret__; \
|
||||
})
|
||||
|
||||
/*
|
||||
* this_cpu_read() makes gcc load the percpu variable every time it is
|
||||
* accessed while this_cpu_read_stable() allows the value to be cached.
|
||||
* this_cpu_read_stable() is more efficient and can be used if its value
|
||||
* is guaranteed to be valid across cpus. The current users include
|
||||
* get_current() and get_thread_info() both of which are actually
|
||||
* per-thread variables implemented as per-cpu variables and thus
|
||||
* stable for the duration of the respective task.
|
||||
*/
|
||||
#define this_cpu_read_stable(var) percpu_stable_op("mov", var)
|
||||
|
||||
#define raw_cpu_read_1(pcp) percpu_from_op("mov", pcp)
|
||||
#define raw_cpu_read_2(pcp) percpu_from_op("mov", pcp)
|
||||
#define raw_cpu_read_4(pcp) percpu_from_op("mov", pcp)
|
||||
|
||||
#define raw_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||
#define raw_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||
#define raw_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||
#define raw_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
|
||||
#define raw_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
|
||||
#define raw_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
|
||||
#define raw_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define raw_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define raw_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define raw_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define raw_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define raw_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define raw_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val)
|
||||
#define raw_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val)
|
||||
#define raw_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val)
|
||||
|
||||
#define this_cpu_read_1(pcp) percpu_from_op("mov", pcp)
|
||||
#define this_cpu_read_2(pcp) percpu_from_op("mov", pcp)
|
||||
#define this_cpu_read_4(pcp) percpu_from_op("mov", pcp)
|
||||
#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||
#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||
#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||
#define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
|
||||
#define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
|
||||
#define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
|
||||
#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
|
||||
#define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
|
||||
#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
|
||||
#ifdef CONFIG_X86_CMPXCHG64
|
||||
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2) \
|
||||
({ \
|
||||
bool __ret; \
|
||||
typeof(pcp1) __o1 = (o1), __n1 = (n1); \
|
||||
typeof(pcp2) __o2 = (o2), __n2 = (n2); \
|
||||
asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t" \
|
||||
: "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
|
||||
: "b" (__n1), "c" (__n2), "a" (__o1)); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define raw_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double
|
||||
#define this_cpu_cmpxchg_double_4 percpu_cmpxchg8b_double
|
||||
#endif /* CONFIG_X86_CMPXCHG64 */
|
||||
|
||||
/*
|
||||
* Per cpu atomic 64 bit operations are only available under 64 bit.
|
||||
* 32 bit must fall back to generic operations.
|
||||
*/
|
||||
#ifdef CONFIG_X86_64
|
||||
#define raw_cpu_read_8(pcp) percpu_from_op("mov", pcp)
|
||||
#define raw_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||
#define raw_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
|
||||
#define raw_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define raw_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define raw_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
|
||||
#define this_cpu_read_8(pcp) percpu_from_op("mov", pcp)
|
||||
#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||
#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
|
||||
#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
|
||||
/*
|
||||
* Pretty complex macro to generate cmpxchg16 instruction. The instruction
|
||||
* is not supported on early AMD64 processors so we must be able to emulate
|
||||
* it in software. The address used in the cmpxchg16 instruction must be
|
||||
* aligned to a 16 byte boundary.
|
||||
*/
|
||||
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2) \
|
||||
({ \
|
||||
bool __ret; \
|
||||
typeof(pcp1) __o1 = (o1), __n1 = (n1); \
|
||||
typeof(pcp2) __o2 = (o2), __n2 = (n2); \
|
||||
	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
		       X86_FEATURE_CX16,				\
		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
				   "+m" (pcp2), "+d" (__o2)),		\
		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double

#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)				\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})

static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
			const unsigned long __percpu *addr)
{
	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline int x86_this_cpu_variable_test_bit(int nr,
			const unsigned long __percpu *addr)
{
	int oldbit;

	asm volatile("bt "__percpu_arg(2)",%1\n\t"
			"sbb %0,%0"
			: "=r" (oldbit)
			: "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))


#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))

#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */
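/*
 * A minimal user-space sketch of the dispatch used by
 * x86_this_cpu_test_bit() above: __builtin_constant_p() routes
 * compile-time-constant bit numbers to a helper the compiler can fold
 * away, and run-time values to the general helper.  Every demo_* name
 * below is illustrative; only the dispatch shape comes from the header.
 */
#include <stdio.h>

#define DEMO_BITS_PER_LONG (8 * sizeof(unsigned long))

/* Constant path: with a literal 'nr' the whole expression folds away. */
static inline int demo_constant_test_bit(unsigned int nr,
					 const unsigned long *addr)
{
	return (addr[nr / DEMO_BITS_PER_LONG] >> (nr % DEMO_BITS_PER_LONG)) & 1;
}

/* Variable path: same semantics, used when 'nr' is only known at run time. */
static int demo_variable_test_bit(unsigned int nr, const unsigned long *addr)
{
	return (addr[nr / DEMO_BITS_PER_LONG] >> (nr % DEMO_BITS_PER_LONG)) & 1;
}

#define demo_test_bit(nr, addr)				\
	(__builtin_constant_p(nr)			\
	 ? demo_constant_test_bit((nr), (addr))		\
	 : demo_variable_test_bit((nr), (addr)))

int main(void)
{
	unsigned long map[1] = { 0x5UL };	/* bits 0 and 2 set */
	unsigned int n = 2;

	/* prints "1 1 0": constant, variable and constant dispatch */
	printf("%d %d %d\n", demo_test_bit(0, map), demo_test_bit(n, map),
	       demo_test_bit(1, map));
	return 0;
}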
116
drivers/include/asm/pgtable-2level.h
Normal file
@ -0,0 +1,116 @@
#ifndef _ASM_X86_PGTABLE_2LEVEL_H
#define _ASM_X86_PGTABLE_2LEVEL_H

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_pmd_clear(pmd_t *pmdp)
{
	native_set_pmd(pmdp, __pmd(0));
}

static inline void native_pte_clear(struct mm_struct *mm,
				    unsigned long addr, pte_t *xp)
{
	*xp = native_make_pte(0);
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
	return __pte(xchg(&xp->pte_low, 0));
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
	return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

/* Bit manipulation helper on pte/pgoff entry */
static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
				      unsigned long mask, unsigned int leftshift)
{
	return ((value >> rightshift) & mask) << leftshift;
}

/*
 * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
 * split up the 29 bits of offset into this range.
 */
#define PTE_FILE_MAX_BITS	29
#define PTE_FILE_SHIFT1		(_PAGE_BIT_PRESENT + 1)
#define PTE_FILE_SHIFT2		(_PAGE_BIT_FILE + 1)
#define PTE_FILE_SHIFT3		(_PAGE_BIT_PROTNONE + 1)
#define PTE_FILE_BITS1		(PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2		(PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)

#define PTE_FILE_MASK1		((1U << PTE_FILE_BITS1) - 1)
#define PTE_FILE_MASK2		((1U << PTE_FILE_BITS2) - 1)

#define PTE_FILE_LSHIFT2	(PTE_FILE_BITS1)
#define PTE_FILE_LSHIFT3	(PTE_FILE_BITS1 + PTE_FILE_BITS2)

static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
{
	return (pgoff_t)
		(pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1,  0)		    +
		 pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2,  PTE_FILE_LSHIFT2) +
		 pte_bitop(pte.pte_low, PTE_FILE_SHIFT3,           -1UL,  PTE_FILE_LSHIFT3));
}

static __always_inline pte_t pgoff_to_pte(pgoff_t off)
{
	return (pte_t){
		.pte_low =
			pte_bitop(off,                0, PTE_FILE_MASK1,  PTE_FILE_SHIFT1) +
			pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2,  PTE_FILE_SHIFT2) +
			pte_bitop(off, PTE_FILE_LSHIFT3,           -1UL,  PTE_FILE_SHIFT3) +
			_PAGE_FILE,
	};
}

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (_PAGE_BIT_PRESENT + 1)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (_PAGE_BIT_PRESENT + 1)) \
					 | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })

#endif /* _ASM_X86_PGTABLE_2LEVEL_H */
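/*
 * A standalone round-trip check of the pte_to_pgoff()/pgoff_to_pte()
 * packing above, using the bit positions that asm/pgtable_types.h in this
 * same commit assigns (_PAGE_BIT_PRESENT == 0, _PAGE_BIT_FILE == 6,
 * _PAGE_BIT_PROTNONE == 8).  A sketch for illustration, not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_SHIFT1	1	/* _PAGE_BIT_PRESENT + 1 */
#define DEMO_SHIFT2	7	/* _PAGE_BIT_FILE + 1 */
#define DEMO_SHIFT3	9	/* _PAGE_BIT_PROTNONE + 1 */
#define DEMO_BITS1	(DEMO_SHIFT2 - DEMO_SHIFT1 - 1)	/* 5 */
#define DEMO_BITS2	(DEMO_SHIFT3 - DEMO_SHIFT2 - 1)	/* 1 */
#define DEMO_MASK1	((1UL << DEMO_BITS1) - 1)
#define DEMO_MASK2	((1UL << DEMO_BITS2) - 1)
#define DEMO_LSHIFT2	(DEMO_BITS1)
#define DEMO_LSHIFT3	(DEMO_BITS1 + DEMO_BITS2)
#define DEMO_PAGE_FILE	(1UL << 6)

static unsigned long bitop(unsigned long v, unsigned r,
			   unsigned long mask, unsigned l)
{
	return ((v >> r) & mask) << l;	/* same helper as pte_bitop() */
}

int main(void)
{
	unsigned long off = 0x12345678UL;	/* fits in 29 bits */

	/* encode: scatter the offset around bits 0, 6 and 8 */
	unsigned long pte_low =
		bitop(off, 0, DEMO_MASK1, DEMO_SHIFT1) +
		bitop(off, DEMO_LSHIFT2, DEMO_MASK2, DEMO_SHIFT2) +
		bitop(off, DEMO_LSHIFT3, -1UL, DEMO_SHIFT3) + DEMO_PAGE_FILE;

	/* decode: gather the three fields back together */
	unsigned long back =
		bitop(pte_low, DEMO_SHIFT1, DEMO_MASK1, 0) +
		bitop(pte_low, DEMO_SHIFT2, DEMO_MASK2, DEMO_LSHIFT2) +
		bitop(pte_low, DEMO_SHIFT3, -1UL, DEMO_LSHIFT3);

	assert(back == off);
	printf("off=%#lx pte_low=%#lx round-trips\n", off, pte_low);
	return 0;
}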
37
drivers/include/asm/pgtable-2level_types.h
Normal file
@ -0,0 +1,37 @@
#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H

#ifndef __ASSEMBLY__
#include <linux/types.h>

typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;

typedef union {
	pteval_t pte;
	pteval_t pte_low;
} pte_t;
#endif	/* !__ASSEMBLY__ */

#define SHARED_KERNEL_PMD	0
#define PAGETABLE_LEVELS	2

/*
 * traditional i386 two-level paging structure:
 */

#define PGDIR_SHIFT	22
#define PTRS_PER_PGD	1024


/*
 * the i386 is two-level, so we don't really have any
 * PMD directory physically.
 */

#define PTRS_PER_PTE	1024

#endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */
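/*
 * With PGDIR_SHIFT == 22 and 1024 entries at each of the two levels, a
 * 32-bit virtual address splits into a 10-bit pgd index, a 10-bit pte
 * index and a 12-bit page offset.  A sketch of that decomposition
 * (PAGE_SHIFT == 12 is assumed from asm/page_types.h):
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT		12
#define DEMO_PGDIR_SHIFT	22
#define DEMO_PTRS_PER_PGD	1024
#define DEMO_PTRS_PER_PTE	1024

int main(void)
{
	uint32_t vaddr = 0xC0123ABC;	/* an arbitrary address */

	unsigned pgd_idx = (vaddr >> DEMO_PGDIR_SHIFT) & (DEMO_PTRS_PER_PGD - 1);
	unsigned pte_idx = (vaddr >> DEMO_PAGE_SHIFT) & (DEMO_PTRS_PER_PTE - 1);
	unsigned offset  = vaddr & ((1u << DEMO_PAGE_SHIFT) - 1);

	/* 10 + 10 + 12 bits cover the whole address: prints pgd=768 pte=291 off=0xabc */
	printf("pgd=%u pte=%u off=%#x\n", pgd_idx, pte_idx, offset);
	return 0;
}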
905
drivers/include/asm/pgtable.h
Normal file
@ -0,0 +1,905 @@
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#define pmd_update(mm, addr, ptep)		do { } while (0)
#define pmd_update_defer(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	/*
	 * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
	 * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
	 * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
	 */
	return (pte_flags(pte) & _PAGE_SPECIAL) &&
		(pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_file_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline int pte_file_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}
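/*
 * A condensed user-space sketch of the guarantee pte_modify() gives
 * above: the PFN plus the "sticky" _PAGE_CHG_MASK bits survive, while
 * the remaining protection bits are taken from the new pgprot.  The
 * DEMO_* values are illustrative, not the real mask layout; guarded
 * with #if 0 since it is illustration only.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define DEMO_PRESENT	(1u << 0)
#define DEMO_RW		(1u << 1)
#define DEMO_DIRTY	(1u << 6)
#define DEMO_PFN_MASK	0xfffff000u
#define DEMO_CHG_MASK	(DEMO_PFN_MASK | DEMO_DIRTY)

static uint32_t demo_pte_modify(uint32_t pte, uint32_t newprot)
{
	return (pte & DEMO_CHG_MASK) | (newprot & ~DEMO_CHG_MASK);
}

int main(void)
{
	uint32_t pte = 0x12345000u | DEMO_PRESENT | DEMO_RW | DEMO_DIRTY;
	uint32_t out = demo_pte_modify(pte, DEMO_PRESENT); /* make read-only */

	assert((out & DEMO_PFN_MASK) == 0x12345000u);	/* PFN preserved */
	assert(out & DEMO_DIRTY);			/* dirty preserved */
	assert(!(out & DEMO_RW));			/* RW dropped */
	return 0;
}
#endif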

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
//#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
			       _PAGE_NUMA);
}

#define pte_present_nonuma pte_present_nonuma
static inline int pte_present_nonuma(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
				 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
	/* pmd_numa check */
	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
		return 0;
#endif
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
	pmd_update(mm, addr, pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
	pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
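/*
 * A quick standalone check of the page_level_*() arithmetic above for the
 * 2-level case: with PAGE_SHIFT == 12 and PTE_SHIFT == ilog2(1024) == 10,
 * PG_LEVEL_4K yields shift 12 (4 KiB) and PG_LEVEL_2M yields shift 22
 * (4 MiB large pages on non-PAE i386).  Guarded with #if 0; sketch only.
 */
#if 0
#include <assert.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PTE_SHIFT	10

enum demo_pg_level { DEMO_NONE, DEMO_4K, DEMO_2M };

static int demo_level_shift(enum demo_pg_level l)
{
	return (DEMO_PAGE_SHIFT - DEMO_PTE_SHIFT) + l * DEMO_PTE_SHIFT;
}

int main(void)
{
	assert(demo_level_shift(DEMO_4K) == 12);		  /* 4 KiB */
	assert((1UL << demo_level_shift(DEMO_2M)) == 4UL << 20); /* 4 MiB */
	/* the mask clears exactly the in-page bits of that level */
	assert((~((1UL << demo_level_shift(DEMO_4K)) - 1) & 0xfffUL) == 0);
	return 0;
}
#endif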

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	VM_BUG_ON(pte_present_nonuma(pte));
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

//#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */
76
drivers/include/asm/pgtable_32.h
Normal file
@ -0,0 +1,76 @@
#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H

#include <asm/pgtable_32_types.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/threads.h>

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern pgd_t initial_page_table[1024];

static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address)					\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) +			\
	 pte_index((address)))
#define pte_unmap(pte) kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
#define pte_unmap(pte) do { } while (0)
#endif

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)

#endif /* !__ASSEMBLY__ */

/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)		(1)
#else
#define kern_addr_valid(kaddr)		(0)
#endif

#endif /* _ASM_X86_PGTABLE_32_H */
55
drivers/include/asm/pgtable_32_types.h
Normal file
@ -0,0 +1,55 @@
#ifndef _ASM_X86_PGTABLE_32_DEFS_H
#define _ASM_X86_PGTABLE_32_DEFS_H

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode', it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level_types.h>
# define PMD_SIZE	(1UL << PMD_SHIFT)
# define PMD_MASK	(~(PMD_SIZE - 1))
#else
# include <asm/pgtable-2level_types.h>
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8 * 1024 * 1024)

#ifndef __ASSEMBLY__
extern bool __vmalloc_start_set; /* set once high_memory is set */
#endif

#define VMALLOC_START	((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif

#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
		    & PMD_MASK)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
#endif

#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
#define MODULES_LEN	(MODULES_VADDR - MODULES_END)

#define MAXMEM	(VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)

#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
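/*
 * The 32-bit layout above hangs off two run-time anchors, high_memory and
 * FIXADDR_START.  A sketch of the arithmetic with assumed example values
 * (896 MB of lowmem above PAGE_OFFSET 0xC0000000 and an illustrative
 * FIXADDR_START; both numbers are assumptions, not taken from this tree):
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PMD_MASK	(~(4UL * 1024 * 1024 - 1))	/* 4M PMD, non-PAE */
#define DEMO_LAST_PKMAP	1024				/* non-PAE value */

int main(void)
{
	unsigned long high_memory   = 0xF8000000UL;	/* assumed */
	unsigned long fixaddr_start = 0xFFC00000UL;	/* assumed */

	unsigned long vmalloc_start = high_memory + 8 * 1024 * 1024;
	unsigned long pkmap_base    =
		(fixaddr_start - DEMO_PAGE_SIZE * (DEMO_LAST_PKMAP + 1))
		& DEMO_PMD_MASK;
	unsigned long vmalloc_end   = pkmap_base - 2 * DEMO_PAGE_SIZE;

	printf("VMALLOC %#lx..%#lx, PKMAP_BASE %#lx\n",
	       vmalloc_start, vmalloc_end, pkmap_base);
	return 0;
}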
463
drivers/include/asm/pgtable_types.h
Normal file
@ -0,0 +1,463 @@
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <asm/page_types.h>

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1	9	/* available for programmer */
#define _PAGE_BIT_SOFTW2	10	/* " */
#define _PAGE_BIT_SOFTW3	11	/* " */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_SOFTW1
#define _PAGE_BIT_SPLITTING	_PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */
#define _PAGE_BIT_HIDDEN	_PAGE_BIT_SOFTW3 /* hidden by kmemcheck */
#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/*
 * Swap offsets on configurations that allow automatic NUMA balancing use the
 * bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from
 * swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the
 * maximum possible swap space from 16TB to 8TB.
 */
#define _PAGE_BIT_NUMA		(_PAGE_BIT_GLOBAL+1)

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE		_PAGE_BIT_DIRTY

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2	(_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define _PAGE_SPLITTING	(_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING)
#define __HAVE_ARCH_PTE_SPECIAL

#ifdef CONFIG_KMEMCHECK
#define _PAGE_HIDDEN	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#else
#define _PAGE_HIDDEN	(_AT(pteval_t, 0))
#endif

/*
 * The same hidden bit is used by kmemcheck, but since kmemcheck
 * works on kernel pages while soft-dirty engine on user space,
 * they do not conflict with each other.
 */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif

/*
 * _PAGE_NUMA distinguishes between a numa hinting minor fault and a page
 * that is not present. The hinting fault gathers numa placement statistics
 * (see pte_numa()). The bit is always zero when the PTE is not present.
 *
 * The bit picked must always be zero both when the pmd is present and when
 * it is not present, so that we don't lose information when we set it
 * while atomically clearing the present bit.
 */
#ifdef CONFIG_NUMA_BALANCING
#define _PAGE_NUMA	(_AT(pteval_t, 1) << _PAGE_BIT_NUMA)
#else
#define _PAGE_NUMA	(_AT(pteval_t, 0))
#endif

/*
 * Tracking soft dirty bit when a page goes to a swap is tricky.
 * We need a bit which can be stored in pte _and_ not conflict
 * with swap entry format. On x86 bits 6 and 7 are *not* involved
 * into swap entry computation, but bit 6 is used for nonlinear
 * file mapping, so we borrow bit 7 for soft dirty tracking.
 *
 * Please note that this bit must be treated as swap dirty page
 * mark if and only if the PTE has present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
#else
#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif

#define _PAGE_FILE	(_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
			 _PAGE_SOFT_DIRTY | _PAGE_NUMA)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA)

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB = 0,
	_PAGE_CACHE_MODE_WC = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC = 3,
	_PAGE_CACHE_MODE_WT = 4,
	_PAGE_CACHE_MODE_WP = 5,
	_PAGE_CACHE_MODE_NUM = 8
};
#endif

#define _PAGE_CACHE_MASK	(_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
#define _PAGE_NOCACHE		(cachemode2protval(_PAGE_CACHE_MODE_UC))

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VVAR		__pgprot(__PAGE_KERNEL_VVAR)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)

/*	   xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x063		/* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)

/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if PAGETABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pmdval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.pgd);
}
#endif

#if PAGETABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.pgd);
}
#endif

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & PTE_FLAGS_MASK;
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#ifdef CONFIG_NUMA_BALANCING
/* Set of bits that distinguishes present, prot_none and numa ptes */
#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)
static inline pteval_t ptenuma_flags(pte_t pte)
{
	return pte_flags(pte) & _PAGE_NUMA_MASK;
}

static inline pmdval_t pmdnuma_flags(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_NUMA_MASK;
}
#endif /* CONFIG_NUMA_BALANCING */

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

extern uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM];
extern uint8_t __pte2cachemode_tbl[8];

#define __pte2cm_idx(cb)				\
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)					\
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) |		\
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
	 (((i) & 1) << _PAGE_BIT_PWT))

static inline unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}
static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
	return __pgprot(cachemode2protval(pcm));
}
static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	pgprot_t new;
	unsigned long val;

	val = pgprot_val(pgprot);
	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	pgprot_t new;
	unsigned long val;

	val = pgprot_val(pgprot);
	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
			  ((val & _PAGE_PAT_LARGE) >>
			   (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
	return new;
}
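/*
 * __pte2cm_idx() compresses the scattered PAT/PCD/PWT bits (bits 7, 4, 3)
 * into a dense 0..7 table index and __cm_idx2pte() expands it back.  A
 * standalone round-trip check of the two macros above; guarded with
 * #if 0 since it is illustration only.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned i;

	/* every 3-bit index survives expand followed by compress */
	for (i = 0; i < 8; i++)
		assert(__pte2cm_idx(__cm_idx2pte(i)) == i);

	/* PWT (bit 3) lands in index bit 0, PAT (bit 7) in index bit 2 */
	assert(__pte2cm_idx(1u << _PAGE_BIT_PWT) == 1);
	assert(__pte2cm_idx(1u << _PAGE_BIT_PAT) == 4);
	return 0;
}
#endif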

typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t *vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
				   unsigned numpages, unsigned long page_flags);
void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
			       unsigned numpages);
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */
5
drivers/include/asm/posix_types.h
Normal file
@ -0,0 +1,5 @@
# ifdef CONFIG_X86_32
#  include <asm/posix_types_32.h>
# else
#  include <asm/posix_types_64.h>
# endif
11
drivers/include/asm/processor-flags.h
Normal file
@ -0,0 +1,11 @@
#ifndef _ASM_X86_PROCESSOR_FLAGS_H
#define _ASM_X86_PROCESSOR_FLAGS_H

#include <uapi/asm/processor-flags.h>

#ifdef CONFIG_VM86
#define X86_VM_MASK	X86_EFLAGS_VM
#else
#define X86_VM_MASK	0 /* No VM86 support */
#endif
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
1010	drivers/include/asm/processor.h	Normal file
File diff suppressed because it is too large.
@ -47,6 +47,12 @@
# define NEED_NOPL	0
#endif

#ifdef CONFIG_MATOM
# define NEED_MOVBE	(1<<(X86_FEATURE_MOVBE & 31))
#else
# define NEED_MOVBE	0
#endif

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT
/* Paravirtualized systems may not have PSE or PGE available */
@ -80,7 +86,7 @@

#define REQUIRED_MASK2	0
#define REQUIRED_MASK3	(NEED_NOPL)
#define REQUIRED_MASK4	0
#define REQUIRED_MASK4	(NEED_MOVBE)
#define REQUIRED_MASK5	0
#define REQUIRED_MASK6	0
#define REQUIRED_MASK7	0
|
41	drivers/include/asm/rmwcc.h	Normal file
@ -0,0 +1,41 @@
#ifndef _ASM_X86_RMWcc
#define _ASM_X86_RMWcc

#ifdef CC_HAVE_ASM_GOTO

#define __GEN_RMWcc(fullop, var, cc, ...)                       \
do {                                                            \
    asm_volatile_goto (fullop "; j" cc " %l[cc_label]"          \
            : : "m" (var), ## __VA_ARGS__                       \
            : "memory" : cc_label);                             \
    return 0;                                                   \
cc_label:                                                       \
    return 1;                                                   \
} while (0)

#define GEN_UNARY_RMWcc(op, var, arg0, cc)                      \
    __GEN_RMWcc(op " " arg0, var, cc)

#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)          \
    __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))

#else /* !CC_HAVE_ASM_GOTO */

#define __GEN_RMWcc(fullop, var, cc, ...)                       \
do {                                                            \
    char c;                                                     \
    asm volatile (fullop "; set" cc " %1"                       \
            : "+m" (var), "=qm" (c)                             \
            : __VA_ARGS__ : "memory");                          \
    return c != 0;                                              \
} while (0)

#define GEN_UNARY_RMWcc(op, var, arg0, cc)                      \
    __GEN_RMWcc(op " " arg0, var, cc)

#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)          \
    __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))

#endif /* CC_HAVE_ASM_GOTO */

#endif /* _ASM_X86_RMWcc */
|
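These macros exist so a single locked instruction can report its condition code either through an asm-goto branch or through setcc, depending on CC_HAVE_ASM_GOTO. A sketch of the canonical caller, assuming the usual atomic_t and LOCK_PREFIX from the atomic headers:

    /* Returns nonzero when the decrement reaches zero ("e" = ZF set). */
    static inline int atomic_dec_and_test(atomic_t *v)
    {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
    }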
79	drivers/include/asm/sigcontext.h	Normal file
@ -0,0 +1,79 @@
#ifndef _ASM_X86_SIGCONTEXT_H
#define _ASM_X86_SIGCONTEXT_H

#include <uapi/asm/sigcontext.h>

#ifdef __i386__
struct sigcontext {
    unsigned short gs, __gsh;
    unsigned short fs, __fsh;
    unsigned short es, __esh;
    unsigned short ds, __dsh;
    unsigned long di;
    unsigned long si;
    unsigned long bp;
    unsigned long sp;
    unsigned long bx;
    unsigned long dx;
    unsigned long cx;
    unsigned long ax;
    unsigned long trapno;
    unsigned long err;
    unsigned long ip;
    unsigned short cs, __csh;
    unsigned long flags;
    unsigned long sp_at_signal;
    unsigned short ss, __ssh;

    /*
     * fpstate is really (struct _fpstate *) or (struct _xstate *)
     * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
     * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
     * of extended memory layout. See comments at the definition of
     * (struct _fpx_sw_bytes)
     */
    void __user *fpstate;	/* zero when no FPU/extended context */
    unsigned long oldmask;
    unsigned long cr2;
};
#else /* __i386__ */
struct sigcontext {
    unsigned long r8;
    unsigned long r9;
    unsigned long r10;
    unsigned long r11;
    unsigned long r12;
    unsigned long r13;
    unsigned long r14;
    unsigned long r15;
    unsigned long di;
    unsigned long si;
    unsigned long bp;
    unsigned long bx;
    unsigned long dx;
    unsigned long ax;
    unsigned long cx;
    unsigned long sp;
    unsigned long ip;
    unsigned long flags;
    unsigned short cs;
    unsigned short gs;
    unsigned short fs;
    unsigned short __pad0;
    unsigned long err;
    unsigned long trapno;
    unsigned long oldmask;
    unsigned long cr2;

    /*
     * fpstate is really (struct _fpstate *) or (struct _xstate *)
     * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
     * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
     * of extended memory layout. See comments at the definition of
     * (struct _fpx_sw_bytes)
     */
    void __user *fpstate;	/* zero when no FPU/extended context */
    unsigned long reserved1[8];
};
#endif /* !__i386__ */
#endif /* _ASM_X86_SIGCONTEXT_H */
|
207	drivers/include/asm/special_insns.h	Normal file
@ -0,0 +1,207 @@
#ifndef _ASM_X86_SPECIAL_INSNS_H
#define _ASM_X86_SPECIAL_INSNS_H


#ifdef __KERNEL__

static inline void native_clts(void)
{
    asm volatile("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads stores around it, which can hurt performance. Solution is to
 * use a variable and mimic reads and writes to it to enforce serialization
 */
extern unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
    unsigned long val;
    asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
    return val;
}

static inline void native_write_cr0(unsigned long val)
{
    asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
    unsigned long val;
    asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
    return val;
}

static inline void native_write_cr2(unsigned long val)
{
    asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
    unsigned long val;
    asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
    return val;
}

static inline void native_write_cr3(unsigned long val)
{
    asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
    unsigned long val;
    asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
    return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
    unsigned long val;
    /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
     * exists, so it will never fail. */
#ifdef CONFIG_X86_32
    asm volatile("1: mov %%cr4, %0\n"
                 "2:\n"
                 _ASM_EXTABLE(1b, 2b)
                 : "=r" (val), "=m" (__force_order) : "0" (0));
#else
    val = native_read_cr4();
#endif
    return val;
}

static inline void native_write_cr4(unsigned long val)
{
    asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
    unsigned long cr8;
    asm volatile("movq %%cr8,%0" : "=r" (cr8));
    return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
    asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
    asm volatile("wbinvd": : :"memory");
}

extern asmlinkage void native_load_gs_index(unsigned);
|
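A usage sketch for the native_read_cr4()/native_write_cr4() pair above, mirroring the usual read-modify-write idiom; the helper name is illustrative:

    static inline void cr4_set_bits(unsigned long mask)
    {
        unsigned long cr4 = native_read_cr4();

        /* the __force_order dependency keeps the two accesses ordered */
        if ((cr4 | mask) != cr4)
            native_write_cr4(cr4 | mask);
    }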
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
    return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
    native_write_cr0(x);
}

static inline unsigned long read_cr2(void)
{
    return native_read_cr2();
}

static inline void write_cr2(unsigned long x)
{
    native_write_cr2(x);
}

static inline unsigned long read_cr3(void)
{
    return native_read_cr3();
}

static inline void write_cr3(unsigned long x)
{
    native_write_cr3(x);
}

static inline unsigned long read_cr4(void)
{
    return native_read_cr4();
}

static inline unsigned long read_cr4_safe(void)
{
    return native_read_cr4_safe();
}

static inline void write_cr4(unsigned long x)
{
    native_write_cr4(x);
}

static inline void wbinvd(void)
{
    native_wbinvd();
}

#ifdef CONFIG_X86_64

static inline unsigned long read_cr8(void)
{
    return native_read_cr8();
}

static inline void write_cr8(unsigned long x)
{
    native_write_cr8(x);
}

static inline void load_gs_index(unsigned selector)
{
    native_load_gs_index(selector);
}

#endif

/* Clear the 'TS' bit */
static inline void clts(void)
{
    native_clts();
}

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(read_cr0() | X86_CR0_TS)

static inline void clflush(volatile void *__p)
{
    asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

static inline void clflushopt(volatile void *__p)
{
    alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
                   ".byte 0x66; clflush %P0",
                   X86_FEATURE_CLFLUSHOPT,
                   "+m" (*(volatile char __force *)__p));
}

#define nop() asm volatile ("nop")


#endif /* __KERNEL__ */

#endif /* _ASM_X86_SPECIAL_INSNS_H */
|
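With the paravirt-neutral wrappers above in place, a full non-global TLB flush is just a CR3 reload. A minimal sketch, with an illustrative name:

    static inline void flush_tlb_local(void)
    {
        /* rewriting CR3 with its own value invalidates all
         * non-global TLB entries */
        write_cr3(read_cr3());
    }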
5	drivers/include/asm/string.h	Normal file
@ -0,0 +1,5 @@
#ifdef CONFIG_X86_32
# include <asm/string_32.h>
#else
# include <asm/string_64.h>
#endif
|
216	drivers/include/asm/x86_init.h	Normal file
@ -0,0 +1,216 @@
#ifndef _ASM_X86_PLATFORM_H
#define _ASM_X86_PLATFORM_H

#include <asm/pgtable_types.h>
//#include <asm/bootparam.h>

struct mpc_bus;
struct mpc_cpu;
struct mpc_table;
struct cpuinfo_x86;

/**
 * struct x86_init_mpparse - platform specific mpparse ops
 * @mpc_record:         platform specific mpc record accounting
 * @setup_ioapic_ids:   platform specific ioapic id override
 * @mpc_apic_id:        platform specific mpc apic id assignment
 * @smp_read_mpc_oem:   platform specific oem mpc table setup
 * @mpc_oem_pci_bus:    platform specific pci bus setup (default NULL)
 * @mpc_oem_bus_info:   platform specific mpc bus info
 * @find_smp_config:    find the smp configuration
 * @get_smp_config:     get the smp configuration
 */
struct x86_init_mpparse {
    void (*mpc_record)(unsigned int mode);
    void (*setup_ioapic_ids)(void);
    int (*mpc_apic_id)(struct mpc_cpu *m);
    void (*smp_read_mpc_oem)(struct mpc_table *mpc);
    void (*mpc_oem_pci_bus)(struct mpc_bus *m);
    void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
    void (*find_smp_config)(void);
    void (*get_smp_config)(unsigned int early);
};

/**
 * struct x86_init_resources - platform specific resource related ops
 * @probe_roms:         probe BIOS roms
 * @reserve_resources:  reserve the standard resources for the platform
 * @memory_setup:       platform specific memory setup
 *
 */
struct x86_init_resources {
    void (*probe_roms)(void);
    void (*reserve_resources)(void);
    char *(*memory_setup)(void);
};

/**
 * struct x86_init_irqs - platform specific interrupt setup
 * @pre_vector_init:    init code to run before interrupt vectors are set up.
 * @intr_init:          interrupt init code
 * @trap_init:          platform specific trap setup
 */
struct x86_init_irqs {
    void (*pre_vector_init)(void);
    void (*intr_init)(void);
    void (*trap_init)(void);
};

/**
 * struct x86_init_oem - oem platform specific customizing functions
 * @arch_setup:         platform specific architecture setup
 * @banner:             print a platform specific banner
 */
struct x86_init_oem {
    void (*arch_setup)(void);
    void (*banner)(void);
};

/**
 * struct x86_init_paging - platform specific paging functions
 * @pagetable_init:     platform specific paging initialization call to setup
 *                      the kernel pagetables and prepare accessor functions.
 *                      Callback must call paging_init(). Called once after the
 *                      direct mapping for phys memory is available.
 */
struct x86_init_paging {
    void (*pagetable_init)(void);
};

/**
 * struct x86_init_timers - platform specific timer setup
 * @setup_percpu_clockev:       set up the per cpu clock event device for the
 *                              boot cpu
 * @tsc_pre_init:               platform function called before TSC init
 * @timer_init:                 initialize the platform timer (default PIT/HPET)
 * @wallclock_init:             init the wallclock device
 */
struct x86_init_timers {
    void (*setup_percpu_clockev)(void);
    void (*tsc_pre_init)(void);
    void (*timer_init)(void);
    void (*wallclock_init)(void);
};

/**
 * struct x86_init_iommu - platform specific iommu setup
 * @iommu_init:         platform specific iommu setup
 */
struct x86_init_iommu {
    int (*iommu_init)(void);
};

/**
 * struct x86_init_pci - platform specific pci init functions
 * @arch_init:          platform specific pci arch init call
 * @init:               platform specific pci subsystem init
 * @init_irq:           platform specific pci irq init
 * @fixup_irqs:         platform specific pci irq fixup
 */
struct x86_init_pci {
    int (*arch_init)(void);
    int (*init)(void);
    void (*init_irq)(void);
    void (*fixup_irqs)(void);
};

/**
 * struct x86_init_ops - functions for platform specific setup
 *
 */
struct x86_init_ops {
    struct x86_init_resources resources;
    struct x86_init_mpparse   mpparse;
    struct x86_init_irqs      irqs;
    struct x86_init_oem       oem;
    struct x86_init_paging    paging;
    struct x86_init_timers    timers;
    struct x86_init_iommu     iommu;
    struct x86_init_pci       pci;
};

/**
 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
 * @setup_percpu_clockev:       set up the per cpu clock event device
 * @early_percpu_clock_init:    early init of the per cpu clock event device
 */
struct x86_cpuinit_ops {
    void (*setup_percpu_clockev)(void);
    void (*early_percpu_clock_init)(void);
    void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
};

struct timespec;

/**
 * struct x86_platform_ops - platform specific runtime functions
 * @calibrate_tsc:              calibrate TSC
 * @get_wallclock:              get time from HW clock like RTC etc.
 * @set_wallclock:              set time back to HW clock
 * @is_untracked_pat_range:     exclude from PAT logic
 * @nmi_init:                   enable NMI on cpus
 * @i8042_detect:               pre-detect if i8042 controller exists
 * @save_sched_clock_state:     save state for sched_clock() on suspend
 * @restore_sched_clock_state:  restore state for sched_clock() on resume
 * @apic_post_init:             adjust apic if needed
 */
struct x86_platform_ops {
    unsigned long (*calibrate_tsc)(void);
    void (*get_wallclock)(struct timespec *ts);
    int (*set_wallclock)(const struct timespec *ts);
    void (*iommu_shutdown)(void);
    bool (*is_untracked_pat_range)(u64 start, u64 end);
    void (*nmi_init)(void);
    unsigned char (*get_nmi_reason)(void);
    int (*i8042_detect)(void);
    void (*save_sched_clock_state)(void);
    void (*restore_sched_clock_state)(void);
    void (*apic_post_init)(void);
};

struct pci_dev;
struct msi_msg;

struct x86_msi_ops {
    int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
    void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
                            unsigned int dest, struct msi_msg *msg,
                            u8 hpet_id);
    void (*teardown_msi_irq)(unsigned int irq);
    void (*teardown_msi_irqs)(struct pci_dev *dev);
    void (*restore_msi_irqs)(struct pci_dev *dev);
    int  (*setup_hpet_msi)(unsigned int irq, unsigned int id);
};

struct IO_APIC_route_entry;
struct io_apic_irq_attr;
struct irq_data;
struct cpumask;

struct x86_io_apic_ops {
    void            (*init)   (void);
    unsigned int    (*read)   (unsigned int apic, unsigned int reg);
    void            (*write)  (unsigned int apic, unsigned int reg, unsigned int value);
    void            (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
    void            (*disable)(void);
    void            (*print_entries)(unsigned int apic, unsigned int nr_entries);
    int             (*set_affinity)(struct irq_data *data,
                                    const struct cpumask *mask,
                                    bool force);
    int             (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
                                   unsigned int destination, int vector,
                                   struct io_apic_irq_attr *attr);
    void            (*eoi_ioapic_pin)(int apic, int pin, int vector);
};

extern struct x86_init_ops x86_init;
extern struct x86_cpuinit_ops x86_cpuinit;
extern struct x86_platform_ops x86_platform;
extern struct x86_msi_ops x86_msi;
extern struct x86_io_apic_ops x86_io_apic_ops;
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);

#endif
|
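A sketch of how a platform consumes these tables: hooks are overridden by plain assignment during early boot, and x86_init_noop() is available to stub out stages. The myboard_* names are illustrative:

    static void myboard_arch_setup(void)
    {
        /* board-specific quirks go here */
    }

    void myboard_early_init(void)
    {
        x86_init.oem.arch_setup    = myboard_arch_setup;
        x86_init.timers.timer_init = x86_init_noop;  /* no PIT on this board */
    }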
@ -3,10 +3,10 @@
#ifndef __DDK_H__
#define __DDK_H__

#include <kernel.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <mutex.h>
#include <linux/mutex.h>
#include <linux/pci.h>


@ -17,12 +17,6 @@
#define PG_NOCACHE          0x018
#define PG_SHARED           0x200

#define _PAGE_PRESENT   (1<<0)
#define _PAGE_RW        (1<<1)
#define _PAGE_PWT       (1<<3)
#define _PAGE_PCD       (1<<4)
#define _PAGE_PAT       (1<<7)

#define MANUAL_DESTROY      0x80000000

#define ENTER() dbgprintf("enter %s\n",__FUNCTION__)
@ -31,24 +25,24 @@

typedef struct
{
    u32_t  code;
    u32_t  data[5];
    u32  code;
    u32  data[5];
}kevent_t;

typedef union
{
    struct
    {
        u32_t handle;
        u32_t euid;
        u32 handle;
        u32 euid;
    };
    u64_t raw;
    u64 raw;
}evhandle_t;

typedef struct
{
    u32_t handle;
    u32_t io_code;
    u32 handle;
    u32 io_code;
    void *input;
    int  inp_size;
    void *output;
@ -65,16 +59,10 @@ struct ddk_params;

int ddk_init(struct ddk_params *params);

u32_t drvEntry(int, char *)__asm__("_drvEntry");
u32 drvEntry(int, char *)__asm__("_drvEntry");



static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
//    if (size != 0 && n > SIZE_MAX / size)
//        return NULL;
    return kmalloc(n * size, flags);
}


#endif /* DDK_H */
|
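The overflow guard that upstream keeps in kmalloc_array() is exactly the two commented-out lines above; restored, the helper would read:

    static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
    {
        if (size != 0 && n > SIZE_MAX / size)
            return NULL;    /* n * size would wrap around */
        return kmalloc(n * size, flags);
    }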
@ -1,836 +0,0 @@
/**
 * \file drm.h
 * Header for the Direct Rendering Manager
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * \par Acknowledgments:
 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_H_
#define _DRM_H_

#include <linux/types.h>
#include <errno-base.h>
typedef unsigned int drm_handle_t;

//#include <asm/ioctl.h>		/* For _IO* macros */

#define DRM_MAJOR       226
#define DRM_MAX_MINOR   15

#define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */

#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))

typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;

/**
 * Cliprect.
 *
 * \warning: If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
    unsigned short x1;
    unsigned short y1;
    unsigned short x2;
    unsigned short y2;
};

/**
 * Drawable information.
 */
struct drm_drawable_info {
    unsigned int num_rects;
    struct drm_clip_rect *rects;
};

/**
 * Texture region,
 */
struct drm_tex_region {
    unsigned char next;
    unsigned char prev;
    unsigned char in_use;
    unsigned char padding;
    unsigned int age;
};

/**
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer.  To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
    __volatile__ unsigned int lock;	/**< lock variable */
    char padding[60];			/**< Pad to cache line */
};

/**
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
    int version_major;	  /**< Major version */
    int version_minor;	  /**< Minor version */
    int version_patchlevel;	/**< Patch level */
    size_t name_len;	  /**< Length of name buffer */
    char __user *name;	  /**< Name of driver */
    size_t date_len;	  /**< Length of date buffer */
    char __user *date;	  /**< User-space buffer to hold date */
    size_t desc_len;	  /**< Length of desc buffer */
    char __user *desc;	  /**< User-space buffer to hold desc */
};

/**
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
    size_t unique_len;	  /**< Length of unique */
    char __user *unique;  /**< Unique name for driver instantiation */
};

struct drm_list {
    int count;		  /**< Length of user-space structures */
    struct drm_version __user *version;
};

struct drm_block {
    int unused;
};

/**
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
    enum {
        DRM_ADD_COMMAND,
        DRM_RM_COMMAND,
        DRM_INST_HANDLER,
        DRM_UNINST_HANDLER
    } func;
    int irq;
};

/**
 * Type of memory to map.
 */
enum drm_map_type {
    _DRM_FRAME_BUFFER = 0,	/**< WC (no caching), no core dump */
    _DRM_REGISTERS = 1,		/**< no caching, no core dump */
    _DRM_SHM = 2,		/**< shared, cached */
    _DRM_AGP = 3,		/**< AGP/GART */
    _DRM_SCATTER_GATHER = 4,	/**< Scatter/gather memory for PCI DMA */
    _DRM_CONSISTENT = 5,	/**< Consistent memory for PCI DMA */
};

/**
 * Memory mapping flags.
 */
enum drm_map_flags {
    _DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
    _DRM_READ_ONLY = 0x02,
    _DRM_LOCKED = 0x04,		     /**< shared, cached, locked */
    _DRM_KERNEL = 0x08,		     /**< kernel requires access */
    _DRM_WRITE_COMBINING = 0x10,     /**< use write-combining if available */
    _DRM_CONTAINS_LOCK = 0x20,	     /**< SHM page that contains lock */
    _DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
    _DRM_DRIVER = 0x80		     /**< Managed by driver */
};

struct drm_ctx_priv_map {
    unsigned int ctx_id;	 /**< Context requesting private mapping */
    void *handle;		 /**< Handle of map */
};

/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
    unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
    unsigned long size;		 /**< Requested physical size (bytes) */
    enum drm_map_type type;	 /**< Type of memory to map */
    enum drm_map_flags flags;	 /**< Flags */
    void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
    int mtrr;			 /**< MTRR slot used */
    /*   Private data */
};

/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
    int idx;		/**< Which client desired? */
    int auth;		/**< Is client authenticated? */
    unsigned long pid;	/**< Process ID */
    unsigned long uid;	/**< User ID */
    unsigned long magic;	/**< Magic */
    unsigned long iocs;	/**< Ioctl count */
};

enum drm_stat_type {
    _DRM_STAT_LOCK,
    _DRM_STAT_OPENS,
    _DRM_STAT_CLOSES,
    _DRM_STAT_IOCTLS,
    _DRM_STAT_LOCKS,
    _DRM_STAT_UNLOCKS,
    _DRM_STAT_VALUE,	/**< Generic value */
    _DRM_STAT_BYTE,	/**< Generic byte counter (1024bytes/K) */
    _DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */

    _DRM_STAT_IRQ,	/**< IRQ */
    _DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
    _DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
    _DRM_STAT_DMA,	/**< DMA */
    _DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
    _DRM_STAT_MISSED	/**< Missed DMA opportunity */
    /* Add to the *END* of the list */
};

/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
    unsigned long count;
    struct {
        unsigned long value;
        enum drm_stat_type type;
    } data[15];
};

/**
 * Hardware locking flags.
 */
enum drm_lock_flags {
    _DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
    _DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
    _DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
    _DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
    /* These *HALT* flags aren't supported yet
       -- they will be used to support the
       full-screen DGA-like mode. */
    _DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
    _DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
    int context;
    enum drm_lock_flags flags;
};

/**
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
    /* Flags for DMA buffer dispatch */
    _DRM_DMA_BLOCK = 0x01,	  /**<
				   * Block until buffer dispatched.
				   *
				   * \note The buffer may not yet have
				   * been processed by the hardware --
				   * getting a hardware lock with the
				   * hardware quiescent will ensure
				   * that the buffer has been
				   * processed.
				   */
    _DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
    _DRM_DMA_PRIORITY = 0x04,	  /**< High priority dispatch */

    /* Flags for DMA buffer request */
    _DRM_DMA_WAIT = 0x10,	  /**< Wait for free buffers */
    _DRM_DMA_SMALLER_OK = 0x20,	  /**< Smaller-than-requested buffers OK */
    _DRM_DMA_LARGER_OK = 0x40	  /**< Larger-than-requested buffers OK */
};

/**
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
    int count;		 /**< Number of buffers of this size */
    int size;		 /**< Size in bytes */
    int low_mark;	 /**< Low water mark */
    int high_mark;	 /**< High water mark */
    enum {
        _DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
        _DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
        _DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
        _DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
        _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
    } flags;
    unsigned long agp_start; /**<
			      * Start address of where the AGP buffers are
			      * in the AGP aperture
			      */
};

/**
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
    int count;		/**< Entries in list */
    struct drm_buf_desc __user *list;
};

/**
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
    int count;
    int __user *list;
};

/**
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
    int idx;		       /**< Index into the master buffer list */
    int total;		       /**< Buffer size */
    int used;		       /**< Amount of buffer in use (for DMA) */
    void __user *address;      /**< Address of buffer */
};

/**
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
    int count;		/**< Length of the buffer list */
    void __user *virtual;	/**< Mmap'd area in user-virtual */
    struct drm_buf_pub __user *list;	/**< Buffer information */
};

/**
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
    int context;			  /**< Context handle */
    int send_count;			  /**< Number of buffers to send */
    int __user *send_indices;	  /**< List of handles to buffers */
    int __user *send_sizes;		  /**< Lengths of data to send */
    enum drm_dma_flags flags;	  /**< Flags */
    int request_count;		  /**< Number of buffers requested */
    int request_size;		  /**< Desired size for buffers */
    int __user *request_indices;	  /**< Buffer information */
    int __user *request_sizes;
    int granted_count;		  /**< Number of buffers granted */
};

enum drm_ctx_flags {
    _DRM_CONTEXT_PRESERVED = 0x01,
    _DRM_CONTEXT_2DONLY = 0x02
};

/**
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
    drm_context_t handle;
    enum drm_ctx_flags flags;
};

/**
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
    int count;
    struct drm_ctx __user *contexts;
};

/**
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
    drm_drawable_t handle;
};

/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
    DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;

struct drm_update_draw {
    drm_drawable_t handle;
    unsigned int type;
    unsigned int num;
    unsigned long long data;
};

/**
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
    drm_magic_t magic;
};

/**
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
    int irq;	/**< IRQ number */
    int busnum;	/**< bus number */
    int devnum;	/**< device number */
    int funcnum;	/**< function number */
};

enum drm_vblank_seq_type {
    _DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
    _DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
    /* bits 1-6 are reserved for high crtcs */
    _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
    _DRM_VBLANK_EVENT = 0x4000000,	/**< Send event instead of blocking */
    _DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
    _DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
    _DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
    _DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
    enum drm_vblank_seq_type type;
    unsigned int sequence;
    unsigned long signal;
};

struct drm_wait_vblank_reply {
    enum drm_vblank_seq_type type;
    unsigned int sequence;
    long tval_sec;
    long tval_usec;
};

/**
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
    struct drm_wait_vblank_request request;
    struct drm_wait_vblank_reply reply;
};
|
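A userspace-side sketch of the request/reply union above, assuming it is driven against a stock Linux DRM device node (fd is an open node; error handling elided):

    union drm_wait_vblank vbl = {0};

    vbl.request.type = _DRM_VBLANK_RELATIVE;
    vbl.request.sequence = 1;                   /* one vblank from now */
    ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
    /* vbl.reply now holds the sequence and tval_sec/tval_usec timestamp */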
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/**
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
    __u32 crtc;
    __u32 cmd;
};

/**
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
    unsigned long mode;	/**< AGP mode */
};

/**
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
    unsigned long size;	/**< In bytes -- will round to page boundary */
    unsigned long handle;	/**< Used for binding / unbinding */
    unsigned long type;	/**< Type of memory to allocate */
    unsigned long physical;	/**< Physical used by i810 */
};

/**
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
    unsigned long handle;	/**< From drm_agp_buffer */
    unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/**
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
    int agp_version_major;
    int agp_version_minor;
    unsigned long mode;
    unsigned long aperture_base;	/* physical address */
    unsigned long aperture_size;	/* bytes */
    unsigned long memory_allowed;	/* bytes */
    unsigned long memory_used;

    /* PCI information */
    unsigned short id_vendor;
    unsigned short id_device;
};

/**
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
    unsigned long size;	/**< In bytes -- will round to page boundary */
    unsigned long handle;	/**< Used for mapping / unmapping */
};

/**
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
    int drm_di_major;
    int drm_di_minor;
    int drm_dd_major;
    int drm_dd_minor;
};

/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
    /** Handle of the object to be closed. */
    __u32 handle;
    __u32 pad;
};

/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
    /** Handle for the object being named */
    __u32 handle;

    /** Returned global name */
    __u32 name;
};

/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
    /** Name of object being opened */
    __u32 name;

    /** Returned handle for the object */
    __u32 handle;

    /** Returned size of the object */
    __u64 size;
};

#define DRM_CAP_DUMB_BUFFER		0x1
#define DRM_CAP_VBLANK_HIGH_CRTC	0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
#define DRM_CAP_DUMB_PREFER_SHADOW	0x4
#define DRM_CAP_PRIME			0x5
#define DRM_PRIME_CAP_IMPORT		0x1
#define DRM_PRIME_CAP_EXPORT		0x2
#define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
#define DRM_CAP_ASYNC_PAGE_FLIP		0x7

/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
    __u64 capability;
    __u64 value;
};

/**
 * DRM_CLIENT_CAP_STEREO_3D
 *
 * if set to 1, the DRM core will expose the stereo 3D capabilities of the
 * monitor by advertising the supported 3D layouts in the flags of struct
 * drm_mode_modeinfo.
 */
#define DRM_CLIENT_CAP_STEREO_3D	1

/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
    __u64 capability;
    __u64 value;
};

#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
    __u32 handle;

    /** Flags.. only applicable for handle->fd */
    __u32 flags;

    /** Returned dmabuf file descriptor */
    __s32 fd;
};

#include <drm/drm_mode.h>

#if 0
#define DRM_IOCTL_BASE			'd'
#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP		DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT		DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS		DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL		DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)

#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
#define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)

#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)

#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */

#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)

#define DRM_IOCTL_MODE_CREATE_DUMB	DRM_IOWR(0xB2, struct drm_mode_create_dumb)
#define DRM_IOCTL_MODE_MAP_DUMB		DRM_IOWR(0xB3, struct drm_mode_map_dumb)
#define DRM_IOCTL_MODE_DESTROY_DUMB	DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
#define DRM_IOCTL_MODE_GETPLANE		DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE		DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
#define DRM_IOCTL_MODE_CURSOR2		DRM_IOWR(0xBB, struct drm_mode_cursor2)
#endif

/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x99.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE                0x40
#define DRM_COMMAND_END			0xA0
|
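A sketch of how a driver slots its private ioctls into that 0x40-0x99 window; the mydrv names and payload struct are hypothetical:

    #define DRM_MYDRV_SUBMIT	0x00	/* driver-relative number */
    #define DRM_IOCTL_MYDRV_SUBMIT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_MYDRV_SUBMIT, struct drm_mydrv_submit)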
/**
 * Header for events written back to userspace on the drm fd.  The
 * type defines the type of event, the length specifies the total
 * length of the event (including the header), and user_data is
 * typically a 64 bit value passed with the ioctl that triggered the
 * event.  A read on the drm fd will always only return complete
 * events, that is, if for example the read buffer is 100 bytes, and
 * there are two 64 byte events pending, only one will be returned.
 *
 * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
 * up are chipset specific.
 */
struct drm_event {
    __u32 type;
    __u32 length;
};

#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02

struct drm_event_vblank {
    struct drm_event base;
    __u64 user_data;
    __u32 tv_sec;
    __u32 tv_usec;
    __u32 sequence;
    __u32 reserved;
};
|
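A sketch of the read loop the comment above implies: reads return only whole events, so the buffer can be walked header by header (handle_vblank is a hypothetical consumer):

    char buf[1024];
    int i, len = read(fd, buf, sizeof(buf));

    for (i = 0; i < len; i += ((struct drm_event *)&buf[i])->length) {
        struct drm_event *e = (struct drm_event *)&buf[i];

        if (e->type == DRM_EVENT_VBLANK)
            handle_vblank((struct drm_event_vblank *)e);
    }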
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;

typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
#endif

#endif
|
@ -1,17 +1,14 @@
|
||||
/**
|
||||
* \file drmP.h
|
||||
* Private header for Direct Rendering Manager
|
||||
*
|
||||
* \author Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* \author Gareth Hughes <gareth@valinux.com>
|
||||
*/
|
||||
|
||||
/*
|
||||
* Internal Header for the Direct Rendering Manager
|
||||
*
|
||||
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
|
||||
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
|
||||
* Copyright (c) 2009-2010, Code Aurora Forum.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Author: Rickard E. (Rik) Faith <faith@valinux.com>
|
||||
* Author: Gareth Hughes <gareth@valinux.com>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
@ -35,91 +32,69 @@
|
||||
#ifndef _DRM_P_H_
|
||||
#define _DRM_P_H_
|
||||
|
||||
#define iowrite32(v, addr) writel((v), (addr))
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#ifdef __alpha__
|
||||
/* add include of current.h so that "current" is defined
|
||||
* before static inline funcs in wait.h. Doing this so we
|
||||
* can build the DRM (part of PI DRI). 4/21/2000 S + B */
|
||||
#include <asm/current.h>
|
||||
#endif /* __alpha__ */
|
||||
|
||||
#include <syscall.h>
|
||||
#include <linux/agp_backend.h>
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/err.h>
|
||||
|
||||
#include <linux/fs.h>
|
||||
//#include <linux/init.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#include <linux/irqreturn.h>
|
||||
#include <linux/mutex.h>
|
||||
//#include <asm/io.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
//#include <asm/uaccess.h>
|
||||
//#include <linux/workqueue.h>
|
||||
//#include <linux/poll.h>
|
||||
//#include <asm/pgalloc.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include <uapi/drm/drm.h>
|
||||
#include <uapi/drm/drm_mode.h>
|
||||
|
||||
#include <drm/drm.h>
|
||||
#include <drm/drm_agpsupport.h>
|
||||
#include <drm/drm_crtc.h>
|
||||
#include <drm/drm_global.h>
|
||||
#include <drm/drm_hashtab.h>
|
||||
#include <drm/drm_mem_util.h>
|
||||
#include <drm/drm_mm.h>
|
||||
#include <drm/drm_os_linux.h>
|
||||
#include <drm/drm_sarea.h>
|
||||
#include <drm/drm_vma_manager.h>
|
||||
|
||||
#include <linux/idr.h>
|
||||
|
||||
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
|
||||
|
||||
struct module;
|
||||
|
||||
struct drm_file;
|
||||
struct drm_device;
|
||||
struct drm_agp_head;
|
||||
struct drm_local_map;
|
||||
struct drm_device_dma;
|
||||
struct drm_dma_handle;
|
||||
struct drm_gem_object;
|
||||
|
||||
struct device_node;
|
||||
struct videomode;
|
||||
struct reservation_object;
|
||||
struct dma_buf_attachment;
|
||||
|
||||
struct inode;
|
||||
struct poll_table_struct;
|
||||
struct drm_lock_data;
|
||||
|
||||
struct sg_table;
|
||||
struct dma_buf;
|
||||
|
||||
//#include <drm/drm_os_linux.h>
|
||||
#include <drm/drm_hashtab.h>
|
||||
#include <drm/drm_mm.h>
|
||||
|
||||
#define KHZ2PICOS(a) (1000000000UL/(a))
|
||||
|
||||
/* Flags and return codes for get_vblank_timestamp() driver function. */
#define DRM_CALLED_FROM_VBLIRQ 1
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
#define DRM_VBLANKTIME_INVBL (1 << 1)


/* get_scanout_position() return flags */
#define DRM_SCANOUTPOS_VALID (1 << 0)
#define DRM_SCANOUTPOS_INVBL (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)

/*
 * 4 debug categories are defined:
 *
@ -156,8 +131,8 @@ struct dma_buf;
extern __printf(2, 3)
void drm_ut_debug_printk(const char *function_name,
                         const char *format, ...);
extern __printf(2, 3)
int drm_err(const char *func, const char *format, ...);
extern __printf(1, 2)
void drm_err(const char *format, ...);

/***********************************************************************/
/** \name DRM template customization defaults */
@ -175,24 +150,6 @@ int drm_err(const char *func, const char *format, ...);
#define DRIVER_PRIME 0x4000
#define DRIVER_RENDER 0x8000

#define DRIVER_BUS_PCI 0x1
#define DRIVER_BUS_PLATFORM 0x2
#define DRIVER_BUS_USB 0x3
#define DRIVER_BUS_HOST1X 0x4

/***********************************************************************/
/** \name Begin the DRM... */
/*@{*/

#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
                              also include looping detection. */

#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */

#define DRM_MAP_HASH_OFFSET 0x10000000

/*@}*/

/***********************************************************************/
/** \name Macros to make printk easier */
/*@{*/
@ -204,7 +161,7 @@ int drm_err(const char *func, const char *format, ...);
 * \param arg arguments
 */
#define DRM_ERROR(fmt, ...) \
    drm_err(__func__, fmt, ##__VA_ARGS__)
    drm_err(fmt, ##__VA_ARGS__)

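/*
 * A minimal usage sketch (illustrative only; "my_hw_init" is a made-up
 * helper): DRM_ERROR takes a printf()-like format string plus arguments:
 *
 *     ret = my_hw_init(dev);
 *     if (ret)
 *             DRM_ERROR("hardware init failed: %d\n", ret);
 */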
/**
 * Rate limited error output. Like DRM_ERROR() but won't flood the log.
@ -219,7 +176,7 @@ int drm_err(const char *func, const char *format, ...);
                          DEFAULT_RATELIMIT_BURST); \
                                                    \
    if (__ratelimit(&_rs))                          \
        drm_err(__func__, fmt, ##__VA_ARGS__);      \
        drm_err(fmt, ##__VA_ARGS__);                \
})

#define DRM_INFO(fmt, ...) \
@ -265,27 +222,8 @@ int drm_err(const char *func, const char *format, ...);
/** \name Internal types and structures */
/*@{*/

#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)

#define DRM_IF_VERSION(maj, min) (maj << 16 | min)

/**
 * Test that the hardware lock is held by the caller, returning otherwise.
 *
 * \param dev DRM device.
 * \param filp file pointer of the caller.
 */
#define LOCK_TEST_WITH_RETURN( dev, _file_priv )                               \
do {                                                                           \
    if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) ||          \
        _file_priv->master->lock.file_priv != _file_priv) {                    \
        DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",       \
                   __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
                   _file_priv->master->lock.file_priv, _file_priv);            \
        return -EINVAL;                                                        \
    }                                                                          \
} while (0)

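/*
 * A minimal sketch of how a legacy ioctl handler would use the macro above
 * (illustrative only; "my_legacy_ioctl" is a made-up function):
 *
 *     static int my_legacy_ioctl(struct drm_device *dev, void *data,
 *                                struct drm_file *file_priv)
 *     {
 *         LOCK_TEST_WITH_RETURN(dev, file_priv);
 *         // touch the hardware only after the lock check passed
 *         return 0;
 *     }
 */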
/**
 * Ioctl function type.
 *
@ -326,83 +264,6 @@ struct drm_ioctl_desc {
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
    [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}

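/*
 * A sketch of how a driver fills its ioctl table with the macro above
 * (illustrative only; MY_SUBMIT and my_submit_ioctl are made-up names,
 * the token must expand to existing DRM_* and DRM_IOCTL_* definitions):
 *
 *     static const struct drm_ioctl_desc my_ioctls[] = {
 *         DRM_IOCTL_DEF_DRV(MY_SUBMIT, my_submit_ioctl,
 *                           DRM_AUTH | DRM_RENDER_ALLOW),
 *     };
 */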
#if 0
struct drm_magic_entry {
    struct list_head head;
    struct drm_hash_item hash_item;
    struct drm_file *priv;
};

struct drm_vma_entry {
    struct list_head head;
    struct vm_area_struct *vma;
    pid_t pid;
};

/**
 * DMA buffer.
 */
struct drm_buf {
    int idx;                   /**< Index into master buflist */
    int total;                 /**< Buffer size */
    int order;                 /**< log-base-2(total) */
    int used;                  /**< Amount of buffer in use (for DMA) */
    unsigned long offset;      /**< Byte offset (used internally) */
    void *address;             /**< Address of buffer */
    unsigned long bus_address; /**< Bus address of buffer */
    struct drm_buf *next;      /**< Kernel-only: used for free list */
    __volatile__ int waiting;  /**< On kernel DMA queue */
    __volatile__ int pending;  /**< On hardware DMA queue */
    struct drm_file *file_priv;/**< Private of holding file descr */
    int context;               /**< Kernel queue for this buffer */
    int while_locked;          /**< Dispatch this buffer while locked */
    enum {
        DRM_LIST_NONE = 0,
        DRM_LIST_FREE = 1,
        DRM_LIST_WAIT = 2,
        DRM_LIST_PEND = 3,
        DRM_LIST_PRIO = 4,
        DRM_LIST_RECLAIM = 5
    } list;                    /**< Which list we're on */

    int dev_priv_size;         /**< Size of buffer private storage */
    void *dev_private;         /**< Per-buffer private storage */
};

/** bufs is one longer than it has to be */
struct drm_waitlist {
    int count;                 /**< Number of possible buffers */
    struct drm_buf **bufs;     /**< List of pointers to buffers */
    struct drm_buf **rp;       /**< Read pointer */
    struct drm_buf **wp;       /**< Write pointer */
    struct drm_buf **end;      /**< End pointer */
    spinlock_t read_lock;
    spinlock_t write_lock;
};
#endif


typedef struct drm_dma_handle {
    dma_addr_t busaddr;
    void *vaddr;
    size_t size;
} drm_dma_handle_t;

/**
 * Buffer entry. There is one of these for each buffer size order.
 */
struct drm_buf_entry {
    int buf_size;              /**< size */
    int buf_count;             /**< number of buffers */
    struct drm_buf *buflist;   /**< buffer list */
    int seg_count;
    int page_order;
    struct drm_dma_handle **seglist;

    int low_mark;              /**< Low water mark */
    int high_mark;             /**< High water mark */
};

/* Event queued up for userspace to read */
struct drm_pending_event {
    struct drm_event *event;
@ -457,7 +318,6 @@ struct drm_file {
    int event_space;
};

#if 0
/**
 * Lock data.
 */
@ -473,192 +333,6 @@ struct drm_lock_data {
    int idle_has_lock;
};

/**
 * DMA data.
 */
struct drm_device_dma {

    struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
    int buf_count;             /**< total number of buffers */
    struct drm_buf **buflist;  /**< Vector of pointers into drm_device_dma::bufs */
    int seg_count;
    int page_count;            /**< number of pages */
    unsigned long *pagelist;   /**< page list */
    unsigned long byte_count;
    enum {
        _DRM_DMA_USE_AGP = 0x01,
        _DRM_DMA_USE_SG = 0x02,
        _DRM_DMA_USE_FB = 0x04,
        _DRM_DMA_USE_PCI_RO = 0x08
    } flags;

};

/**
 * AGP memory entry. Stored as a doubly linked list.
 */
struct drm_agp_mem {
    unsigned long handle;      /**< handle */
    struct agp_memory *memory;
    unsigned long bound;       /**< address */
    int pages;
    struct list_head head;
};

/**
 * AGP data.
 *
 * \sa drm_agp_init() and drm_device::agp.
 */
struct drm_agp_head {
    struct agp_kern_info agp_info; /**< AGP device information */
    struct list_head memory;
    unsigned long mode;        /**< AGP mode */
    struct agp_bridge_data *bridge;
    int enabled;               /**< whether the AGP bus has been enabled */
    int acquired;              /**< whether the AGP device has been acquired */
    unsigned long base;
    int agp_mtrr;
    int cant_use_aperture;
    unsigned long page_mask;
};

/**
 * Scatter-gather memory.
 */
struct drm_sg_mem {
    unsigned long handle;
    void *virtual;
    int pages;
    struct page **pagelist;
    dma_addr_t *busaddr;
};

struct drm_sigdata {
    int context;
    struct drm_hw_lock *lock;
};

#endif

/**
 * Kernel side of a mapping
 */
struct drm_local_map {
    resource_size_t offset;    /**< Requested physical address (0 for SAREA)*/
    unsigned long size;        /**< Requested physical size (bytes) */
    enum drm_map_type type;    /**< Type of memory to map */
    enum drm_map_flags flags;  /**< Flags */
    void *handle;              /**< User-space: "Handle" to pass to mmap() */
                               /**< Kernel-space: kernel-virtual address */
    int mtrr;                  /**< MTRR slot used */
};

typedef struct drm_local_map drm_local_map_t;

/**
 * Mappings list
 */
struct drm_map_list {
    struct list_head head;     /**< list head */
    struct drm_hash_item hash;
    struct drm_local_map *map; /**< mapping */
    uint64_t user_token;
    struct drm_master *master;
};

/* location of GART table */
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB   2

#define DRM_ATI_GART_PCI  1
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP  3

struct drm_ati_pcigart_info {
    int gart_table_location;
    int gart_reg_if;
    void *addr;
    dma_addr_t bus_addr;
    dma_addr_t table_mask;
    struct drm_dma_handle *table_handle;
    struct drm_local_map mapping;
    int table_size;
};

/**
 * This structure defines the drm_mm memory object, which will be used by the
 * DRM for its buffer objects.
 */
struct drm_gem_object {
    /** Reference count of this object */
    struct kref refcount;

    /**
     * handle_count - gem file_priv handle count of this object
     *
     * Each handle also holds a reference. Note that when the handle_count
     * drops to 0 any global names (e.g. the id in the flink namespace) will
     * be cleared.
     *
     * Protected by dev->object_name_lock.
     * */
    unsigned handle_count;

    /** Related drm device */
    struct drm_device *dev;

    /** File representing the shmem storage */
    struct file *filp;

    /* Mapping info for this object */
    struct drm_vma_offset_node vma_node;

    /**
     * Size of the object, in bytes. Immutable over the object's
     * lifetime.
     */
    size_t size;

    /**
     * Global name for this object, starts at 1. 0 means unnamed.
     * Access is covered by the object_name_lock in the related drm_device
     */
    int name;

    /**
     * Memory domains. These monitor which caches contain read/write data
     * related to the object. When transitioning from one set of domains
     * to another, the driver is called to ensure that caches are suitably
     * flushed and invalidated
     */
    uint32_t read_domains;
    uint32_t write_domain;

    /**
     * While validating an exec operation, the
     * new read/write domain values are computed here.
     * They will be transferred to the above values
     * at the point that any cache flushing occurs
     */
    uint32_t pending_read_domains;
    uint32_t pending_write_domain;

    /**
     * dma_buf - dma buf associated with this GEM object
     *
     * Pointer to the dma-buf associated with this gem object (either
     * through importing or exporting). We break the resulting reference
     * loop when the last gem handle for this object is released.
     *
     * Protected by obj->object_name_lock
     */
    struct dma_buf *dma_buf;

};

#include <drm/drm_crtc.h>

/**
 * struct drm_master - drm master structure
 *
@ -666,7 +340,6 @@ struct drm_gem_object {
 * @minor: Link back to minor char device we are master for. Immutable.
 * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
 * @unique_len: Length of unique field. Protected by drm_global_mutex.
 * @unique_size: Amount allocated. Protected by drm_global_mutex.
 * @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
 * @magicfree: List of used authentication tokens. Protected by struct_mutex.
 * @lock: DRI lock information.
@ -677,10 +350,9 @@ struct drm_master {
    struct drm_minor *minor;
    char *unique;
    int unique_len;
    int unique_size;
//    struct drm_open_hash magiclist;
//    struct list_head magicfree;
//    struct drm_lock_data lock;
    struct drm_open_hash magiclist;
    struct list_head magicfree;
    struct drm_lock_data lock;
    void *driver_priv;
};

@ -692,17 +364,13 @@ struct drm_master {
/* Flags and return codes for get_vblank_timestamp() driver function. */
#define DRM_CALLED_FROM_VBLIRQ 1
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
#define DRM_VBLANKTIME_INVBL (1 << 1)
#define DRM_VBLANKTIME_IN_VBLANK (1 << 1)

/* get_scanout_position() return flags */
#define DRM_SCANOUTPOS_VALID (1 << 0)
#define DRM_SCANOUTPOS_INVBL (1 << 1)
#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)

struct drm_bus {
    int (*set_busid)(struct drm_device *dev, struct drm_master *master);
};

/**
 * DRM driver structure. This structure represents the common code for
 * a family of cards. There will be one drm_device for each card present
@ -894,6 +562,27 @@ struct drm_minor {
};


struct drm_pending_vblank_event {
    struct drm_pending_event base;
    int pipe;
    struct drm_event_vblank event;
};

struct drm_vblank_crtc {
    struct drm_device *dev;          /* pointer to the drm_device */
    wait_queue_head_t queue;         /**< VBLANK wait queue */
    struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
    struct timer_list disable_timer; /* delayed disable timer */
    atomic_t count;                  /**< number of VBLANK interrupts */
    atomic_t refcount;               /* number of users of vblank interrupts per crtc */
    u32 last;                        /* protected by dev->vbl_lock, used */
                                     /* for wraparound handling */
    u32 last_wait;                   /* Last vblank seqno waited per CRTC */
    unsigned int inmodeset;          /* Display driver is setting mode */
    int crtc;                        /* crtc index */
    bool enabled;                    /* so we don't call enable more than
                                        once per disable */
};

/**
 * DRM device structure. This structure represents a complete card that
@ -903,6 +592,9 @@ struct drm_device {
    struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
    int if_version;                  /**< Highest interface version set */

    /** \name Lifetime Management */
    /*@{ */
    struct kref ref;                 /**< Object ref-count */
    struct device *dev;              /**< Device structure of bus-device */
    struct drm_driver *driver;       /**< DRM driver managing the device */
    void *dev_private;               /**< DRM driver private data */
@ -964,6 +656,16 @@ struct drm_device {
     */
    bool vblank_disable_allowed;

    /*
     * If true, vblank interrupt will be disabled immediately when the
     * refcount drops to zero, as opposed to via the vblank disable
     * timer.
     * This can be set to true if the hardware has a working vblank
     * counter and the driver uses drm_vblank_on() and drm_vblank_off()
     * appropriately.
     */
    bool vblank_disable_immediate;

    /* array of size num_crtcs */
    struct drm_vblank_crtc *vblank;

@ -986,6 +688,10 @@ struct drm_device {

    unsigned int num_crtcs;          /**< Number of CRTCs on this device */

    struct {
        int context;
        struct drm_hw_lock *lock;
    } sigdata;

    struct drm_mode_config mode_config; /**< Current mode config */

@ -1032,112 +738,32 @@ extern long drm_ioctl(struct file *filp,
                      unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
                             unsigned int cmd, unsigned long arg);
extern int drm_lastclose(struct drm_device *dev);
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);

/* Device support (drm_fops.h) */
extern struct mutex drm_global_mutex;
extern int drm_open(struct inode *inode, struct file *filp);
extern ssize_t drm_read(struct file *filp, char __user *buffer,
                        size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp);

/* Mapping support (drm_vm.h) */
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);

/* Memory management support (drm_memory.h) */
#include <drm/drm_memory.h>


/* Misc. IOCTL support (drm_ioctl.h) */
extern int drm_irq_by_busid(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
extern int drm_getunique(struct drm_device *dev, void *data,
/* Misc. IOCTL support (drm_ioctl.c) */
int drm_noop(struct drm_device *dev, void *data,
             struct drm_file *file_priv);
extern int drm_setunique(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
extern int drm_getmap(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_getclient(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
extern int drm_getstats(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_getcap(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_setclientcap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
extern int drm_setversion(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
extern int drm_noop(struct drm_device *dev, void *data,
                    struct drm_file *file_priv);

/* Authentication IOCTL support (drm_auth.h) */
extern int drm_getmagic(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_authmagic(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);

/* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);

/* Locking IOCTL support (drm_lock.h) */
extern int drm_lock(struct drm_device *dev, void *data,
                    struct drm_file *file_priv);
extern int drm_unlock(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
extern void drm_idlelock_take(struct drm_lock_data *lock_data);
extern void drm_idlelock_release(struct drm_lock_data *lock_data);

/*
 * These are exported to drivers so that they can implement fencing using
 * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
 */

extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);

/* Buffer management support (drm_bufs.h) */
extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
                      unsigned int size, enum drm_map_type type,
                      enum drm_map_flags flags, struct drm_local_map **map_ptr);
extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
extern int drm_addbufs(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
extern int drm_infobufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_markbufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_freebufs(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_mapbufs(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
extern int drm_dma_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);

/* DMA support (drm_dma.h) */
extern int drm_legacy_dma_setup(struct drm_device *dev);
extern void drm_legacy_dma_takedown(struct drm_device *dev);
extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
extern void drm_core_reclaim_buffers(struct drm_device *dev,
                                     struct drm_file *filp);

/* IRQ support (drm_irq.h) */
extern int drm_control(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);

@ -1154,14 +780,14 @@ extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_on(struct drm_device *dev, int crtc);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);

extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
                                     struct timeval *tvblank, unsigned flags);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
                                                 int crtc, int *max_error,
                                                 struct timeval *vblank_time,
@ -1171,21 +797,23 @@ extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
                                            const struct drm_display_mode *mode);

/**
 * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
 * @crtc: which CRTC's vblank waitqueue to retrieve
 *
 * This function returns a pointer to the vblank waitqueue for the CRTC.
 * Drivers can use this to implement vblank waits using wait_event() & co.
 */
static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
{
    return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
}

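/*
 * A minimal sketch of a vblank wait built on the helper above (illustrative
 * only): grab a vblank reference, note the current count, then sleep on the
 * CRTC's waitqueue until the count advances.
 *
 *     if (drm_crtc_vblank_get(crtc) == 0) {
 *         u32 seq = drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
 *
 *         wait_event_timeout(*drm_crtc_vblank_waitqueue(crtc),
 *                            drm_vblank_count(crtc->dev,
 *                                             drm_crtc_index(crtc)) != seq,
 *                            msecs_to_jiffies(100));
 *         drm_crtc_vblank_put(crtc);
 *     }
 */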
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
extern int drm_modeset_ctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);

/* AGP/GART support (drm_agpsupport.h) */

/* Stub support (drm_stub.h) */
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
struct drm_master *drm_master_create(struct drm_minor *minor);
extern struct drm_master *drm_master_get(struct drm_master *master);
extern void drm_master_put(struct drm_master **master);

@ -1193,34 +821,14 @@ extern void drm_put_dev(struct drm_device *dev);
extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;

#if 0
extern unsigned int drm_vblank_offdelay;
extern unsigned int drm_timestamp_precision;
extern unsigned int drm_timestamp_monotonic;

extern struct class *drm_class;

extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
#endif
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
                            struct dentry *root);
extern int drm_debugfs_create_files(const struct drm_info_list *files,
                                    int count, struct dentry *root,
                                    struct drm_minor *minor);
extern int drm_debugfs_remove_files(const struct drm_info_list *files,
                                    int count, struct drm_minor *minor);
extern int drm_debugfs_cleanup(struct drm_minor *minor);
extern int drm_debugfs_connector_add(struct drm_connector *connector);
extern void drm_debugfs_connector_remove(struct drm_connector *connector);
#else
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
                                   struct dentry *root)
{
    return 0;
}

static inline int drm_debugfs_create_files(const struct drm_info_list *files,
                                           int count, struct dentry *root,
                                           struct drm_minor *minor)
@ -1233,164 +841,44 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
{
    return 0;
}

static inline int drm_debugfs_cleanup(struct drm_minor *minor)
{
    return 0;
}

static inline int drm_debugfs_connector_add(struct drm_connector *connector)
{
    return 0;
}
static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
{
}

#endif

/* Info file support */
extern int drm_name_info(struct seq_file *m, void *data);
extern int drm_vm_info(struct seq_file *m, void *data);
extern int drm_bufs_info(struct seq_file *m, void *data);
extern int drm_vblank_info(struct seq_file *m, void *data);
extern int drm_clients_info(struct seq_file *m, void* data);
extern int drm_gem_name_info(struct seq_file *m, void *data);
extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                                            struct drm_gem_object *obj, int flags);
extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                                      struct drm_file *file_priv, uint32_t handle, uint32_t flags,
                                      int *prime_fd);
extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                                   struct dma_buf *dma_buf);
extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                                      struct drm_file *file_priv, int prime_fd, uint32_t *handle);
extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);

#if DRM_DEBUG_CODE
extern int drm_vma_info(struct seq_file *m, void *data);
#endif
extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
                                            dma_addr_t *addrs, int max_pages);
extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);

/* Scatter Gather Support (drm_scatter.h) */
extern void drm_legacy_sg_cleanup(struct drm_device *dev);
extern int drm_sg_alloc(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
extern int drm_sg_free(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);

/* ATI PCIGART support (ati_pcigart.h) */
extern int drm_ati_pcigart_init(struct drm_device *dev,
                                struct drm_ati_pcigart_info * gart_info);
extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
                                   struct drm_ati_pcigart_info * gart_info);

extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
                                            size_t align);
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);

#if 0
/* sysfs support (drm_sysfs.c) */
struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
extern void drm_sysfs_destroy(void);
extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
extern void drm_sysfs_hotplug_event(struct drm_device *dev);
extern int drm_sysfs_connector_add(struct drm_connector *connector);
extern void drm_sysfs_connector_remove(struct drm_connector *connector);
#endif

/* Graphics Execution Manager library functions (drm_gem.c) */
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

#include <drm/drm_global.h>

static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
    kref_get(&obj->refcount);
}

static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
    if (obj != NULL)
        kref_put(&obj->refcount, drm_gem_object_free);
}

static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
    if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
            drm_gem_object_free(&obj->refcount);
        mutex_unlock(&dev->struct_mutex);
    }
}

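/*
 * Refcounting sketch (illustrative only): each lookup takes a reference
 * that the caller must drop. Prefer the _unlocked variant unless
 * struct_mutex is already held, since freeing the object may need to
 * take that mutex.
 *
 *     struct drm_gem_object *obj =
 *         drm_gem_object_lookup(dev, file_priv, handle);
 *     if (obj) {
 *         // ... use obj ...
 *         drm_gem_object_unreference_unlocked(obj);
 *     }
 */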
int drm_gem_handle_create_tail(struct drm_file *file_priv,
                               struct drm_gem_object *obj,
                               u32 *handlep);
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);


void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);

struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed);

struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
                                             struct drm_file *filp,
                                             u32 handle);
int drm_gem_close_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);

extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);

static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
                                                         unsigned int token)
{
    struct drm_map_list *_entry;
    list_for_each_entry(_entry, &dev->maplist, head)
        if (_entry->user_token == token)
            return _entry->map;
    return NULL;
}

static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
}

#include <drm/drm_mem_util.h>

struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                                 struct device *parent);
void drm_dev_ref(struct drm_device *dev);
void drm_dev_unref(struct drm_device *dev);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...);

struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);

extern int drm_fill_in_dev(struct drm_device *dev,
                           const struct pci_device_id *ent,
                           struct drm_driver *driver);
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
/*@}*/

/* PCI section */
@ -1420,10 +908,6 @@ static __inline__ int drm_device_is_pcie(struct drm_device *dev)
{
    return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}
#endif /* __KERNEL__ */

#define drm_sysfs_connector_add(connector)
#define drm_sysfs_connector_remove(connector)

#define LFB_SIZE 0x1000000
extern struct drm_device *main_device;

drivers/include/drm/drm_agpsupport.h (new file, 199 lines)
@ -0,0 +1,199 @@
#ifndef _DRM_AGPSUPPORT_H_
#define _DRM_AGPSUPPORT_H_

#include <linux/agp_backend.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <uapi/drm/drm.h>

struct drm_device;
struct drm_file;

#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \
                      defined(MODULE)))

struct drm_agp_head {
    struct agp_kern_info agp_info;
    struct list_head memory;
    unsigned long mode;
    struct agp_bridge_data *bridge;
    int enabled;
    int acquired;
    unsigned long base;
    int agp_mtrr;
    int cant_use_aperture;
    unsigned long page_mask;
};

#if __OS_HAS_AGP

void drm_free_agp(struct agp_memory * handle, int pages);
int drm_bind_agp(struct agp_memory * handle, unsigned int start);
int drm_unbind_agp(struct agp_memory * handle);
struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
                                      struct page **pages,
                                      unsigned long num_pages,
                                      uint32_t gtt_offset,
                                      uint32_t type);

struct drm_agp_head *drm_agp_init(struct drm_device *dev);
void drm_agp_clear(struct drm_device *dev);
int drm_agp_acquire(struct drm_device *dev);
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
int drm_agp_release(struct drm_device *dev);
int drm_agp_release_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
int drm_agp_info_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_agp_free_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);

#else /* __OS_HAS_AGP */

static inline void drm_free_agp(struct agp_memory * handle, int pages)
{
}

static inline int drm_bind_agp(struct agp_memory * handle, unsigned int start)
{
    return -ENODEV;
}

static inline int drm_unbind_agp(struct agp_memory * handle)
{
    return -ENODEV;
}

static inline struct agp_memory *drm_agp_bind_pages(struct drm_device *dev,
                                                    struct page **pages,
                                                    unsigned long num_pages,
                                                    uint32_t gtt_offset,
                                                    uint32_t type)
{
    return NULL;
}

static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
    return NULL;
}

static inline void drm_agp_clear(struct drm_device *dev)
{
}

static inline int drm_agp_acquire(struct drm_device *dev)
{
    return -ENODEV;
}

static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
                                        struct drm_file *file_priv)
{
    return -ENODEV;
}

static inline int drm_agp_release(struct drm_device *dev)
{
    return -ENODEV;
}

static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data,
                                        struct drm_file *file_priv)
{
    return -ENODEV;
}

static inline int drm_agp_enable(struct drm_device *dev,
                                 struct drm_agp_mode mode)
{
    return -ENODEV;
}

static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv)
{
    return -ENODEV;
}

static inline int drm_agp_info(struct drm_device *dev,
                               struct drm_agp_info *info)
{
    return -ENODEV;
}

static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
{
    return -ENODEV;
}

static inline int drm_agp_alloc(struct drm_device *dev,
                                struct drm_agp_buffer *request)
{
    return -ENODEV;
}

static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
    return -ENODEV;
}

static inline int drm_agp_free(struct drm_device *dev,
                               struct drm_agp_buffer *request)
{
    return -ENODEV;
}

static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
{
    return -ENODEV;
}

static inline int drm_agp_unbind(struct drm_device *dev,
                                 struct drm_agp_binding *request)
{
    return -ENODEV;
}

static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv)
{
    return -ENODEV;
}

static inline int drm_agp_bind(struct drm_device *dev,
                               struct drm_agp_binding *request)
{
    return -ENODEV;
}

static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
{
    return -ENODEV;
}

#endif /* __OS_HAS_AGP */

#endif /* _DRM_AGPSUPPORT_H_ */
drivers/include/drm/drm_atomic.h (new file, 69 lines)
@ -0,0 +1,69 @@
/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#ifndef DRM_ATOMIC_H_
#define DRM_ATOMIC_H_

#include <drm/drm_crtc.h>

struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
void drm_atomic_state_free(struct drm_atomic_state *state);

struct drm_crtc_state * __must_check
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
                          struct drm_crtc *crtc);
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
                           struct drm_plane *plane);
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
                               struct drm_connector *connector);

int __must_check
drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
                              struct drm_plane *plane, struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
                                 struct drm_framebuffer *fb);
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
                                  struct drm_crtc *crtc);
int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
                                   struct drm_crtc *crtc);
int
drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
                               struct drm_crtc *crtc);

void drm_atomic_legacy_backoff(struct drm_atomic_state *state);

int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);

#endif /* DRM_ATOMIC_H_ */
drivers/include/drm/drm_atomic_helper.h (new file, 126 lines)
@ -0,0 +1,126 @@
/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#ifndef DRM_ATOMIC_HELPER_H_
#define DRM_ATOMIC_HELPER_H_

#include <drm/drm_crtc.h>

int drm_atomic_helper_check(struct drm_device *dev,
                            struct drm_atomic_state *state);
int drm_atomic_helper_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool async);

void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
                                        struct drm_atomic_state *old_state);

void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
                                         struct drm_atomic_state *state);
void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
                                          struct drm_atomic_state *old_state);

int drm_atomic_helper_prepare_planes(struct drm_device *dev,
                                     struct drm_atomic_state *state);
void drm_atomic_helper_commit_planes(struct drm_device *dev,
                                     struct drm_atomic_state *state);
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
                                      struct drm_atomic_state *old_state);

void drm_atomic_helper_swap_state(struct drm_device *dev,
                                  struct drm_atomic_state *state);

/* implementations for legacy interfaces */
int drm_atomic_helper_update_plane(struct drm_plane *plane,
                                   struct drm_crtc *crtc,
                                   struct drm_framebuffer *fb,
                                   int crtc_x, int crtc_y,
                                   unsigned int crtc_w, unsigned int crtc_h,
                                   uint32_t src_x, uint32_t src_y,
                                   uint32_t src_w, uint32_t src_h);
int drm_atomic_helper_disable_plane(struct drm_plane *plane);
int drm_atomic_helper_set_config(struct drm_mode_set *set);

int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
                                        struct drm_property *property,
                                        uint64_t val);
int drm_atomic_helper_plane_set_property(struct drm_plane *plane,
                                         struct drm_property *property,
                                         uint64_t val);
int drm_atomic_helper_connector_set_property(struct drm_connector *connector,
                                             struct drm_property *property,
                                             uint64_t val);
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
                                struct drm_framebuffer *fb,
                                struct drm_pending_vblank_event *event,
                                uint32_t flags);

/* default implementations for state handling */
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
struct drm_crtc_state *
drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
                                          struct drm_crtc_state *state);

void drm_atomic_helper_plane_reset(struct drm_plane *plane);
struct drm_plane_state *
drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
                                           struct drm_plane_state *state);

void drm_atomic_helper_connector_reset(struct drm_connector *connector);
struct drm_connector_state *
drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
                                               struct drm_connector_state *state);

/**
 * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
 * @plane: the loop cursor
 * @crtc: the crtc whose planes are iterated
 *
 * This iterates over the current state, useful (for example) when applying
 * atomic state after it has been checked and swapped. To iterate over the
 * planes which *will* be attached (for ->atomic_check()) see
 * drm_crtc_for_each_pending_plane()
 */
#define drm_atomic_crtc_for_each_plane(plane, crtc) \
    drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)

/**
 * drm_atomic_crtc_state_for_each_plane - iterate over attached planes in new state
 * @plane: the loop cursor
 * @crtc_state: the incoming crtc-state
 *
 * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
 * attached if the specified state is applied. Useful during (for example)
 * ->atomic_check() operations, to validate the incoming state
 */
#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
    drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)

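/*
 * A sketch of both iterators above (illustrative only; "plane" is a
 * struct drm_plane loop cursor, the loop bodies are placeholders):
 *
 *     struct drm_plane *plane;
 *
 *     // planes attached in the current (already swapped) state
 *     drm_atomic_crtc_for_each_plane(plane, crtc) {
 *         // e.g. flush per-plane hardware registers
 *     }
 *
 *     // planes that would be attached if crtc_state were applied
 *     drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
 *         // e.g. validate the requested configuration
 *     }
 */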
#endif /* DRM_ATOMIC_HELPER_H_ */

drivers/include/drm/drm_cache.h (new file, 38 lines)
@ -0,0 +1,38 @@
/**************************************************************************
 *
 * Copyright 2009 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Dave Airlie <airlied@redhat.com>
 */

#ifndef _DRM_CACHE_H_
#define _DRM_CACHE_H_

void drm_clflush_pages(struct page *pages[], unsigned long num_pages);

#endif
@ -31,8 +31,8 @@
#include <linux/idr.h>
#include <linux/fb.h>
#include <linux/hdmi.h>
#include <drm/drm_mode.h>
#include <drm/drm_fourcc.h>
#include <uapi/drm/drm_mode.h>
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>

struct drm_device;
@ -42,6 +42,7 @@ struct drm_object_properties;
struct drm_file;
struct drm_clip_rect;
struct device_node;
struct fence;

#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@ -136,14 +137,22 @@ struct drm_display_info {
    u8 cea_rev;
};

/* data corresponds to displayid vend/prod/serial */
struct drm_tile_group {
    struct kref refcount;
    struct drm_device *dev;
    int id;
    u8 group_data[8];
};

struct drm_framebuffer_funcs {
    /* note: use drm_framebuffer_remove() */
    void (*destroy)(struct drm_framebuffer *framebuffer);
    int (*create_handle)(struct drm_framebuffer *fb,
                         struct drm_file *file_priv,
                         unsigned int *handle);
    /**
     * Optinal callback for the dirty fb ioctl.
    /*
     * Optional callback for the dirty fb ioctl.
     *
     * Userspace can notify the driver via this callback
     * that an area of the framebuffer has changed and should
@ -196,7 +205,7 @@ struct drm_framebuffer {
struct drm_property_blob {
    struct drm_mode_object base;
    struct list_head head;
    unsigned int length;
    size_t length;
    unsigned char data[];
};

@ -215,32 +224,74 @@ struct drm_property {
    uint64_t *values;
    struct drm_device *dev;

    struct list_head enum_blob_list;
    struct list_head enum_list;
};

void drm_modeset_lock_all(struct drm_device *dev);
void drm_modeset_unlock_all(struct drm_device *dev);
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);

struct drm_crtc;
struct drm_connector;
struct drm_encoder;
struct drm_pending_vblank_event;
struct drm_plane;
struct drm_bridge;
struct drm_atomic_state;

/**
 * drm_crtc_funcs - control CRTCs for a given device
 * struct drm_crtc_state - mutable CRTC state
 * @enable: whether the CRTC should be enabled, gates all other state
 * @mode_changed: for use by helpers and drivers when computing state updates
 * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
 * @last_vblank_count: for helpers and drivers to capture the vblank of the
 *	update to ensure framebuffer cleanup isn't done too early
 * @planes_changed: for use by helpers and drivers when computing state updates
 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
 * @mode: current mode timings
 * @event: optional pointer to a DRM event to signal upon completion of the
 *	state update
 * @state: backpointer to global drm_atomic_state
 */
struct drm_crtc_state {
    bool enable;

    /* computed state bits used by helpers and drivers */
    bool planes_changed : 1;
    bool mode_changed : 1;

    /* attached planes bitmask:
     * WARNING: transitional helpers do not maintain plane_mask so
     * drivers not converted over to atomic helpers should not rely
     * on plane_mask being accurate!
     */
    u32 plane_mask;

    /* last_vblank_count: for vblank waits before cleanup */
    u32 last_vblank_count;

    /* adjusted_mode: for use by helpers and drivers */
    struct drm_display_mode adjusted_mode;

    struct drm_display_mode mode;

    struct drm_pending_vblank_event *event;

    struct drm_atomic_state *state;
};

/**
|
||||
* struct drm_crtc_funcs - control CRTCs for a given device
|
||||
* @save: save CRTC state
|
||||
* @restore: restore CRTC state
|
||||
* @reset: reset CRTC after state has been invalidated (e.g. resume)
|
||||
* @cursor_set: setup the cursor
|
||||
* @cursor_set2: setup the cursor with hotspot, superseeds @cursor_set if set
|
||||
* @cursor_move: move the cursor
|
||||
* @gamma_set: specify color ramp for CRTC
|
||||
* @destroy: deinit and free object
|
||||
* @set_property: called when a property is changed
|
||||
* @set_config: apply a new CRTC configuration
|
||||
* @page_flip: initiate a page flip
|
||||
* @atomic_duplicate_state: duplicate the atomic state for this CRTC
|
||||
* @atomic_destroy_state: destroy an atomic state for this CRTC
|
||||
* @atomic_set_property: set a property on an atomic state for this CRTC
|
||||
*
|
||||
* The drm_crtc_funcs structure is the central CRTC management structure
|
||||
* in the DRM. Each CRTC controls one or more connectors (note that the name
|
||||
@ -291,16 +342,28 @@ struct drm_crtc_funcs {
|
||||
|
||||
int (*set_property)(struct drm_crtc *crtc,
|
||||
struct drm_property *property, uint64_t val);
|
||||
|
||||
/* atomic update handling */
|
||||
struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
|
||||
void (*atomic_destroy_state)(struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *state);
|
||||
int (*atomic_set_property)(struct drm_crtc *crtc,
|
||||
struct drm_crtc_state *state,
|
||||
struct drm_property *property,
|
||||
uint64_t val);
|
||||
};

/**
 * drm_crtc - central CRTC control structure
 * struct drm_crtc - central CRTC control structure
 * @dev: parent DRM device
 * @port: OF node used by drm_of_find_possible_crtcs()
 * @head: list management
 * @mutex: per-CRTC locking
 * @base: base KMS object for ID tracking etc.
 * @primary: primary plane for this CRTC
 * @cursor: cursor plane for this CRTC
 * @cursor_x: current x position of the cursor, used for universal cursor planes
 * @cursor_y: current y position of the cursor, used for universal cursor planes
 * @enabled: is this CRTC enabled?
 * @mode: current mode timings
 * @hwmode: mode timings as programmed to hw regs
@ -313,10 +376,13 @@ struct drm_crtc_funcs {
 * @gamma_size: size of gamma ramp
 * @gamma_store: gamma ramp values
 * @framedur_ns: precise frame timing
 * @framedur_ns: precise line timing
 * @linedur_ns: precise line timing
 * @pixeldur_ns: precise pixel timing
 * @helper_private: mid-layer private data
 * @properties: property tracking for this CRTC
 * @state: current atomic state for this CRTC
 * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
 *	legacy ioctls
 *
 * Each CRTC may have one or more connectors associated with it. This structure
 * allows the CRTC to be controlled.
@ -326,7 +392,7 @@ struct drm_crtc {
	struct device_node *port;
	struct list_head head;

	/**
	/*
	 * crtc mutex
	 *
	 * This provides a read lock for the overall crtc state (mode, dpms
@ -345,10 +411,6 @@ struct drm_crtc {
	int cursor_x;
	int cursor_y;

	/* Temporary tracking of the old fb while a modeset is ongoing. Used
	 * by drm_mode_set_config_internal to implement correct refcounting. */
	struct drm_framebuffer *old_fb;

	bool enabled;

	/* Requested mode from modesetting. */
@ -375,11 +437,32 @@ struct drm_crtc {
	void *helper_private;

	struct drm_object_properties properties;

	struct drm_crtc_state *state;

	/*
	 * For legacy crtc ioctls so that atomic drivers can get at the locking
	 * acquire context.
	 */
	struct drm_modeset_acquire_ctx *acquire_ctx;
};

/**
 * struct drm_connector_state - mutable connector state
 * @crtc: CRTC to connect connector to, NULL if disabled
 * @best_encoder: can be used by helpers and drivers to select the encoder
 * @state: backpointer to global drm_atomic_state
 */
struct drm_connector_state {
	struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */

	struct drm_encoder *best_encoder;

	struct drm_atomic_state *state;
};

/**
 * drm_connector_funcs - control connectors on a given device
 * struct drm_connector_funcs - control connectors on a given device
 * @dpms: set power state (see drm_crtc_funcs above)
 * @save: save connector state
 * @restore: restore connector state
@ -389,6 +472,9 @@ struct drm_crtc {
 * @set_property: property for this connector may need an update
 * @destroy: make object go away
 * @force: notify the driver that the connector is forced on
 * @atomic_duplicate_state: duplicate the atomic state for this connector
 * @atomic_destroy_state: destroy an atomic state for this connector
 * @atomic_set_property: set a property on an atomic state for this connector
 *
 * Each CRTC may have one or more connectors attached to it. The functions
 * below allow the core DRM code to control connectors, enumerate available modes,
@ -413,10 +499,19 @@ struct drm_connector_funcs {
			    uint64_t val);
	void (*destroy)(struct drm_connector *connector);
	void (*force)(struct drm_connector *connector);

	/* atomic update handling */
	struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
	void (*atomic_destroy_state)(struct drm_connector *connector,
				     struct drm_connector_state *state);
	int (*atomic_set_property)(struct drm_connector *connector,
				   struct drm_connector_state *state,
				   struct drm_property *property,
				   uint64_t val);
};

/**
 * drm_encoder_funcs - encoder controls
 * struct drm_encoder_funcs - encoder controls
 * @reset: reset state (e.g. at init or resume time)
 * @destroy: cleanup and free associated data
 *
@ -430,7 +525,7 @@ struct drm_encoder_funcs {
#define DRM_CONNECTOR_MAX_ENCODER 3

/**
 * drm_encoder - central DRM encoder structure
 * struct drm_encoder - central DRM encoder structure
 * @dev: parent DRM device
 * @head: list management
 * @base: base KMS object
@ -474,7 +569,7 @@ struct drm_encoder {
#define MAX_ELD_BYTES 128

/**
 * drm_connector - central DRM connector control structure
 * struct drm_connector - central DRM connector control structure
 * @dev: parent DRM device
 * @kdev: kernel device for sysfs attributes
 * @attr: sysfs attributes
@ -485,6 +580,7 @@ struct drm_encoder {
 * @connector_type_id: index into connector type enum
 * @interlace_allowed: can this connector handle interlaced modes?
 * @doublescan_allowed: can this connector handle doublescan?
 * @stereo_allowed: can this connector handle stereo modes?
 * @modes: modes available on this connector (from fill_modes() + user)
 * @status: one of the drm_connector_status enums (connected, not, or unknown)
 * @probed_modes: list of modes derived directly from the display
@ -492,10 +588,13 @@ struct drm_encoder {
 * @funcs: connector control functions
 * @edid_blob_ptr: DRM property containing EDID if present
 * @properties: property tracking for this connector
 * @path_blob_ptr: DRM blob property data for the DP MST path property
 * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
 * @dpms: current dpms state
 * @helper_private: mid-layer private data
 * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
 * @force: a %DRM_FORCE_<foo> state for forced mode sets
 * @override_edid: has the EDID been overwritten through debugfs for testing?
 * @encoder_ids: valid encoders for this connector
 * @encoder: encoder driving this connector, if any
 * @eld: EDID-like data, if present
@ -505,6 +604,18 @@ struct drm_encoder {
 * @video_latency: video latency info from ELD, if found
 * @audio_latency: audio latency info from ELD, if found
 * @null_edid_counter: track sinks that give us all zeros for the EDID
 * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
 * @debugfs_entry: debugfs directory for this connector
 * @state: current atomic state for this connector
 * @has_tile: is this connector connected to a tiled monitor
 * @tile_group: tile group for the connected monitor
 * @tile_is_single_monitor: whether the tile is one monitor housing
 * @num_h_tile: number of horizontal tiles in the tile group
 * @num_v_tile: number of vertical tiles in the tile group
 * @tile_h_loc: horizontal location of this tile
 * @tile_v_loc: vertical location of this tile
 * @tile_h_size: horizontal size of this tile.
 * @tile_v_size: vertical size of this tile.
 *
 * Each connector may be connected to one or more CRTCs, or may be clonable by
 * another connector if they can share a CRTC. Each connector also has a specific
@ -540,6 +651,8 @@ struct drm_connector {

	struct drm_property_blob *path_blob_ptr;

	struct drm_property_blob *tile_blob_ptr;

	uint8_t polled; /* DRM_CONNECTOR_POLL_* */

	/* requested DPMS state */
@ -548,6 +661,7 @@ struct drm_connector {
	void *helper_private;

	/* forced on connector */
	struct drm_cmdline_mode cmdline_mode;
	enum drm_connector_force force;
	bool override_edid;
	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
@ -564,14 +678,63 @@ struct drm_connector {
	unsigned bad_edid_counter;

	struct dentry *debugfs_entry;

	struct drm_connector_state *state;

	/* DisplayID bits */
	bool has_tile;
	struct drm_tile_group *tile_group;
	bool tile_is_single_monitor;

	uint8_t num_h_tile, num_v_tile;
	uint8_t tile_h_loc, tile_v_loc;
	uint16_t tile_h_size, tile_v_size;
};

/**
 * drm_plane_funcs - driver plane control functions
 * struct drm_plane_state - mutable plane state
 * @crtc: currently bound CRTC, NULL if disabled
 * @fb: currently bound framebuffer
 * @fence: optional fence to wait for before scanning out @fb
 * @crtc_x: left position of visible portion of plane on crtc
 * @crtc_y: upper position of visible portion of plane on crtc
 * @crtc_w: width of visible portion of plane on crtc
 * @crtc_h: height of visible portion of plane on crtc
 * @src_x: left position of visible portion of plane within
 *	plane (in 16.16)
 * @src_y: upper position of visible portion of plane within
 *	plane (in 16.16)
 * @src_w: width of visible portion of plane (in 16.16)
 * @src_h: height of visible portion of plane (in 16.16)
 * @state: backpointer to global drm_atomic_state
 */
struct drm_plane_state {
	struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
	struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
	struct fence *fence;

	/* Signed dest location allows it to be partially off screen */
	int32_t crtc_x, crtc_y;
	uint32_t crtc_w, crtc_h;

	/* Source values are 16.16 fixed point */
	uint32_t src_x, src_y;
	uint32_t src_h, src_w;

	struct drm_atomic_state *state;
};
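
The source coordinates use 16.16 fixed point, so driver code programming a scanout engine recovers whole pixels with a shift; the low 16 bits are only meaningful to scaling hardware. A small hypothetical helper to make the convention concrete:

/* Convert the 16.16 fixed-point source rectangle to integer pixels;
 * the fractional part is dropped here. (Illustrative only.) */
static void foo_plane_src_rect(const struct drm_plane_state *state,
			       unsigned int *x, unsigned int *y,
			       unsigned int *w, unsigned int *h)
{
	*x = state->src_x >> 16;
	*y = state->src_y >> 16;
	*w = state->src_w >> 16;
	*h = state->src_h >> 16;
}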

/**
 * struct drm_plane_funcs - driver plane control functions
 * @update_plane: update the plane configuration
 * @disable_plane: shut down the plane
 * @destroy: clean up plane resources
 * @reset: reset plane after state has been invalidated (e.g. resume)
 * @set_property: called when a property is changed
 * @atomic_duplicate_state: duplicate the atomic state for this plane
 * @atomic_destroy_state: destroy an atomic state for this plane
 * @atomic_set_property: set a property on an atomic state for this plane
 */
struct drm_plane_funcs {
	int (*update_plane)(struct drm_plane *plane,
@ -582,9 +745,19 @@ struct drm_plane_funcs {
			    uint32_t src_w, uint32_t src_h);
	int (*disable_plane)(struct drm_plane *plane);
	void (*destroy)(struct drm_plane *plane);
	void (*reset)(struct drm_plane *plane);

	int (*set_property)(struct drm_plane *plane,
			    struct drm_property *property, uint64_t val);

	/* atomic update handling */
	struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
	void (*atomic_destroy_state)(struct drm_plane *plane,
				     struct drm_plane_state *state);
	int (*atomic_set_property)(struct drm_plane *plane,
				   struct drm_plane_state *state,
				   struct drm_property *property,
				   uint64_t val);
};

enum drm_plane_type {
@ -594,7 +767,7 @@ enum drm_plane_type {
};

/**
 * drm_plane - central DRM plane control structure
 * struct drm_plane - central DRM plane control structure
 * @dev: DRM device this plane belongs to
 * @head: for list management
 * @base: base mode object
@ -603,14 +776,19 @@ enum drm_plane_type {
 * @format_count: number of formats supported
 * @crtc: currently bound CRTC
 * @fb: currently bound fb
 * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
 *	drm_mode_set_config_internal() to implement correct refcounting.
 * @funcs: helper functions
 * @properties: property tracking for this plane
 * @type: type of plane (overlay, primary, cursor)
 * @state: current atomic state for this plane
 */
struct drm_plane {
	struct drm_device *dev;
	struct list_head head;

	struct drm_modeset_lock mutex;

	struct drm_mode_object base;

	uint32_t possible_crtcs;
@ -620,15 +798,21 @@ struct drm_plane {
	struct drm_crtc *crtc;
	struct drm_framebuffer *fb;

	struct drm_framebuffer *old_fb;

	const struct drm_plane_funcs *funcs;

	struct drm_object_properties properties;

	enum drm_plane_type type;

	void *helper_private;

	struct drm_plane_state *state;
};

/**
 * drm_bridge_funcs - drm_bridge control functions
 * struct drm_bridge_funcs - drm_bridge control functions
 * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
 * @disable: Called right before encoder prepare, disables the bridge
 * @post_disable: Called right after encoder prepare, for lockstepped disable
@ -652,7 +836,7 @@ struct drm_bridge_funcs {
};

/**
 * drm_bridge - central DRM bridge control structure
 * struct drm_bridge - central DRM bridge control structure
 * @dev: DRM device this bridge belongs to
 * @head: list management
 * @base: base mode object
@ -670,8 +854,35 @@ struct drm_bridge {
};

/**
 * drm_mode_set - new values for a CRTC config change
 * @head: list management
 * struct drm_atomic_state - the global state object for atomic updates
 * @dev: parent DRM device
 * @flags: state flags like async update
 * @planes: pointer to array of plane pointers
 * @plane_states: pointer to array of plane states pointers
 * @crtcs: pointer to array of CRTC pointers
 * @crtc_states: pointer to array of CRTC states pointers
 * @num_connector: size of the @connectors and @connector_states arrays
 * @connectors: pointer to array of connector pointers
 * @connector_states: pointer to array of connector states pointers
 * @acquire_ctx: acquire context for this atomic modeset state update
 */
struct drm_atomic_state {
	struct drm_device *dev;
	uint32_t flags;
	struct drm_plane **planes;
	struct drm_plane_state **plane_states;
	struct drm_crtc **crtcs;
	struct drm_crtc_state **crtc_states;
	int num_connector;
	struct drm_connector **connectors;
	struct drm_connector_state **connector_states;

	struct drm_modeset_acquire_ctx *acquire_ctx;
};

/**
 * struct drm_mode_set - new values for a CRTC config change
 * @fb: framebuffer to use for new config
 * @crtc: CRTC whose configuration we're about to change
 * @mode: mode timings to use
@ -701,6 +912,9 @@ struct drm_mode_set {
 * struct drm_mode_config_funcs - basic driver provided mode setting functions
 * @fb_create: create a new framebuffer object
 * @output_poll_changed: function to handle output configuration changes
 * @atomic_check: check whether a given atomic state update is possible
 * @atomic_commit: commit an atomic state update previously verified with
 *	atomic_check()
 *
 * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
 * involve drivers.
@ -710,13 +924,20 @@ struct drm_mode_config_funcs {
				     struct drm_file *file_priv,
				     struct drm_mode_fb_cmd2 *mode_cmd);
	void (*output_poll_changed)(struct drm_device *dev);

	int (*atomic_check)(struct drm_device *dev,
			    struct drm_atomic_state *a);
	int (*atomic_commit)(struct drm_device *dev,
			     struct drm_atomic_state *a,
			     bool async);
};
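
Together with struct drm_atomic_state above, these two hooks define the intended flow: assemble a state object, validate it via @atomic_check, then apply it via @atomic_commit. A rough sketch of a one-plane update, assuming the drm_atomic_state_alloc()/drm_atomic_get_plane_state()/drm_atomic_commit() entry points from the 3.19 drm_atomic.h are present in this tree; the -EDEADLK backoff loop a real caller needs is elided:

/* Sketch only: on success drm_atomic_commit() takes ownership of the
 * state, so it is freed here only on failure. */
static int foo_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
			    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret = 0;

	state = drm_atomic_state_alloc(plane->dev);
	if (!state)
		return -ENOMEM;
	state->acquire_ctx = ctx;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto out;
	}
	drm_atomic_set_fb_for_plane(plane_state, fb);

	/* ends up in dev->mode_config.funcs->atomic_check/atomic_commit */
	ret = drm_atomic_commit(state);
out:
	if (ret)
		drm_atomic_state_free(state);
	return ret;
}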

/**
 * drm_mode_group - group of mode setting resources for potential sub-grouping
 * struct drm_mode_group - group of mode setting resources for potential sub-grouping
 * @num_crtcs: CRTC count
 * @num_encoders: encoder count
 * @num_connectors: connector count
 * @num_bridges: bridge count
 * @id_list: list of KMS object IDs in this group
 *
 * Currently this simply tracks the global mode setting state. But in the
@ -736,10 +957,14 @@ struct drm_mode_group {
};

/**
 * drm_mode_config - Mode configuration control structure
 * struct drm_mode_config - Mode configuration control structure
 * @mutex: mutex protecting KMS related lists and structures
 * @connection_mutex: ww mutex protecting connector state and routing
 * @acquire_ctx: global implicit acquire context used by atomic drivers for
 *	legacy ioctls
 * @idr_mutex: mutex for KMS ID allocation and management
 * @crtc_idr: main KMS ID tracking object
 * @fb_lock: mutex to protect fb state and lists
 * @num_fb: number of fbs available
 * @fb_list: list of framebuffers available
 * @num_connector: number of connectors on this device
@ -748,17 +973,28 @@ struct drm_mode_group {
 * @bridge_list: list of bridge objects
 * @num_encoder: number of encoders on this device
 * @encoder_list: list of encoder objects
 * @num_overlay_plane: number of overlay planes on this device
 * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
 * @plane_list: list of plane objects
 * @num_crtc: number of CRTCs on this device
 * @crtc_list: list of CRTC objects
 * @property_list: list of property objects
 * @min_width: minimum pixel width on this device
 * @min_height: minimum pixel height on this device
 * @max_width: maximum pixel width on this device
 * @max_height: maximum pixel height on this device
 * @funcs: core driver provided mode setting functions
 * @fb_base: base address of the framebuffer
 * @poll_enabled: track polling status for this device
 * @poll_enabled: track polling support for this device
 * @poll_running: track polling status for this device
 * @output_poll_work: delayed work for polling in process context
 * @property_blob_list: list of all the blob property objects
 * @*_property: core property tracking
 * @preferred_depth: preferred RGB pixel depth, used by fb helpers
 * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
 * @async_page_flip: does this device support async flips on the primary plane?
 * @cursor_width: hint to userspace for max cursor width
 * @cursor_height: hint to userspace for max cursor height
 *
 * Core mode resource tracking structure. All CRTC, encoders, and connectors
 * enumerated by the driver are added here, as are global properties. Some
@ -770,16 +1006,10 @@ struct drm_mode_config {
	struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
	struct mutex idr_mutex; /* for IDR management */
	struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
	struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
	/* this is limited to one for now */

	/**
	 * fb_lock - mutex to protect fb state
	 *
	 * Besides the global fb list this also protects the fbs list in the
	 * file_priv
	 */
	struct mutex fb_lock;
	struct mutex fb_lock; /* protects global and per-file fb lists */
	int num_fb;
	struct list_head fb_list;

@ -820,7 +1050,9 @@ struct drm_mode_config {
	struct drm_property *edid_property;
	struct drm_property *dpms_property;
	struct drm_property *path_property;
	struct drm_property *tile_property;
	struct drm_property *plane_type_property;
	struct drm_property *rotation_property;

	/* DVI-I properties */
	struct drm_property *dvi_i_subconnector_property;
@ -846,6 +1078,10 @@ struct drm_mode_config {
	struct drm_property *aspect_ratio_property;
	struct drm_property *dirty_info_property;

	/* properties for virtual machine layout */
	struct drm_property *suggested_x_property;
	struct drm_property *suggested_y_property;

	/* dumb ioctl parameters */
	uint32_t preferred_depth, prefer_shadow;

@ -856,6 +1092,19 @@ struct drm_mode_config {
	uint32_t cursor_width, cursor_height;
};

/**
 * drm_for_each_plane_mask - iterate over planes specified by bitmask
 * @plane: the loop cursor
 * @dev: the DRM device
 * @plane_mask: bitmask of plane indices
 *
 * Iterate over all planes specified by bitmask.
 */
#define drm_for_each_plane_mask(plane, dev, plane_mask) \
	list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
		if ((plane_mask) & (1 << drm_plane_index(plane)))
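
The macro expands to a filtered list walk, so it is used like an ordinary loop; for example, visiting every plane attached to a CRTC state might look like this (foo_flush_plane() is a hypothetical driver hook):

/* Visit each plane whose bit is set in the CRTC state's plane_mask;
 * the 1 << drm_plane_index(plane) encoding matches the plane_mask
 * documentation in drm_crtc_state above. */
struct drm_plane *plane;

drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask) {
	foo_flush_plane(plane);	/* hypothetical per-plane flush */
}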

#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
@ -875,9 +1124,6 @@ extern int drm_crtc_init_with_planes(struct drm_device *dev,
				     struct drm_plane *primary,
				     struct drm_plane *cursor,
				     const struct drm_crtc_funcs *funcs);
extern int drm_crtc_init(struct drm_device *dev,
			 struct drm_crtc *crtc,
			 const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
extern unsigned int drm_crtc_index(struct drm_crtc *crtc);

@ -903,6 +1149,7 @@ int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);

extern void drm_connector_cleanup(struct drm_connector *connector);
extern unsigned int drm_connector_index(struct drm_connector *connector);
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);

@ -942,6 +1189,7 @@ extern int drm_plane_init(struct drm_device *dev,
			  const uint32_t *formats, uint32_t format_count,
			  bool is_primary);
extern void drm_plane_cleanup(struct drm_plane *plane);
extern unsigned int drm_plane_index(struct drm_plane *plane);
extern void drm_plane_force_disable(struct drm_plane *plane);
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
				   int x, int y,
@ -971,9 +1219,10 @@ extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);

extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
						char *path);
						const char *path);
int drm_mode_connector_set_tile_property(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
						   struct edid *edid);
						   const struct edid *edid);

static inline bool drm_property_type_is(struct drm_property *property,
					uint32_t type)
@ -1034,11 +1283,13 @@ extern void drm_property_destroy(struct drm_device *dev, struct drm_property *pr
extern int drm_property_add_enum(struct drm_property *property, int index,
				 uint64_t value, const char *name);
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
					 char *formats[]);
extern int drm_mode_create_tv_properties(struct drm_device *dev,
					 unsigned int num_modes,
					 char *modes[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);

extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
					     struct drm_encoder *encoder);
@ -1106,6 +1357,13 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_is_valid(struct edid *edid);

extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
							 char topology[8]);
extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
						      char topology[8]);
extern void drm_mode_put_tile_group(struct drm_device *dev,
				    struct drm_tile_group *tg);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
					   int hsize, int vsize, int fresh,
					   bool rb);
@ -1120,6 +1378,9 @@ extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
					   struct drm_file *file_priv);
extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
				       struct drm_property *property,
				       uint64_t value);

extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
				 int *bpp);

@ -68,6 +68,7 @@ struct drm_crtc_helper_funcs {
	int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
			struct drm_display_mode *adjusted_mode, int x, int y,
			struct drm_framebuffer *old_fb);
	void (*mode_set_nofb)(struct drm_crtc *crtc);

	/* Move the crtc on the current fb to the given position *optional* */
	int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
@ -81,6 +82,12 @@ struct drm_crtc_helper_funcs {

	/* disable crtc when not in use - more explicit than dpms off */
	void (*disable)(struct drm_crtc *crtc);

	/* atomic helpers */
	int (*atomic_check)(struct drm_crtc *crtc,
			    struct drm_crtc_state *state);
	void (*atomic_begin)(struct drm_crtc *crtc);
	void (*atomic_flush)(struct drm_crtc *crtc);
};
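
These hooks split a CRTC update into a validation phase (@atomic_check, run against the new, not-yet-committed state) and a programming phase bracketed by @atomic_begin/@atomic_flush. A hypothetical @atomic_check that bounds plane usage via the plane_mask documented earlier (FOO_MAX_PLANES is an assumed hardware limit, not from this tree):

/* Hypothetical check: reject configurations that enable more planes
 * than the hardware has pipes for. */
static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_crtc_state *state)
{
	if (hweight32(state->plane_mask) > FOO_MAX_PLANES)
		return -EINVAL;
	return 0;
}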

/**
@ -161,6 +168,12 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,

extern void drm_helper_resume_force_mode(struct drm_device *dev);

int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode, int x, int y,
			     struct drm_framebuffer *old_fb);
int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb);

/* drm_probe_helper.c */
extern int drm_helper_probe_single_connector_modes(struct drm_connector
						   *connector, uint32_t maxX,

drivers/include/drm/drm_displayid.h (new file, 76 lines)
@ -0,0 +1,76 @@
/*
 * Copyright © 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef DRM_DISPLAYID_H
#define DRM_DISPLAYID_H

#define DATA_BLOCK_PRODUCT_ID 0x00
#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
#define DATA_BLOCK_VESA_TIMING 0x07
#define DATA_BLOCK_CEA_TIMING 0x08
#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
#define DATA_BLOCK_GP_ASCII_STRING 0x0b
#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
#define DATA_BLOCK_TILED_DISPLAY 0x12

#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f

#define PRODUCT_TYPE_EXTENSION 0
#define PRODUCT_TYPE_TEST 1
#define PRODUCT_TYPE_PANEL 2
#define PRODUCT_TYPE_MONITOR 3
#define PRODUCT_TYPE_TV 4
#define PRODUCT_TYPE_REPEATER 5
#define PRODUCT_TYPE_DIRECT_DRIVE 6

struct displayid_hdr {
	u8 rev;
	u8 bytes;
	u8 prod_id;
	u8 ext_count;
} __packed;

struct displayid_block {
	u8 tag;
	u8 rev;
	u8 num_bytes;
} __packed;

struct displayid_tiled_block {
	struct displayid_block base;
	u8 tile_cap;
	u8 topo[3];
	u8 tile_size[4];
	u8 tile_pixel_bezel[5];
	u8 topology_id[8];
} __packed;

#endif
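
These structures map directly onto the DisplayID byte stream: a section header followed by tag/length data blocks. A condensed sketch of the walk (no checksum or bounds hardening; foo_parse_tile() is a hypothetical consumer), similar in spirit to what the EDID code of this kernel generation does when it picks out DATA_BLOCK_TILED_DISPLAY:

/* Walk the data blocks that follow a displayid_hdr; buf points at the
 * header and length covers the whole section. */
static void foo_walk_displayid(const u8 *buf, int length)
{
	const struct displayid_hdr *hdr = (const struct displayid_hdr *)buf;
	int idx = sizeof(*hdr);

	while (idx + (int)sizeof(struct displayid_block) <= length) {
		const struct displayid_block *block =
			(const struct displayid_block *)&buf[idx];

		if (block->tag == DATA_BLOCK_TILED_DISPLAY)
			foo_parse_tile((const struct displayid_tiled_block *)block);

		idx += sizeof(*block) + block->num_bytes;
	}
}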