Import kvm-unit-test into this repo

Author: copy
Date: 2017-04-02 15:17:34 -05:00
Parent: 3595456ee2
Commit: d50101d20c
157 changed files with 29510 additions and 0 deletions

tests/kvm-unit-tests/.gitignore (vendored normal file, 19 lines added)
View file

@ -0,0 +1,19 @@
.gdbinit
*.a
*.d
*.o
*.flat
*.elf
.pc
patches
.stgit-*
cscope.*
*.swp
/lib/asm
/config.mak
/*-run
/msr.out
/tests
/build-head
/logs/
/logs.old/

View file

@ -0,0 +1,4 @@
Copyright (C) 2006 Qumranet.
The files in this directory and its subdirectories are licensed under the
GNU LGPL, version 2.

View file

@ -0,0 +1,84 @@
KVM Unit Tests Maintainers
==========================
The intention of this file is not to establish who owns what portions of the
code base, but to provide a set of names that developers can consult when they
have a question about a particular subset and also to provide a set of names
to be CC'd when submitting a patch to obtain appropriate review.
In general, if you have a question about inclusion of a patch, you
should consult the KVM mailing list <kvm@vger.kernel.org> and not any
specific individual privately.
Descriptions of section entries:
M: Mail patches to: FullName <address@domain>
L: Mailing list that is relevant to this area
W: Web-page with status/info
Q: Patchwork web based patch tracking system site
T: SCM tree type and location. Type is one of: git, hg, quilt, stgit.
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
Odd Fixes: It has a maintainer but they don't have time to do
much other than throw the odd patch in. See below.
Orphan: No current maintainer [but maybe you could take the
role as you write your new code].
Obsolete: Old code. Something tagged obsolete generally means
it has been replaced by a better system and you
should be using that.
F: Files and directories with wildcard patterns.
A trailing slash includes all files and subdirectory files.
F: drivers/net/ all files in and below drivers/net
F: drivers/net/* all files in drivers/net, but not below
F: */net/* all files in "any top level directory"/net
One pattern per line. Multiple F: lines acceptable.
X: Files and directories that are NOT maintained, same rules as F:
Files exclusions are tested before file matches.
Can be useful for excluding a specific subdirectory, for instance:
F: net/
X: net/ipv6/
matches all files in and below net excluding net/ipv6/
K: Keyword perl extended regex pattern to match content in a
patch or file. For instance:
K: of_get_profile
matches patches or files that contain "of_get_profile"
K: \b(printk|pr_(info|err))\b
matches patches or files that contain one or more of the words
printk, pr_info or pr_err
One regex pattern per line. Multiple K: lines acceptable.
Maintainers
-----------
M: Paolo Bonzini <pbonzini@redhat.com>
M: Radim Krčmář <rkrcmar@redhat.com>
L: kvm@vger.kernel.org
T: git://git.kernel.org/pub/scm/virt/kvm/kvm-unit-tests.git
Architecture Specific Code:
---------------------------
ARM
M: Drew Jones <drjones@redhat.com>
L: kvm@vger.kernel.org
L: kvmarm@lists.cs.columbia.edu
F: arm/*
F: lib/arm/*
F: lib/arm64/*
POWERPC
M: Laurent Vivier <lvivier@redhat.com>
M: Thomas Huth <thuth@redhat.com>
L: kvm@vger.kernel.org
L: kvm-ppc@vger.kernel.org
F: powerpc/*
F: lib/powerpc/*
F: lib/ppc64/*
X86
M: Paolo Bonzini <pbonzini@redhat.com>
M: Radim Krčmář <rkrcmar@redhat.com>
L: kvm@vger.kernel.org
F: x86/*
F: lib/x86/*

View file

@ -0,0 +1,109 @@
SHELL := /bin/bash
ifeq ($(wildcard config.mak),)
$(error run ./configure first. See ./configure -h)
endif
include config.mak
libdirs-get = $(shell [ -d "lib/$(1)" ] && echo "lib/$(1) lib/$(1)/asm")
ARCH_LIBDIRS := $(call libdirs-get,$(ARCH)) $(call libdirs-get,$(TEST_DIR))
DESTDIR := $(PREFIX)/share/kvm-unit-tests/
.PHONY: arch_clean clean distclean cscope
#make sure env CFLAGS variable is not used
CFLAGS =
libgcc := $(shell $(CC) --print-libgcc-file-name)
libcflat := lib/libcflat.a
cflatobjs := \
lib/argv.o \
lib/printf.o \
lib/string.o \
lib/abort.o \
lib/report.o \
lib/stack.o
# libfdt paths
LIBFDT_objdir = lib/libfdt
LIBFDT_srcdir = lib/libfdt
LIBFDT_archive = $(LIBFDT_objdir)/libfdt.a
LIBFDT_include = $(addprefix $(LIBFDT_srcdir)/,$(LIBFDT_INCLUDES))
LIBFDT_version = $(addprefix $(LIBFDT_srcdir)/,$(LIBFDT_VERSION))
# include architecture-specific make rules
include $(TEST_DIR)/Makefile
# cc-option
# Usage: OP_CFLAGS+=$(call cc-option, -falign-functions=0, -malign-functions=0)
cc-option = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null \
> /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi ;)
CFLAGS += -g
CFLAGS += $(autodepend-flags) -Wall -Werror
frame-pointer-flag=-f$(if $(KEEP_FRAME_POINTER),no-,)omit-frame-pointer
fomit_frame_pointer := $(call cc-option, $(frame-pointer-flag), "")
fno_stack_protector := $(call cc-option, -fno-stack-protector, "")
fno_stack_protector_all := $(call cc-option, -fno-stack-protector-all, "")
wno_frame_address := $(call cc-option, -Wno-frame-address, "")
fno_pic := $(call cc-option, -fno-pic, "")
no_pie := $(call cc-option, -no-pie, "")
CFLAGS += $(fomit_frame_pointer)
CFLAGS += $(fno_stack_protector)
CFLAGS += $(fno_stack_protector_all)
CFLAGS += $(wno_frame_address)
CFLAGS += $(if $(U32_LONG_FMT),-D__U32_LONG_FMT__,)
CFLAGS += $(fno_pic) $(no_pie)
CXXFLAGS += $(CFLAGS)
autodepend-flags = -MMD -MF $(dir $*).$(notdir $*).d
LDFLAGS += $(CFLAGS)
LDFLAGS += -pthread -lrt
$(libcflat): $(cflatobjs)
$(AR) rcs $@ $^
include $(LIBFDT_srcdir)/Makefile.libfdt
$(LIBFDT_archive): CFLAGS += -ffreestanding -I lib -I lib/libfdt -Wno-sign-compare
$(LIBFDT_archive): $(addprefix $(LIBFDT_objdir)/,$(LIBFDT_OBJS))
$(AR) rcs $@ $^
%.o: %.S
$(CC) $(CFLAGS) -c -nostdlib -o $@ $<
-include */.*.d */*/.*.d
all: $(shell git rev-parse --verify --short=8 HEAD >build-head 2>/dev/null)
standalone: all
@scripts/mkstandalone.sh
install: standalone
mkdir -p $(DESTDIR)
install tests/* $(DESTDIR)
clean: arch_clean
$(RM) lib/.*.d $(libcflat) $(cflatobjs)
libfdt_clean:
$(RM) $(LIBFDT_archive) \
$(addprefix $(LIBFDT_objdir)/,$(LIBFDT_OBJS)) \
$(LIBFDT_objdir)/.*.d
distclean: clean libfdt_clean
$(RM) lib/asm config.mak $(TEST_DIR)-run msr.out cscope.* build-head
$(RM) -r tests logs logs.old
cscope: cscope_dirs = lib lib/libfdt lib/linux $(TEST_DIR) $(ARCH_LIBDIRS) lib/asm-generic
cscope:
$(RM) ./cscope.*
find -L $(cscope_dirs) -maxdepth 1 \
-name '*.[chsS]' -print | sed 's,^\./,,' | sort -u > ./cscope.files
cscope -bk

View file

@ -0,0 +1,131 @@
# kvm-unit-tests for v86
Run the following to build and run these tests:
```sh
./configure
make
make -C ../../ build/libv86.js
./run.js x86/realmode.flat
./run.js x86/setjmp.flat
./run.js x86/cmpxchg8b.flat
./run.js x86/sieve.flat
./run.js x86/ioapic.flat
./run.js x86/apic.flat
```
Tests can also be run in the browser by going to `?profile=test-$name` (for
example, `?profile=test-realmode`).
# Welcome to kvm-unit-tests
See http://www.linux-kvm.org/page/KVM-unit-tests for a high-level
description of this project, as well as HOWTOs for running and adding
tests.
# Building the tests
This directory contains sources for a kvm test suite.
To create the test images do:
./configure
make
in this directory. Test images are created in ./<ARCH>/*.flat
## Standalone tests
The tests can be built as standalone executables.
To create and use standalone tests do:
./configure
make standalone
(send tests/some-test somewhere)
(go to somewhere)
./some-test
'make install' will install all tests in PREFIX/share/kvm-unit-tests/tests,
each as a standalone test.
# Running the tests
Then use the runner script to detect the correct invocation and
invoke the test:
./x86-run ./x86/msr.flat
or:
./run_tests.sh
to run them all.
To select a specific qemu binary, specify the QEMU=<path>
environment variable:
QEMU=/tmp/qemu/x86_64-softmmu/qemu-system-x86_64 ./x86-run ./x86/msr.flat
# Unit test inputs
Unit tests use QEMU's '-append <args...>' parameter for command line
inputs, i.e. all args will be available as argv strings in main().
Additionally a file of the form
KEY=VAL
KEY2=VAL
...
may be passed with '-initrd <file>' to become the unit test's environ,
which can then be accessed in the usual ways, e.g. VAL = getenv("KEY")
Any key=val strings can be passed, but some have reserved meanings in
the framework. The list of reserved environment variables is below:
QEMU_ACCEL ... either kvm or tcg
QEMU_VERSION_STRING ... string of the form `qemu -h | head -1`
KERNEL_VERSION_STRING ... string of the form `uname -r`
Additionally, these self-explanatory variables are reserved:
QEMU_MAJOR, QEMU_MINOR, QEMU_MICRO, KERNEL_VERSION, KERNEL_PATCHLEVEL,
KERNEL_SUBLEVEL, KERNEL_EXTRAVERSION
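As a quick illustration of the above (not part of the imported sources), a unit
test might consume its `-append` arguments and `-initrd` environment like this;
everything here is standard libcflat API, but the output strings are made up:

```c
#include "libcflat.h"

int main(int argc, char **argv)
{
	int i;

	/* Everything passed via qemu's -append shows up as argv. */
	for (i = 1; i < argc; ++i)
		printf("arg %d: %s\n", i, argv[i]);

	/* KEY=VAL lines from the -initrd file become the environment. */
	char *accel = getenv("QEMU_ACCEL");	/* reserved key: "kvm" or "tcg" */
	if (accel)
		printf("running under %s\n", accel);

	return 0;
}
```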
# Contributing
## Directory structure
.: configure script, top-level Makefile, and run_tests.sh
./scripts: helper scripts for building and running tests
./lib: general architecture neutral services for the tests
./lib/<ARCH>: architecture dependent services for the tests
./<ARCH>: the sources of the tests and the created objects/images
See <ARCH>/README for architecture specific documentation.
## Style
Currently there is a mix of indentation styles so any changes to
existing files should be consistent with the existing style. For new
files:
- C: please use standard linux-with-tabs
- Shell: use TABs for indentation
## Patches
Patches are welcome at the KVM mailing list <kvm@vger.kernel.org>.
Please prefix messages with: [kvm-unit-tests PATCH]
You can add the following to .git/config to do this automatically for you:
[format]
subjectprefix = kvm-unit-tests PATCH
Additionally it's helpful to have a common order of file types in patches.
Our chosen order attempts to place the more declarative files before
the code files. We also start with common code and finish with unit test
code. git-diff's orderFile feature allows us to specify the order in a
file. The orderFile we use is `scripts/git.difforder`. Adding the config
with `git config diff.orderFile scripts/git.difforder` enables it.

tests/kvm-unit-tests/configure (vendored executable file, 167 lines added)
View file

@ -0,0 +1,167 @@
#!/bin/bash
prefix=/usr/local
cc=gcc
ld=ld
objcopy=objcopy
objdump=objdump
ar=ar
addr2line=addr2line
arch=i386
host=$arch
cross_prefix=
endian=""
pretty_print_stacks=yes
u32_long=
usage() {
cat <<-EOF
Usage: $0 [options]
Options include:
--arch=ARCH architecture to compile for ($arch)
--processor=PROCESSOR processor to compile for ($arch)
--cross-prefix=PREFIX cross compiler prefix
--cc=CC c compiler to use ($cc)
--ld=LD ld linker to use ($ld)
--prefix=PREFIX where to install things ($prefix)
--endian=ENDIAN endianness to compile for (little or big, ppc64 only)
--[enable|disable]-pretty-print-stacks
enable or disable pretty stack printing (enabled by default)
EOF
exit 1
}
while [[ "$1" = -* ]]; do
opt="$1"; shift
arg=
if [[ "$opt" = *=* ]]; then
arg="${opt#*=}"
opt="${opt%%=*}"
fi
case "$opt" in
--prefix)
prefix="$arg"
;;
--arch)
arch="$arg"
;;
--processor)
processor="$arg"
;;
--cross-prefix)
cross_prefix="$arg"
;;
--endian)
endian="$arg"
;;
--cc)
cc="$arg"
;;
--ld)
ld="$arg"
;;
--enable-pretty-print-stacks)
pretty_print_stacks=yes
;;
--disable-pretty-print-stacks)
pretty_print_stacks=no
;;
--help)
usage
;;
*)
usage
;;
esac
done
arch_name=$arch
[ "$arch" = "aarch64" ] && arch="arm64"
[ "$arch_name" = "arm64" ] && arch_name="aarch64"
[ -z "$processor" ] && processor="$arch"
if [ "$processor" = "arm64" ]; then
processor="cortex-a57"
elif [ "$processor" = "arm" ]; then
processor="cortex-a15"
fi
if [ "$arch" = "i386" ] || [ "$arch" = "x86_64" ]; then
testdir=x86
elif [ "$arch" = "arm" ] || [ "$arch" = "arm64" ]; then
testdir=arm
elif [ "$arch" = "ppc64" ]; then
testdir=powerpc
firmware="$testdir/boot_rom.bin"
if [ "$endian" != "little" ] && [ "$endian" != "big" ]; then
echo "You must provide endianness (big or little)!"
usage
fi
else
testdir=$arch
fi
if [ ! -d $testdir ]; then
echo "$testdir does not exist!"
exit 1
fi
if [ -f $testdir/run ]; then
ln -fs $testdir/run $testdir-run
fi
# check if uint32_t needs a long format modifier
cat << EOF > lib-test.c
__UINT32_TYPE__
EOF
u32_long=$($cross_prefix$cc -E lib-test.c | grep -v '^#' | grep -q long && echo yes)
rm -f lib-test.c
# check for dependent 32 bit libraries
if [ "$arch" != "arm" ]; then
cat << EOF > lib_test.c
#include <stdc++.h>
#include <boost_thread-mt.h>
#include <pthread.h>
int main ()
{}
EOF
$cc -m32 -o /dev/null lib_test.c &> /dev/null
exit=$?
if [ $exit -eq 0 ]; then
api=true
fi
rm -f lib_test.c
fi
# link lib/asm for the architecture
rm -f lib/asm
asm=asm-generic
if [ -d lib/$arch/asm ]; then
asm=$arch/asm
elif [ -d lib/$testdir/asm ]; then
asm=$testdir/asm
fi
ln -s $asm lib/asm
# create the config
cat <<EOF > config.mak
PREFIX=$prefix
HOST=$host
ARCH=$arch
ARCH_NAME=$arch_name
PROCESSOR=$processor
CC=$cross_prefix$cc
LD=$cross_prefix$ld
OBJCOPY=$cross_prefix$objcopy
OBJDUMP=$cross_prefix$objdump
AR=$cross_prefix$ar
ADDR2LINE=$cross_prefix$addr2line
API=$api
TEST_DIR=$testdir
FIRMWARE=$firmware
ENDIAN=$endian
PRETTY_PRINT_STACKS=$pretty_print_stacks
U32_LONG_FMT=$u32_long
EOF

View file

@ -0,0 +1,20 @@
/*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
/*
* When exit(code) is invoked, qemu will exit with ((code << 1) | 1),
* leaving us 128 exit status codes. To avoid confusion with signal
* status, we further limit exit codes to those resulting in qemu
* exiting with a status < 128. We give abort() the highest (127),
* leaving the lower status codes for unit tests.
*/
#define ABORT_EXIT_STATUS 63 /* 127 exit status from qemu */
void abort(void)
{
exit(ABORT_EXIT_STATUS);
}
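A host-side sketch (not from the imported sources) of how a test runner could
undo the `((code << 1) | 1)` encoding described in the comment above; the
function name is hypothetical:

```c
#include <stdio.h>

/* Map qemu's exit status back to the code the guest passed to exit(). */
static int decode_qemu_status(int qemu_status)
{
	if (qemu_status & 1)		/* odd: the guest called exit() */
		return qemu_status >> 1;
	return -1;			/* even: qemu itself exited or was killed */
}

int main(void)
{
	printf("%d\n", decode_qemu_status(127));	/* prints 63, i.e. abort() */
	return 0;
}
```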

View file

@ -0,0 +1,179 @@
/*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "alloc.h"
#include "asm/spinlock.h"
#include "asm/io.h"
#define PHYS_ALLOC_NR_REGIONS 256
struct phys_alloc_region {
phys_addr_t base;
phys_addr_t size;
};
static struct phys_alloc_region regions[PHYS_ALLOC_NR_REGIONS];
static int nr_regions;
static struct spinlock lock;
static phys_addr_t base, top, align_min;
void phys_alloc_show(void)
{
int i;
spin_lock(&lock);
printf("phys_alloc minimum alignment: 0x%" PRIx64 "\n",
(u64)align_min);
for (i = 0; i < nr_regions; ++i)
printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
(u64)regions[i].base,
(u64)(regions[i].base + regions[i].size - 1),
"USED");
printf("%016" PRIx64 "-%016" PRIx64 " [%s]\n",
(u64)base, (u64)(top - 1), "FREE");
spin_unlock(&lock);
}
void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
{
spin_lock(&lock);
base = base_addr;
top = base + size;
align_min = DEFAULT_MINIMUM_ALIGNMENT;
nr_regions = 0;
spin_unlock(&lock);
}
void phys_alloc_set_minimum_alignment(phys_addr_t align)
{
assert(align && !(align & (align - 1)));
spin_lock(&lock);
align_min = align;
spin_unlock(&lock);
}
static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
phys_addr_t align, bool safe)
{
static bool warned = false;
phys_addr_t addr, size_orig = size;
u64 top_safe;
spin_lock(&lock);
top_safe = top;
if (safe && sizeof(long) == 4)
top_safe = MIN(top_safe, 1ULL << 32);
align = MAX(align, align_min);
addr = ALIGN(base, align);
size += addr - base;
if ((top_safe - base) < size) {
printf("phys_alloc: requested=0x%" PRIx64
" (align=0x%" PRIx64 "), "
"need=0x%" PRIx64 ", but free=0x%" PRIx64 ". "
"top=0x%" PRIx64 ", top_safe=0x%" PRIx64 "\n",
(u64)size_orig, (u64)align, (u64)size, top_safe - base,
(u64)top, top_safe);
spin_unlock(&lock);
return INVALID_PHYS_ADDR;
}
base += size;
if (nr_regions < PHYS_ALLOC_NR_REGIONS) {
regions[nr_regions].base = addr;
regions[nr_regions].size = size_orig;
++nr_regions;
} else if (!warned) {
printf("WARNING: phys_alloc: No free log entries, "
"can no longer log allocations...\n");
warned = true;
}
spin_unlock(&lock);
return addr;
}
static phys_addr_t phys_zalloc_aligned_safe(phys_addr_t size,
phys_addr_t align, bool safe)
{
phys_addr_t addr = phys_alloc_aligned_safe(size, align, safe);
if (addr == INVALID_PHYS_ADDR)
return addr;
memset(phys_to_virt(addr), 0, size);
return addr;
}
phys_addr_t phys_alloc_aligned(phys_addr_t size, phys_addr_t align)
{
return phys_alloc_aligned_safe(size, align, false);
}
phys_addr_t phys_zalloc_aligned(phys_addr_t size, phys_addr_t align)
{
return phys_zalloc_aligned_safe(size, align, false);
}
phys_addr_t phys_alloc(phys_addr_t size)
{
return phys_alloc_aligned(size, align_min);
}
phys_addr_t phys_zalloc(phys_addr_t size)
{
return phys_zalloc_aligned(size, align_min);
}
static void *early_malloc(size_t size)
{
phys_addr_t addr = phys_alloc_aligned_safe(size, align_min, true);
if (addr == INVALID_PHYS_ADDR)
return NULL;
return phys_to_virt(addr);
}
static void *early_calloc(size_t nmemb, size_t size)
{
phys_addr_t addr = phys_zalloc_aligned_safe(nmemb * size,
align_min, true);
if (addr == INVALID_PHYS_ADDR)
return NULL;
return phys_to_virt(addr);
}
static void early_free(void *ptr __unused)
{
}
static void *early_memalign(size_t alignment, size_t size)
{
phys_addr_t addr;
assert(alignment && !(alignment & (alignment - 1)));
addr = phys_alloc_aligned_safe(size, alignment, true);
if (addr == INVALID_PHYS_ADDR)
return NULL;
return phys_to_virt(addr);
}
static struct alloc_ops early_alloc_ops = {
.malloc = early_malloc,
.calloc = early_calloc,
.free = early_free,
.memalign = early_memalign,
};
struct alloc_ops *alloc_ops = &early_alloc_ops;

View file

@ -0,0 +1,116 @@
#ifndef _ALLOC_H_
#define _ALLOC_H_
/*
* alloc supplies three ingredients to the test framework that are all
* related to the support of dynamic memory allocation.
*
* The first is a set of alloc function wrappers for malloc and its
* friends. Using wrappers allows test code and common code to use the
* same interface for memory allocation at all stages, even though the
* implementations may change with the stage, e.g. pre/post paging.
*
* The second is a set of implementations for the alloc function
* interfaces. These implementations are named early_*, as they can be
* used almost immediately by the test framework.
*
* The third is a very simple physical memory allocator, which the
* early_* alloc functions build on.
*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
struct alloc_ops {
void *(*malloc)(size_t size);
void *(*calloc)(size_t nmemb, size_t size);
void (*free)(void *ptr);
void *(*memalign)(size_t alignment, size_t size);
};
/*
* alloc_ops is initialized to early_alloc_ops
*/
extern struct alloc_ops *alloc_ops;
static inline void *malloc(size_t size)
{
assert(alloc_ops && alloc_ops->malloc);
return alloc_ops->malloc(size);
}
static inline void *calloc(size_t nmemb, size_t size)
{
assert(alloc_ops && alloc_ops->calloc);
return alloc_ops->calloc(nmemb, size);
}
static inline void free(void *ptr)
{
assert(alloc_ops && alloc_ops->free);
alloc_ops->free(ptr);
}
static inline void *memalign(size_t alignment, size_t size)
{
assert(alloc_ops && alloc_ops->memalign);
return alloc_ops->memalign(alignment, size);
}
/*
* phys_alloc is a very simple allocator which allows physical memory
* to be partitioned into regions until all memory is allocated.
*
* Note: This is such a simple allocator that there is no way to free
* a region. For more complicated memory management a single region
* can be allocated, but then have its memory managed by a more
* sophisticated allocator, e.g. a page allocator.
*/
#define DEFAULT_MINIMUM_ALIGNMENT 32
/*
* phys_alloc_init creates the initial free memory region of size @size
* at @base. The minimum alignment is set to DEFAULT_MINIMUM_ALIGNMENT.
*/
extern void phys_alloc_init(phys_addr_t base, phys_addr_t size);
/*
* phys_alloc_set_minimum_alignment sets the minimum alignment to
* @align.
*/
extern void phys_alloc_set_minimum_alignment(phys_addr_t align);
/*
* phys_alloc_aligned returns the base address of a region of size @size,
* where the address is aligned to @align, or INVALID_PHYS_ADDR if there
* isn't enough free memory to satisfy the request.
*/
extern phys_addr_t phys_alloc_aligned(phys_addr_t size, phys_addr_t align);
/*
* phys_zalloc_aligned is like phys_alloc_aligned, but zeros the memory
* before returning the address.
*/
extern phys_addr_t phys_zalloc_aligned(phys_addr_t size, phys_addr_t align);
/*
* phys_alloc returns the base address of a region of size @size, or
* INVALID_PHYS_ADDR if there isn't enough free memory to satisfy the
* request.
*/
extern phys_addr_t phys_alloc(phys_addr_t size);
/*
* phys_zalloc is like phys_alloc, but zeros the memory before returning.
*/
extern phys_addr_t phys_zalloc(phys_addr_t size);
/*
* phys_alloc_show outputs all currently allocated regions with the
* following format
* <start_addr>-<end_addr> [<USED|FREE>]
*/
extern void phys_alloc_show(void);
#endif /* _ALLOC_H_ */
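A minimal usage sketch (not part of the import) of the interfaces declared
above, assuming the caller already knows a free physical range; the function
name and sizes are illustrative:

```c
#include "alloc.h"

void early_heap_example(phys_addr_t free_base, phys_addr_t free_size)
{
	/* Hand the free range to the simple region allocator. */
	phys_alloc_init(free_base, free_size);
	phys_alloc_set_minimum_alignment(64);

	/* malloc() now routes through early_alloc_ops (see lib/alloc.c). */
	void *buf = malloc(4096);
	assert(buf);

	/* Dump the regions handed out so far. */
	phys_alloc_show();
}
```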

View file

@ -0,0 +1,141 @@
#include "libcflat.h"
#include "auxinfo.h"
int __argc;
char *__args;
char *__argv[100];
char *__environ[200];
char **environ = __environ;
static char args_copy[1000];
static char *copy_ptr = args_copy;
#define isblank(c) ((c) == ' ' || (c) == '\t')
#define isalpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z') || (c) == '_')
#define isalnum(c) (isalpha(c) || ((c) >= '0' && (c) <= '9'))
static char *skip_blanks(char *p)
{
while (isblank(*p))
++p;
return p;
}
void __setup_args(void)
{
char *args = __args;
char **argv = __argv + __argc;
while (*(args = skip_blanks(args)) != '\0') {
*argv++ = copy_ptr;
while (*args != '\0' && !isblank(*args))
*copy_ptr++ = *args++;
*copy_ptr++ = '\0';
}
__argc = argv - __argv;
}
void setup_args(char *args)
{
if (!args)
return;
__args = args;
__setup_args();
}
void setup_args_progname(char *args)
{
__argv[0] = copy_ptr;
strcpy(__argv[0], auxinfo.progname);
copy_ptr += strlen(auxinfo.progname) + 1;
++__argc;
if (args) {
__args = args;
__setup_args();
}
}
static char *env_eol(char *env)
{
while (*env && *env != '\n')
++env;
return env;
}
static char *env_invalid_eol(char *env)
{
char *eol = env_eol(env);
char eol_old = *eol;
*eol = '\0';
printf("Invalid environment variable: %s\n", env);
*eol = eol_old;
return eol;
}
static char *env_next(char *env)
{
char *p;
if (!*env)
return env;
if (isalpha(*env)) {
bool invalid = false;
p = env + 1;
while (*p && *p != '=' && *p != '\n') {
if (!isalnum(*p))
invalid = true;
++p;
}
if (*p != '=')
invalid = true;
if (invalid) {
env = env_invalid_eol(env);
return *env ? env_next(env + 1) : env;
}
return env;
}
p = env;
while (isblank(*p))
++p;
if (*p == '\n')
return env_next(p + 1);
if (*p == '#')
env = env_eol(env);
else
env = env_invalid_eol(env);
return *env ? env_next(env + 1) : env;
}
void setup_env(char *env, int size)
{
char *eof = env + size, *p = env;
bool newline = false;
int i = 0;
while (*p)
++p;
if (p == eof)
newline = true;
while (env < eof) {
if (newline)
env = env_next(env);
if (!*env || env >= eof)
break;
__environ[i++] = env;
while (env < eof && *env && !(newline && *env == '\n'))
++env;
*env++ = '\0';
}
}

View file

@ -0,0 +1,21 @@
#ifndef __ASM_GENERIC_ATOMIC_H__
#define __ASM_GENERIC_ATOMIC_H__
/* From QEMU include/qemu/atomic.h */
#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define atomic_fetch_or(ptr, n) __sync_fetch_and_or(ptr, n)
#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)
#define atomic_inc_fetch(ptr) __sync_add_and_fetch(ptr, 1)
#define atomic_dec_fetch(ptr) __sync_add_and_fetch(ptr, -1)
#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
#define atomic_or_fetch(ptr, n) __sync_or_and_fetch(ptr, n)
#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)
#endif
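A tiny illustration (not from the import) of the wrappers above; they expand
directly to the GCC `__sync` builtins, so the usual fetch-then-modify versus
modify-then-fetch distinction applies. The include path is assumed for this
sketch:

```c
#include "asm-generic/atomic.h"	/* include path assumed for this sketch */

static int counter;

void bump_counter(void)
{
	int old = atomic_fetch_inc(&counter);	/* returns the value before the add */
	int new = atomic_inc_fetch(&counter);	/* returns the value after the add */
	(void)old;
	(void)new;
}
```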

View file

@ -0,0 +1,35 @@
#ifndef _ASM_BARRIER_H_
#define _ASM_BARRIER_H_
/*
* asm-generic/barrier.h
*
* Copyright (C) 2016, Red Hat Inc, Alexander Gordeev <agordeev@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#ifndef mb
#define mb() asm volatile("":::"memory")
#endif
#ifndef rmb
#define rmb() asm volatile("":::"memory")
#endif
#ifndef wmb
#define wmb() asm volatile("":::"memory")
#endif
#ifndef smp_mb
#define smp_mb() mb()
#endif
#ifndef smp_rmb
#define smp_rmb() rmb()
#endif
#ifndef smp_wmb
#define smp_wmb() wmb()
#endif
#ifndef cpu_relax
#define cpu_relax() asm volatile ("":::"memory")
#endif
#endif /* _ASM_BARRIER_H_ */

View file

@ -0,0 +1,213 @@
#ifndef _ASM_GENERIC_IO_H_
#define _ASM_GENERIC_IO_H_
/*
* asm-generic/io.h
* adapted from the Linux kernel's include/asm-generic/io.h
* and arch/arm/include/asm/io.h
*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#include "asm/page.h"
#include "asm/barrier.h"
#ifndef __raw_readb
static inline u8 __raw_readb(const volatile void *addr)
{
return *(const volatile u8 *)addr;
}
#endif
#ifndef __raw_readw
static inline u16 __raw_readw(const volatile void *addr)
{
return *(const volatile u16 *)addr;
}
#endif
#ifndef __raw_readl
static inline u32 __raw_readl(const volatile void *addr)
{
return *(const volatile u32 *)addr;
}
#endif
#ifndef __raw_readq
static inline u64 __raw_readq(const volatile void *addr)
{
assert(sizeof(unsigned long) == sizeof(u64));
return *(const volatile u64 *)addr;
}
#endif
#ifndef __raw_writeb
static inline void __raw_writeb(u8 b, volatile void *addr)
{
*(volatile u8 *)addr = b;
}
#endif
#ifndef __raw_writew
static inline void __raw_writew(u16 b, volatile void *addr)
{
*(volatile u16 *)addr = b;
}
#endif
#ifndef __raw_writel
static inline void __raw_writel(u32 b, volatile void *addr)
{
*(volatile u32 *)addr = b;
}
#endif
#ifndef __raw_writeq
static inline void __raw_writeq(u64 b, volatile void *addr)
{
assert(sizeof(unsigned long) == sizeof(u64));
*(volatile u64 *)addr = b;
}
#endif
#ifndef __bswap16
static inline u16 __bswap16(u16 x)
{
return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}
#endif
#ifndef __bswap32
static inline u32 __bswap32(u32 x)
{
return ((x & 0xff000000) >> 24) | ((x & 0x00ff0000) >> 8) |
((x & 0x0000ff00) << 8) | ((x & 0x000000ff) << 24);
}
#endif
#ifndef __bswap64
static inline u64 __bswap64(u64 x)
{
return ((x & 0x00000000000000ffULL) << 56) |
((x & 0x000000000000ff00ULL) << 40) |
((x & 0x0000000000ff0000ULL) << 24) |
((x & 0x00000000ff000000ULL) << 8) |
((x & 0x000000ff00000000ULL) >> 8) |
((x & 0x0000ff0000000000ULL) >> 24) |
((x & 0x00ff000000000000ULL) >> 40) |
((x & 0xff00000000000000ULL) >> 56);
}
#endif
#ifndef __cpu_is_be
#define __cpu_is_be() (0)
#endif
#define le16_to_cpu(x) \
({ u16 __r = __cpu_is_be() ? __bswap16(x) : ((u16)x); __r; })
#define cpu_to_le16 le16_to_cpu
#define le32_to_cpu(x) \
({ u32 __r = __cpu_is_be() ? __bswap32(x) : ((u32)x); __r; })
#define cpu_to_le32 le32_to_cpu
#define le64_to_cpu(x) \
({ u64 __r = __cpu_is_be() ? __bswap64(x) : ((u64)x); __r; })
#define cpu_to_le64 le64_to_cpu
#define be16_to_cpu(x) \
({ u16 __r = !__cpu_is_be() ? __bswap16(x) : ((u16)x); __r; })
#define cpu_to_be16 be16_to_cpu
#define be32_to_cpu(x) \
({ u32 __r = !__cpu_is_be() ? __bswap32(x) : ((u32)x); __r; })
#define cpu_to_be32 be32_to_cpu
#define be64_to_cpu(x) \
({ u64 __r = !__cpu_is_be() ? __bswap64(x) : ((u64)x); __r; })
#define cpu_to_be64 be64_to_cpu
#define readb(addr) \
({ u8 __r = __raw_readb(addr); rmb(); __r; })
#define readw(addr) \
({ u16 __r = le16_to_cpu(__raw_readw(addr)); rmb(); __r; })
#define readl(addr) \
({ u32 __r = le32_to_cpu(__raw_readl(addr)); rmb(); __r; })
#define readq(addr) \
({ u64 __r = le64_to_cpu(__raw_readq(addr)); rmb(); __r; })
#define writeb(b, addr) \
({ wmb(); __raw_writeb(b, addr); })
#define writew(b, addr) \
({ wmb(); __raw_writew(cpu_to_le16(b), addr); })
#define writel(b, addr) \
({ wmb(); __raw_writel(cpu_to_le32(b), addr); })
#define writeq(b, addr) \
({ wmb(); __raw_writeq(cpu_to_le64(b), addr); })
#ifndef inb
static inline uint8_t inb(unsigned long port)
{
return readb((const volatile void __iomem *)port);
}
#endif
#ifndef inw
static inline uint16_t inw(unsigned long port)
{
return readw((const volatile void __iomem *)port);
}
#endif
#ifndef inl
static inline uint32_t inl(unsigned long port)
{
return readl((const volatile void __iomem *)port);
}
#endif
#ifndef outb
static inline void outb(uint8_t value, unsigned long port)
{
writeb(value, (volatile void __iomem *)port);
}
#endif
#ifndef outw
static inline void outw(uint16_t value, unsigned long port)
{
writew(value, (volatile void __iomem *)port);
}
#endif
#ifndef outl
static inline void outl(uint32_t value, unsigned long port)
{
writel(value, (volatile void __iomem *)port);
}
#endif
#ifndef ioremap
static inline void __iomem *ioremap(phys_addr_t phys_addr, size_t size __unused)
{
assert(sizeof(long) == 8 || !(phys_addr >> 32));
return (void __iomem *)(unsigned long)phys_addr;
}
#endif
#ifndef virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa((unsigned long)address);
}
#endif
#ifndef phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
#endif
#endif /* _ASM_GENERIC_IO_H_ */
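A short sketch (not from the import) of the intended MMIO pattern using the
accessors above; the include path assumes the `lib/asm` symlink set up by
`configure`, and the device address is made up:

```c
#include "libcflat.h"
#include "asm/io.h"

#define EXAMPLE_DEV_BASE	0x09000000UL	/* hypothetical MMIO base */

void poke_example_device(void)
{
	void *base = ioremap(EXAMPLE_DEV_BASE, SZ_4K);
	u32 status;

	/* writel converts to little-endian as needed and orders with wmb(). */
	writel(0x1, base);

	/* readl pairs the load with rmb() and converts from little-endian. */
	status = readl(base);
	(void)status;
}
```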

View file

@ -0,0 +1,29 @@
#ifndef _ASM_GENERIC_PAGE_H_
#define _ASM_GENERIC_PAGE_H_
/*
* asm-generic/page.h
* adapted from the Linux kernel's include/asm-generic/page.h
*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include <linux/const.h>
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifndef __ASSEMBLY__
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
#define __va(x) ((void *)((unsigned long) (x)))
#define __pa(x) ((unsigned long) (x))
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_GENERIC_PAGE_H_ */

View file

@ -0,0 +1,28 @@
#ifndef _ASM_PCI_HOST_BRIDGE_H_
#define _ASM_PCI_HOST_BRIDGE_H_
/*
* Copyright (C) 2016, Red Hat Inc, Alexander Gordeev <agordeev@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
phys_addr_t pci_host_bridge_get_paddr(uint64_t addr);
static inline
phys_addr_t pci_translate_addr(pcidevaddr_t dev __unused, uint64_t addr)
{
/*
* Assume we only have a single PCI host bridge in the system.
*/
return pci_host_bridge_get_paddr(addr);
}
uint8_t pci_config_readb(pcidevaddr_t dev, uint8_t reg);
uint16_t pci_config_readw(pcidevaddr_t dev, uint8_t reg);
uint32_t pci_config_readl(pcidevaddr_t dev, uint8_t reg);
void pci_config_writeb(pcidevaddr_t dev, uint8_t reg, uint8_t val);
void pci_config_writew(pcidevaddr_t dev, uint8_t reg, uint16_t val);
void pci_config_writel(pcidevaddr_t dev, uint8_t reg, uint32_t val);
#endif

View file

@ -0,0 +1,4 @@
#ifndef _ASM_GENERIC_PCI_H_
#define _ASM_GENERIC_PCI_H_
#error need architecture specific asm/pci.h
#endif

View file

@ -0,0 +1,4 @@
#ifndef _ASM_GENERIC_SPINLOCK_H_
#define _ASM_GENERIC_SPINLOCK_H_
#error need architecture specific asm/spinlock.h
#endif

View file

@ -0,0 +1,2 @@
#include "auxinfo.h"
struct auxinfo auxinfo = { PROGNAME };

View file

@ -0,0 +1,9 @@
#ifndef _AUXINFO_H_
#define _AUXINFO_H_
struct auxinfo {
const char *progname;
};
/* No extern! Define a common symbol. */
struct auxinfo auxinfo;
#endif

View file

@ -0,0 +1,36 @@
#ifndef _BITOPS_H_
#define _BITOPS_H_
/*
* Adapted from
* include/linux/bitops.h
*
* Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#define BITS_PER_LONG_LONG 64
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#include <asm/bitops.h>
/*
* Create a contiguous bitmask starting at bit position @l and ending at
* position @h. For example
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#define GENMASK(h, l) \
(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
#endif

View file

@ -0,0 +1,72 @@
/*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#include "virtio.h"
#include "asm/spinlock.h"
#define TESTDEV_NAME "chr-testdev"
static struct virtio_device *vcon;
static struct virtqueue *in_vq, *out_vq;
static struct spinlock lock;
static void __testdev_send(char *buf, unsigned int len)
{
int ret;
ret = virtqueue_add_outbuf(out_vq, buf, len);
virtqueue_kick(out_vq);
if (ret < 0)
return;
while (!virtqueue_get_buf(out_vq, &len))
;
}
void chr_testdev_exit(int code)
{
unsigned int len;
char buf[8];
snprintf(buf, sizeof(buf), "%dq", code);
len = strlen(buf);
spin_lock(&lock);
if (!vcon)
goto out;
__testdev_send(buf, len);
out:
spin_unlock(&lock);
}
void chr_testdev_init(void)
{
const char *io_names[] = { "input", "output" };
struct virtqueue *vqs[2];
int ret;
vcon = virtio_bind(VIRTIO_ID_CONSOLE);
if (vcon == NULL) {
printf("%s: %s: can't find a virtio-console\n",
__func__, TESTDEV_NAME);
return;
}
ret = vcon->config->find_vqs(vcon, 2, vqs, NULL, io_names);
if (ret < 0) {
printf("%s: %s: can't init virtqueues\n",
__func__, TESTDEV_NAME);
vcon = NULL;
return;
}
in_vq = vqs[0];
out_vq = vqs[1];
}

View file

@ -0,0 +1,14 @@
#ifndef _CHR_TESTDEV_H_
#define _CHR_TESTDEV_H_
/*
* chr-testdev is a driver for the chr-testdev qemu backend.
* The chr-testdev backend exposes a simple control interface to
* qemu for kvm-unit-tests accessible through virtio-console.
*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
extern void chr_testdev_init(void);
extern void chr_testdev_exit(int code);
#endif

View file

@ -0,0 +1,334 @@
/*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#include "libfdt/libfdt.h"
#include "devicetree.h"
static const void *fdt;
const void *dt_fdt(void)
{
return fdt;
}
bool dt_available(void)
{
return fdt_check_header(fdt) == 0;
}
int dt_get_nr_cells(int fdtnode, u32 *nr_address_cells, u32 *nr_size_cells)
{
const struct fdt_property *prop;
u32 *nr_cells;
int len, nac, nsc;
prop = fdt_get_property(fdt, fdtnode, "#address-cells", &len);
if (prop == NULL)
return len;
nr_cells = (u32 *)prop->data;
nac = fdt32_to_cpu(*nr_cells);
prop = fdt_get_property(fdt, fdtnode, "#size-cells", &len);
if (prop == NULL)
return len;
nr_cells = (u32 *)prop->data;
nsc = fdt32_to_cpu(*nr_cells);
*nr_address_cells = nac;
*nr_size_cells = nsc;
return 0;
}
void dt_reg_init(struct dt_reg *reg, u32 nr_address_cells, u32 nr_size_cells)
{
memset(reg, 0, sizeof(struct dt_reg));
reg->nr_address_cells = nr_address_cells;
reg->nr_size_cells = nr_size_cells;
}
int dt_get_reg(int fdtnode, int regidx, struct dt_reg *reg)
{
const struct fdt_property *prop;
u32 *cells, i;
unsigned nr_tuple_cells;
int len;
prop = fdt_get_property(fdt, fdtnode, "reg", &len);
if (prop == NULL)
return len;
cells = (u32 *)prop->data;
nr_tuple_cells = reg->nr_address_cells + reg->nr_size_cells;
regidx *= nr_tuple_cells;
if (regidx + nr_tuple_cells > len/sizeof(u32))
return -FDT_ERR_NOTFOUND;
for (i = 0; i < reg->nr_address_cells; ++i)
reg->address_cells[i] = fdt32_to_cpu(cells[regidx + i]);
regidx += reg->nr_address_cells;
for (i = 0; i < reg->nr_size_cells; ++i)
reg->size_cells[i] = fdt32_to_cpu(cells[regidx + i]);
return 0;
}
int dt_pbus_translate_node(int fdtnode, int regidx,
struct dt_pbus_reg *pbus_reg)
{
struct dt_reg raw_reg;
u32 nac, nsc;
int parent, ret;
parent = fdt_parent_offset(fdt, fdtnode);
if (parent < 0)
return parent;
ret = dt_get_nr_cells(parent, &nac, &nsc);
if (ret != 0)
return ret;
dt_reg_init(&raw_reg, nac, nsc);
ret = dt_get_reg(fdtnode, regidx, &raw_reg);
if (ret < 0)
return ret;
pbus_reg->addr = dt_pbus_read_cells(raw_reg.nr_address_cells,
raw_reg.address_cells);
pbus_reg->size = dt_pbus_read_cells(raw_reg.nr_size_cells,
raw_reg.size_cells);
return 0;
}
int dt_pbus_translate(const struct dt_device *dev, int regidx,
void *reg)
{
return dt_pbus_translate_node(dev->fdtnode, regidx, reg);
}
int dt_bus_match_any(const struct dt_device *dev __unused, int fdtnode)
{
/* matches any device with a valid node */
return fdtnode < 0 ? fdtnode : 1;
}
static const struct dt_bus dt_default_bus = {
.match = dt_bus_match_any,
.translate = dt_pbus_translate,
};
void dt_bus_init_defaults(struct dt_bus *bus)
{
memcpy(bus, &dt_default_bus, sizeof(struct dt_bus));
}
void dt_device_init(struct dt_device *dev, const struct dt_bus *bus,
void *info)
{
memset(dev, 0, sizeof(struct dt_device));
dev->bus = bus;
dev->info = info;
}
int dt_device_find_compatible(const struct dt_device *dev,
const char *compatible)
{
int node, ret;
node = fdt_node_offset_by_compatible(fdt, -1, compatible);
while (node >= 0) {
ret = dev->bus->match(dev, node);
if (ret < 0)
return ret;
else if (ret)
break;
node = fdt_node_offset_by_compatible(fdt, node, compatible);
}
return node;
}
int dt_pbus_get_base_compatible(const char *compatible,
struct dt_pbus_reg *base)
{
struct dt_device dev;
int node;
dt_device_init(&dev, &dt_default_bus, NULL);
node = dt_device_find_compatible(&dev, compatible);
if (node < 0)
return node;
dt_device_bind_node(&dev, node);
return dt_pbus_get_base(&dev, base);
}
int dt_get_memory_params(struct dt_pbus_reg *regs, int nr_regs)
{
const char *pn = "device_type", *pv = "memory";
int node, ret, reg_idx, pl = strlen(pv) + 1, nr = 0;
struct dt_pbus_reg reg;
node = fdt_node_offset_by_prop_value(fdt, -1, pn, pv, pl);
while (node >= 0) {
reg_idx = 0;
while (nr < nr_regs) {
ret = dt_pbus_translate_node(node, reg_idx, &reg);
if (ret == -FDT_ERR_NOTFOUND)
break;
if (ret < 0)
return ret;
regs[nr].addr = reg.addr;
regs[nr].size = reg.size;
++nr, ++reg_idx;
}
node = fdt_node_offset_by_prop_value(fdt, node, pn, pv, pl);
}
return node != -FDT_ERR_NOTFOUND ? node : nr;
}
int dt_for_each_cpu_node(void (*func)(int fdtnode, u64 regval, void *info),
void *info)
{
const struct fdt_property *prop;
int cpus, cpu, ret, len;
struct dt_reg raw_reg;
u32 nac, nsc;
u64 regval;
cpus = fdt_path_offset(fdt, "/cpus");
if (cpus < 0)
return cpus;
ret = dt_get_nr_cells(cpus, &nac, &nsc);
if (ret < 0)
return ret;
dt_reg_init(&raw_reg, nac, nsc);
dt_for_each_subnode(cpus, cpu) {
prop = fdt_get_property(fdt, cpu, "device_type", &len);
if (prop == NULL)
return len;
if (len != 4 || strcmp((char *)prop->data, "cpu"))
continue;
ret = dt_get_reg(cpu, 0, &raw_reg);
if (ret < 0)
return ret;
regval = raw_reg.address_cells[0];
if (nac == 2)
regval = (regval << 32) | raw_reg.address_cells[1];
func(cpu, regval, info);
}
return 0;
}
int dt_get_bootargs(const char **bootargs)
{
const struct fdt_property *prop;
int node, len;
*bootargs = NULL;
node = fdt_path_offset(fdt, "/chosen");
if (node < 0)
return node;
prop = fdt_get_property(fdt, node, "bootargs", &len);
if (!prop)
return len;
*bootargs = prop->data;
return 0;
}
int dt_get_default_console_node(void)
{
const struct fdt_property *prop;
int node, len;
node = fdt_path_offset(fdt, "/chosen");
if (node < 0)
return node;
prop = fdt_get_property(fdt, node, "stdout-path", &len);
if (!prop) {
prop = fdt_get_property(fdt, node, "linux,stdout-path", &len);
if (!prop)
return len;
}
return fdt_path_offset(fdt, prop->data);
}
int dt_get_initrd(const char **initrd, u32 *size)
{
const struct fdt_property *prop;
const char *start, *end;
int node, len;
u32 *data;
*initrd = NULL;
*size = 0;
node = fdt_path_offset(fdt, "/chosen");
if (node < 0)
return node;
prop = fdt_get_property(fdt, node, "linux,initrd-start", &len);
if (!prop)
return len;
data = (u32 *)prop->data;
start = (const char *)(unsigned long)fdt32_to_cpu(*data);
prop = fdt_get_property(fdt, node, "linux,initrd-end", &len);
if (!prop) {
assert(len != -FDT_ERR_NOTFOUND);
return len;
}
data = (u32 *)prop->data;
end = (const char *)(unsigned long)fdt32_to_cpu(*data);
*initrd = start;
*size = (unsigned long)end - (unsigned long)start;
return 0;
}
int dt_init(const void *fdt_ptr)
{
int ret;
ret = fdt_check_header(fdt_ptr);
if (ret < 0)
return ret;
/* Sanity check the path. */
ret = fdt_path_offset(fdt_ptr, "/");
if (ret < 0)
return ret;
fdt = fdt_ptr;
return 0;
}

View file

@ -0,0 +1,251 @@
#ifndef _DEVICETREE_H_
#define _DEVICETREE_H_
/*
* devicetree builds on libfdt to implement abstractions and accessors
* for Linux required device tree content. The accessors provided are
* common across architectures. See section III of the kernel doc
* Documentation/devicetree/booting-without-of.txt
*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#include "libfdt/libfdt.h"
/**********************************************************************
* devicetree init and libfdt helpers
**********************************************************************/
/* dt_init initializes devicetree with a pointer to an fdt, @fdt_ptr */
extern int dt_init(const void *fdt_ptr);
/* get the fdt pointer that devicetree is using */
extern const void *dt_fdt(void);
/* check for an initialized, valid devicetree */
extern bool dt_available(void);
/* traverse child nodes */
#define dt_for_each_subnode(n, s) \
for (s = fdt_first_subnode(dt_fdt(), n); \
s != -FDT_ERR_NOTFOUND; \
s = fdt_next_subnode(dt_fdt(), s))
/**********************************************************************
* Abstractions for required node types and properties
**********************************************************************/
struct dt_device {
int fdtnode;
const struct dt_bus *bus;
/*
* info is a pointer to device specific data, which may be
* used by the bus match() and translate() functions
*/
void *info;
};
struct dt_bus {
/*
* match a device @dev to an fdt node @fdtnode
* returns
* - a positive value on match
* - zero on no match
* - a negative FDT_ERR_* value on failure
*/
int (*match)(const struct dt_device *dev, int fdtnode);
/*
* translate the @regidx'th "address size" tuple of
* @dev's fdt node's "reg" property, and store the result
* in @reg, a bus specific structure
* returns
* - zero on success
* - a negative FDT_ERR_* value on failure
*/
int (*translate)(const struct dt_device *dev, int regidx, void *reg);
};
/* dt_bus_match_any matches any fdt node, i.e. it always returns true */
extern int dt_bus_match_any(const struct dt_device *dev, int fdtnode);
/* the processor bus (pbus) address type and register tuple */
typedef u64 dt_pbus_addr_t;
struct dt_pbus_reg {
dt_pbus_addr_t addr;
dt_pbus_addr_t size;
};
static inline dt_pbus_addr_t dt_pbus_read_cells(u32 nr_cells, u32 *cells)
{
switch (nr_cells) {
case 1: return cells[0];
case 2: return ((u64)cells[0] << 32) | cells[1];
}
return (~0ULL);
}
/*
* dt_pbus_translate translates device node regs for the
* processor bus using the parent node's #address-cells
* and #size-cells and dt_pbus_read_cells()
* returns
* - zero on success
* - a negative FDT_ERR_* value on failure
*/
extern int dt_pbus_translate(const struct dt_device *dev, int regidx,
void *reg);
/*
* dt_pbus_translate_node is the same as dt_pbus_translate but
* operates on an fdt node instead of a dt_device
*/
extern int dt_pbus_translate_node(int fdtnode, int regidx,
struct dt_pbus_reg *reg);
/*
* dt_pbus_get_base is an alias for
* dt_pbus_translate(dev, 0, base)
* returns
* - zero on success
* - a negative FDT_ERR_* value on failure
*/
static inline int dt_pbus_get_base(const struct dt_device *dev,
struct dt_pbus_reg *base)
{
return dt_pbus_translate(dev, 0, base);
}
/*
* dt_bus_init_defaults initializes @bus with
* match <- dt_bus_match_any
* translate <- dt_pbus_translate
*/
extern void dt_bus_init_defaults(struct dt_bus *bus);
/*
* dt_device_init initializes a dt_device with the given parameters
*/
extern void dt_device_init(struct dt_device *dev, const struct dt_bus *bus,
void *info);
static inline void dt_device_bind_node(struct dt_device *dev, int fdtnode)
{
dev->fdtnode = fdtnode;
}
/*
* dt_device_find_compatible finds a @compatible node
* returns
* - node (>= 0) on success
* - a negative FDT_ERR_* value on failure
*/
extern int dt_device_find_compatible(const struct dt_device *dev,
const char *compatible);
/*
* dt_pbus_get_base_compatible simply bundles many functions into one.
* It finds the first @compatible fdt node, then translates the 0th reg
* tuple (the base) using the processor bus translation, and finally it
* stores that result in @base.
* returns
* - zero on success
* - a negative FDT_ERR_* value on failure
*/
extern int dt_pbus_get_base_compatible(const char *compatible,
struct dt_pbus_reg *base);
/**********************************************************************
* Low-level accessors for required node types and properties
**********************************************************************/
/*
* dt_get_nr_cells sets @nr_address_cells and @nr_size_cells to the
* #address-cells and #size-cells properties of @fdtnode
* returns
* - zero on success
* - a negative FDT_ERR_* value on failure
*/
extern int dt_get_nr_cells(int fdtnode, u32 *nr_address_cells,
u32 *nr_size_cells);
/* dt_reg is a structure for "raw" reg tuples */
#define MAX_ADDRESS_CELLS 4
#define MAX_SIZE_CELLS 4
struct dt_reg {
u32 nr_address_cells, nr_size_cells;
u32 address_cells[MAX_ADDRESS_CELLS];
u32 size_cells[MAX_SIZE_CELLS];
};
/*
* dt_reg_init initializes a dt_reg struct to zero and sets
* nr_address_cells and nr_size_cells to @nr_address_cells and
* @nr_size_cells respectively.
*/
extern void dt_reg_init(struct dt_reg *reg, u32 nr_address_cells,
u32 nr_size_cells);
/*
* dt_get_reg gets the @regidx'th reg tuple of @fdtnode's reg property
* and stores it in @reg. @reg must be initialized.
* returns
* - zero on success
* - a negative FDT_ERR_* value on failure
*/
extern int dt_get_reg(int fdtnode, int regidx, struct dt_reg *reg);
/**********************************************************************
* High-level accessors for required node types and properties
**********************************************************************/
/*
* dt_get_bootargs gets the string pointer from /chosen/bootargs
* returns
* - zero on success
* - a negative FDT_ERR_* value on failure, and @bootargs
* will be set to NULL
*/
extern int dt_get_bootargs(const char **bootargs);
/*
* dt_get_default_console_node gets the node of the path stored in
* /chosen/stdout-path (or the deprecated /chosen/linux,stdout-path)
* returns
* - the node (>= 0) on success
* - a negative FDT_ERR_* value on failure
*/
extern int dt_get_default_console_node(void);
/*
* dt_get_initrd gets the physical address of the initrd and its
* size from /chosen
* returns
* - zero on success
* - a negative FDT_ERR_* value on failure, and @initrd will be
* set to NULL and @size set to zero
*/
extern int dt_get_initrd(const char **initrd, u32 *size);
/*
* dt_get_memory_params gets the memory parameters from the /memory node(s)
* storing each memory region ("address size" tuple) in consecutive entries
* of @regs, up to @nr_regs
* returns
* - number of memory regions found on success
* - a negative FDT_ERR_* value on failure
*/
extern int dt_get_memory_params(struct dt_pbus_reg *regs, int nr_regs);
/*
* dt_for_each_cpu_node runs @func on each cpu node in the /cpus node
* passing it its fdt node, its reg property value, and @info
* returns
* - zero on success
* - a negative FDT_ERR_* value on failure
*/
extern int dt_for_each_cpu_node(void (*func)(int fdtnode, u64 regval,
void *info), void *info);
#endif /* _DEVICETREE_H_ */
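An illustrative sketch (not part of the import) of the typical init-time flow
with the accessors above; the function name and region count are arbitrary:

```c
#include "devicetree.h"

void dt_example(const void *fdt_ptr)
{
	struct dt_pbus_reg regs[4];
	const char *bootargs;
	int nr, i;

	if (dt_init(fdt_ptr) < 0 || !dt_available())
		return;

	if (dt_get_bootargs(&bootargs) == 0 && bootargs)
		printf("bootargs: %s\n", bootargs);

	nr = dt_get_memory_params(regs, 4);
	for (i = 0; i < nr; ++i)
		printf("memory: 0x%" PRIx64 " size 0x%" PRIx64 "\n",
		       (u64)regs[i].addr, (u64)regs[i].size);
}
```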

View file

@ -0,0 +1,24 @@
#ifndef _ERRATA_H_
#define _ERRATA_H_
#define _ERRATA(erratum) errata("ERRATA_" # erratum)
#define ERRATA(erratum) _ERRATA(erratum)
#define _ERRATA_RELAXED(erratum) errata_relaxed("ERRATA_" # erratum)
#define ERRATA_RELAXED(erratum) _ERRATA_RELAXED(erratum)
static inline bool errata(const char *erratum)
{
char *s = getenv(erratum);
return s && (*s == '1' || *s == 'y' || *s == 'Y');
}
static inline bool errata_relaxed(const char *erratum)
{
char *s = getenv(erratum);
return !(s && (*s == '0' || *s == 'n' || *s == 'N'));
}
#endif
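A hypothetical use of the helpers above (not from the import); the erratum
number is made up, and `report_skip` comes from libcflat.h:

```c
#include "libcflat.h"
#include "errata.h"

void maybe_test_erratum(void)
{
	/* Enabled by passing ERRATA_123456=y in the test's environ file. */
	if (!ERRATA(123456)) {
		report_skip("erratum 123456 workaround not enabled");
		return;
	}
	/* ... exercise the workaround here ... */
}
```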

View file

@ -0,0 +1,8 @@
#ifndef _KBUILD_H_
#define _KBUILD_H_
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))
#define COMMENT(x) asm volatile("\n->#" x)
#define BLANK() asm volatile("\n->" : : )
#endif
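A sketch (not part of the import) of how an asm-offsets style file would use
these macros; the struct and symbol names are hypothetical:

```c
#include <stddef.h>
#include "kbuild.h"

struct example_thread {
	unsigned long sp;
	unsigned long flags;
};

void asm_offsets(void)
{
	/* Each line below emits a "->SYM value" marker into the generated
	 * assembly, which a build script turns into #defines for .S files. */
	OFFSET(EXAMPLE_THREAD_SP, example_thread, sp);
	DEFINE(EXAMPLE_THREAD_SIZE, sizeof(struct example_thread));
	BLANK();
}
```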

View file

@ -0,0 +1,132 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#ifndef __LIBCFLAT_H
#define __LIBCFLAT_H
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#define __unused __attribute__((__unused__))
#define xstr(s...) xxstr(s)
#define xxstr(s...) #s
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define __ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a) - 1)
#define ALIGN(x, a) __ALIGN((x), (a))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
#define SZ_4K (1 << 12)
#define SZ_64K (1 << 16)
#define SZ_2M (1 << 21)
#define SZ_1G (1 << 30)
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
typedef int64_t s64;
typedef unsigned long ulong;
typedef _Bool bool;
#define false 0
#define true 1
#if __SIZEOF_LONG__ == 8
# define __PRI32_PREFIX
# define __PRI64_PREFIX "l"
# define __PRIPTR_PREFIX "l"
#else
#if defined(__U32_LONG_FMT__)
# define __PRI32_PREFIX "l"
#else
# define __PRI32_PREFIX
#endif
# define __PRI64_PREFIX "ll"
# define __PRIPTR_PREFIX
#endif
#define PRId32 __PRI32_PREFIX "d"
#define PRIu32 __PRI32_PREFIX "u"
#define PRIx32 __PRI32_PREFIX "x"
#define PRId64 __PRI64_PREFIX "d"
#define PRIu64 __PRI64_PREFIX "u"
#define PRIx64 __PRI64_PREFIX "x"
#define PRIxPTR __PRIPTR_PREFIX "x"
typedef u64 phys_addr_t;
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
extern void puts(const char *s);
extern void exit(int code);
extern void abort(void);
extern long atol(const char *ptr);
extern char *getenv(const char *name);
extern int printf(const char *fmt, ...)
__attribute__((format(printf, 1, 2)));
extern int snprintf(char *buf, int size, const char *fmt, ...)
__attribute__((format(printf, 3, 4)));
extern int vsnprintf(char *buf, int size, const char *fmt, va_list va)
__attribute__((format(printf, 3, 0)));
extern int vprintf(const char *fmt, va_list va)
__attribute__((format(printf, 1, 0)));
extern void report_prefix_push(const char *prefix);
extern void report_prefix_pop(void);
extern void report(const char *msg_fmt, bool pass, ...);
extern void report_xfail(const char *msg_fmt, bool xfail, bool pass, ...);
extern void report_abort(const char *msg_fmt, ...);
extern void report_skip(const char *msg_fmt, ...);
extern void report_info(const char *msg_fmt, ...);
extern int report_summary(void);
extern void dump_stack(void);
extern void dump_frame_stack(const void *instruction, const void *frame);
#define ARRAY_SIZE(_a) (sizeof(_a)/sizeof((_a)[0]))
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
#define assert(cond) \
do { \
if (!(cond)) { \
printf("%s:%d: assert failed: %s\n", \
__FILE__, __LINE__, #cond); \
dump_stack(); \
abort(); \
} \
} while (0)
static inline bool is_power_of_2(unsigned long n)
{
return n && !(n & (n - 1));
}
#endif
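Not from the imported sources: a minimal unit test sketched against the report
API declared above, to show the intended shape of a test's `main()`:

```c
#include "libcflat.h"

int main(void)
{
	report_prefix_push("example");

	report("one equals one", 1 == 1);
	report_xfail("known-broken case", true, 1 == 2);
	report_skip("not supported on this configuration");

	report_prefix_pop();

	/* Non-zero when any test failed; used as the exit code. */
	return report_summary();
}
```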

View file

@ -0,0 +1,10 @@
# Makefile.libfdt
#
# This is not a complete Makefile of itself. Instead, it is designed to
# be easily embeddable into other systems of Makefiles.
#
LIBFDT_soname = libfdt.$(SHAREDLIB_EXT).1
LIBFDT_INCLUDES = fdt.h libfdt.h libfdt_env.h
LIBFDT_VERSION = version.lds
LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c fdt_empty_tree.c
LIBFDT_OBJS = $(LIBFDT_SRCS:%.c=%.o)

View file

@ -0,0 +1,4 @@
The code in this directory is originally imported from the libfdt
directory of git://git.jdl.com/software/dtc.git - version 1.4.0.

View file

@ -0,0 +1,250 @@
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
int fdt_check_header(const void *fdt)
{
if (fdt_magic(fdt) == FDT_MAGIC) {
/* Complete tree */
if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
return -FDT_ERR_BADVERSION;
if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION)
return -FDT_ERR_BADVERSION;
} else if (fdt_magic(fdt) == FDT_SW_MAGIC) {
/* Unfinished sequential-write blob */
if (fdt_size_dt_struct(fdt) == 0)
return -FDT_ERR_BADSTATE;
} else {
return -FDT_ERR_BADMAGIC;
}
return 0;
}
const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len)
{
const char *p;
if (fdt_version(fdt) >= 0x11)
if (((offset + len) < offset)
|| ((offset + len) > fdt_size_dt_struct(fdt)))
return NULL;
p = _fdt_offset_ptr(fdt, offset);
if (p + len < p)
return NULL;
return p;
}
uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset)
{
const fdt32_t *tagp, *lenp;
uint32_t tag;
int offset = startoffset;
const char *p;
*nextoffset = -FDT_ERR_TRUNCATED;
tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE);
if (!tagp)
return FDT_END; /* premature end */
tag = fdt32_to_cpu(*tagp);
offset += FDT_TAGSIZE;
*nextoffset = -FDT_ERR_BADSTRUCTURE;
switch (tag) {
case FDT_BEGIN_NODE:
/* skip name */
do {
p = fdt_offset_ptr(fdt, offset++, 1);
} while (p && (*p != '\0'));
if (!p)
return FDT_END; /* premature end */
break;
case FDT_PROP:
lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp));
if (!lenp)
return FDT_END; /* premature end */
/* skip-name offset, length and value */
offset += sizeof(struct fdt_property) - FDT_TAGSIZE
+ fdt32_to_cpu(*lenp);
break;
case FDT_END:
case FDT_END_NODE:
case FDT_NOP:
break;
default:
return FDT_END;
}
if (!fdt_offset_ptr(fdt, startoffset, offset - startoffset))
return FDT_END; /* premature end */
*nextoffset = FDT_TAGALIGN(offset);
return tag;
}
int _fdt_check_node_offset(const void *fdt, int offset)
{
if ((offset < 0) || (offset % FDT_TAGSIZE)
|| (fdt_next_tag(fdt, offset, &offset) != FDT_BEGIN_NODE))
return -FDT_ERR_BADOFFSET;
return offset;
}
int _fdt_check_prop_offset(const void *fdt, int offset)
{
if ((offset < 0) || (offset % FDT_TAGSIZE)
|| (fdt_next_tag(fdt, offset, &offset) != FDT_PROP))
return -FDT_ERR_BADOFFSET;
return offset;
}
int fdt_next_node(const void *fdt, int offset, int *depth)
{
int nextoffset = 0;
uint32_t tag;
if (offset >= 0)
if ((nextoffset = _fdt_check_node_offset(fdt, offset)) < 0)
return nextoffset;
do {
offset = nextoffset;
tag = fdt_next_tag(fdt, offset, &nextoffset);
switch (tag) {
case FDT_PROP:
case FDT_NOP:
break;
case FDT_BEGIN_NODE:
if (depth)
(*depth)++;
break;
case FDT_END_NODE:
if (depth && ((--(*depth)) < 0))
return nextoffset;
break;
case FDT_END:
if ((nextoffset >= 0)
|| ((nextoffset == -FDT_ERR_TRUNCATED) && !depth))
return -FDT_ERR_NOTFOUND;
else
return nextoffset;
}
} while (tag != FDT_BEGIN_NODE);
return offset;
}
int fdt_first_subnode(const void *fdt, int offset)
{
int depth = 0;
offset = fdt_next_node(fdt, offset, &depth);
if (offset < 0 || depth != 1)
return -FDT_ERR_NOTFOUND;
return offset;
}
int fdt_next_subnode(const void *fdt, int offset)
{
int depth = 1;
/*
* With respect to the parent, the depth of the next subnode will be
* the same as the last.
*/
do {
offset = fdt_next_node(fdt, offset, &depth);
if (offset < 0 || depth < 1)
return -FDT_ERR_NOTFOUND;
} while (depth > 1);
return offset;
}
const char *_fdt_find_string(const char *strtab, int tabsize, const char *s)
{
int len = strlen(s) + 1;
const char *last = strtab + tabsize - len;
const char *p;
for (p = strtab; p <= last; p++)
if (memcmp(p, s, len) == 0)
return p;
return NULL;
}
int fdt_move(const void *fdt, void *buf, int bufsize)
{
FDT_CHECK_HEADER(fdt);
if (fdt_totalsize(fdt) > bufsize)
return -FDT_ERR_NOSPACE;
memmove(buf, fdt, fdt_totalsize(fdt));
return 0;
}
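
fdt_first_subnode() and fdt_next_subnode() above give a simple child-iteration primitive on top of fdt_next_node(). The following is a minimal sketch, not part of the imported sources, of how a caller might walk the direct children of a node; the blob pointer is assumed to be a finished, valid tree, and printf() is used only to keep the example concrete.

/* Sketch only: list the direct children of a node.  Assumes the libfdt
 * headers are on the include path and that "fdt" points at a valid blob. */
#include <stdio.h>
#include <libfdt.h>

static void list_children(const void *fdt, int parent)
{
    int child;

    for (child = fdt_first_subnode(fdt, parent);
         child >= 0;
         child = fdt_next_subnode(fdt, child)) {
        const char *name = fdt_get_name(fdt, child, NULL);

        printf("%s\n", name ? name : "<unnamed>");
    }
    /* the loop ends with child == -FDT_ERR_NOTFOUND once the children run out */
}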

View file

@@ -0,0 +1,111 @@
#ifndef _FDT_H
#define _FDT_H
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
* Copyright 2012 Kim Phillips, Freescale Semiconductor.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __ASSEMBLY__
struct fdt_header {
fdt32_t magic; /* magic word FDT_MAGIC */
fdt32_t totalsize; /* total size of DT block */
fdt32_t off_dt_struct; /* offset to structure */
fdt32_t off_dt_strings; /* offset to strings */
fdt32_t off_mem_rsvmap; /* offset to memory reserve map */
fdt32_t version; /* format version */
fdt32_t last_comp_version; /* last compatible version */
/* version 2 fields below */
fdt32_t boot_cpuid_phys; /* Which physical CPU id we're
booting on */
/* version 3 fields below */
fdt32_t size_dt_strings; /* size of the strings block */
/* version 17 fields below */
fdt32_t size_dt_struct; /* size of the structure block */
};
struct fdt_reserve_entry {
fdt64_t address;
fdt64_t size;
};
struct fdt_node_header {
fdt32_t tag;
char name[0];
};
struct fdt_property {
fdt32_t tag;
fdt32_t len;
fdt32_t nameoff;
char data[0];
};
#endif /* !__ASSEMBLY */
#define FDT_MAGIC 0xd00dfeed /* 4: version, 4: total size */
#define FDT_TAGSIZE sizeof(fdt32_t)
#define FDT_BEGIN_NODE 0x1 /* Start node: full name */
#define FDT_END_NODE 0x2 /* End node */
#define FDT_PROP 0x3 /* Property: name off,
size, content */
#define FDT_NOP 0x4 /* nop */
#define FDT_END 0x9
#define FDT_V1_SIZE (7*sizeof(fdt32_t))
#define FDT_V2_SIZE (FDT_V1_SIZE + sizeof(fdt32_t))
#define FDT_V3_SIZE (FDT_V2_SIZE + sizeof(fdt32_t))
#define FDT_V16_SIZE FDT_V3_SIZE
#define FDT_V17_SIZE (FDT_V16_SIZE + sizeof(fdt32_t))
#endif /* _FDT_H */
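
Every field of struct fdt_header above is stored as a big-endian fdt32_t. As an illustrative aside (not part of the imported header), this is how two header fields would be read from a raw in-memory blob, assuming the companion libfdt_env.h conversion helpers, just as the .c files in this commit do.

/* Illustrative only: pull the magic and total size out of a raw blob. */
#include "libfdt_env.h"
#include <fdt.h>

static int blob_size_if_valid(const void *blob)
{
    const struct fdt_header *hdr = blob;

    if (fdt32_to_cpu(hdr->magic) != FDT_MAGIC)
        return -1;                         /* not a flattened device tree */
    return (int)fdt32_to_cpu(hdr->totalsize);
}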

View file

@@ -0,0 +1,84 @@
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2012 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
int fdt_create_empty_tree(void *buf, int bufsize)
{
int err;
err = fdt_create(buf, bufsize);
if (err)
return err;
err = fdt_finish_reservemap(buf);
if (err)
return err;
err = fdt_begin_node(buf, "");
if (err)
return err;
err = fdt_end_node(buf);
if (err)
return err;
err = fdt_finish(buf);
if (err)
return err;
return fdt_open_into(buf, buf, bufsize);
}
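
fdt_create_empty_tree() is the usual starting point when building a blob from scratch with the read-write API. A hedged sketch follows (editorial, not part of the import); the buffer size and the "chosen"/"bootargs" names are invented for the example.

/* Sketch: start from an empty tree, then add a node and a property. */
#include <libfdt.h>
#include <string.h>

static int build_minimal_tree(void *buf, int bufsize)
{
    static const char args[] = "console=ttyS0";   /* illustrative value */
    int err, node;

    err = fdt_create_empty_tree(buf, bufsize);
    if (err < 0)
        return err;

    node = fdt_add_subnode(buf, 0, "chosen");     /* offset 0 is the root node */
    if (node < 0)
        return node;

    return fdt_setprop(buf, node, "bootargs", args, strlen(args) + 1);
}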

View file

@@ -0,0 +1,573 @@
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
static int _fdt_nodename_eq(const void *fdt, int offset,
const char *s, int len)
{
const char *p = fdt_offset_ptr(fdt, offset + FDT_TAGSIZE, len+1);
if (! p)
/* short match */
return 0;
if (memcmp(p, s, len) != 0)
return 0;
if (p[len] == '\0')
return 1;
else if (!memchr(s, '@', len) && (p[len] == '@'))
return 1;
else
return 0;
}
const char *fdt_string(const void *fdt, int stroffset)
{
return (const char *)fdt + fdt_off_dt_strings(fdt) + stroffset;
}
static int _fdt_string_eq(const void *fdt, int stroffset,
const char *s, int len)
{
const char *p = fdt_string(fdt, stroffset);
return (strlen(p) == len) && (memcmp(p, s, len) == 0);
}
int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
{
FDT_CHECK_HEADER(fdt);
*address = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->address);
*size = fdt64_to_cpu(_fdt_mem_rsv(fdt, n)->size);
return 0;
}
int fdt_num_mem_rsv(const void *fdt)
{
int i = 0;
while (fdt64_to_cpu(_fdt_mem_rsv(fdt, i)->size) != 0)
i++;
return i;
}
static int _nextprop(const void *fdt, int offset)
{
uint32_t tag;
int nextoffset;
do {
tag = fdt_next_tag(fdt, offset, &nextoffset);
switch (tag) {
case FDT_END:
if (nextoffset >= 0)
return -FDT_ERR_BADSTRUCTURE;
else
return nextoffset;
case FDT_PROP:
return offset;
}
offset = nextoffset;
} while (tag == FDT_NOP);
return -FDT_ERR_NOTFOUND;
}
int fdt_subnode_offset_namelen(const void *fdt, int offset,
const char *name, int namelen)
{
int depth;
FDT_CHECK_HEADER(fdt);
for (depth = 0;
(offset >= 0) && (depth >= 0);
offset = fdt_next_node(fdt, offset, &depth))
if ((depth == 1)
&& _fdt_nodename_eq(fdt, offset, name, namelen))
return offset;
if (depth < 0)
return -FDT_ERR_NOTFOUND;
return offset; /* error */
}
int fdt_subnode_offset(const void *fdt, int parentoffset,
const char *name)
{
return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name));
}
int fdt_path_offset(const void *fdt, const char *path)
{
const char *end = path + strlen(path);
const char *p = path;
int offset = 0;
FDT_CHECK_HEADER(fdt);
/* see if we have an alias */
if (*path != '/') {
const char *q = strchr(path, '/');
if (!q)
q = end;
p = fdt_get_alias_namelen(fdt, p, q - p);
if (!p)
return -FDT_ERR_BADPATH;
offset = fdt_path_offset(fdt, p);
p = q;
}
while (*p) {
const char *q;
while (*p == '/')
p++;
if (! *p)
return offset;
q = strchr(p, '/');
if (! q)
q = end;
offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
if (offset < 0)
return offset;
p = q;
}
return offset;
}
const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
{
const struct fdt_node_header *nh = _fdt_offset_ptr(fdt, nodeoffset);
int err;
if (((err = fdt_check_header(fdt)) != 0)
|| ((err = _fdt_check_node_offset(fdt, nodeoffset)) < 0))
goto fail;
if (len)
*len = strlen(nh->name);
return nh->name;
fail:
if (len)
*len = err;
return NULL;
}
int fdt_first_property_offset(const void *fdt, int nodeoffset)
{
int offset;
if ((offset = _fdt_check_node_offset(fdt, nodeoffset)) < 0)
return offset;
return _nextprop(fdt, offset);
}
int fdt_next_property_offset(const void *fdt, int offset)
{
if ((offset = _fdt_check_prop_offset(fdt, offset)) < 0)
return offset;
return _nextprop(fdt, offset);
}
const struct fdt_property *fdt_get_property_by_offset(const void *fdt,
int offset,
int *lenp)
{
int err;
const struct fdt_property *prop;
if ((err = _fdt_check_prop_offset(fdt, offset)) < 0) {
if (lenp)
*lenp = err;
return NULL;
}
prop = _fdt_offset_ptr(fdt, offset);
if (lenp)
*lenp = fdt32_to_cpu(prop->len);
return prop;
}
const struct fdt_property *fdt_get_property_namelen(const void *fdt,
int offset,
const char *name,
int namelen, int *lenp)
{
for (offset = fdt_first_property_offset(fdt, offset);
(offset >= 0);
(offset = fdt_next_property_offset(fdt, offset))) {
const struct fdt_property *prop;
if (!(prop = fdt_get_property_by_offset(fdt, offset, lenp))) {
offset = -FDT_ERR_INTERNAL;
break;
}
if (_fdt_string_eq(fdt, fdt32_to_cpu(prop->nameoff),
name, namelen))
return prop;
}
if (lenp)
*lenp = offset;
return NULL;
}
const struct fdt_property *fdt_get_property(const void *fdt,
int nodeoffset,
const char *name, int *lenp)
{
return fdt_get_property_namelen(fdt, nodeoffset, name,
strlen(name), lenp);
}
const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
const char *name, int namelen, int *lenp)
{
const struct fdt_property *prop;
prop = fdt_get_property_namelen(fdt, nodeoffset, name, namelen, lenp);
if (! prop)
return NULL;
return prop->data;
}
const void *fdt_getprop_by_offset(const void *fdt, int offset,
const char **namep, int *lenp)
{
const struct fdt_property *prop;
prop = fdt_get_property_by_offset(fdt, offset, lenp);
if (!prop)
return NULL;
if (namep)
*namep = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
return prop->data;
}
const void *fdt_getprop(const void *fdt, int nodeoffset,
const char *name, int *lenp)
{
return fdt_getprop_namelen(fdt, nodeoffset, name, strlen(name), lenp);
}
uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
{
const fdt32_t *php;
int len;
/* FIXME: This is a bit sub-optimal, since we potentially scan
* over all the properties twice. */
php = fdt_getprop(fdt, nodeoffset, "phandle", &len);
if (!php || (len != sizeof(*php))) {
php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len);
if (!php || (len != sizeof(*php)))
return 0;
}
return fdt32_to_cpu(*php);
}
const char *fdt_get_alias_namelen(const void *fdt,
const char *name, int namelen)
{
int aliasoffset;
aliasoffset = fdt_path_offset(fdt, "/aliases");
if (aliasoffset < 0)
return NULL;
return fdt_getprop_namelen(fdt, aliasoffset, name, namelen, NULL);
}
const char *fdt_get_alias(const void *fdt, const char *name)
{
return fdt_get_alias_namelen(fdt, name, strlen(name));
}
int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
{
int pdepth = 0, p = 0;
int offset, depth, namelen;
const char *name;
FDT_CHECK_HEADER(fdt);
if (buflen < 2)
return -FDT_ERR_NOSPACE;
for (offset = 0, depth = 0;
(offset >= 0) && (offset <= nodeoffset);
offset = fdt_next_node(fdt, offset, &depth)) {
while (pdepth > depth) {
do {
p--;
} while (buf[p-1] != '/');
pdepth--;
}
if (pdepth >= depth) {
name = fdt_get_name(fdt, offset, &namelen);
if (!name)
return namelen;
if ((p + namelen + 1) <= buflen) {
memcpy(buf + p, name, namelen);
p += namelen;
buf[p++] = '/';
pdepth++;
}
}
if (offset == nodeoffset) {
if (pdepth < (depth + 1))
return -FDT_ERR_NOSPACE;
if (p > 1) /* special case so that root path is "/", not "" */
p--;
buf[p] = '\0';
return 0;
}
}
if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
return -FDT_ERR_BADOFFSET;
else if (offset == -FDT_ERR_BADOFFSET)
return -FDT_ERR_BADSTRUCTURE;
return offset; /* error from fdt_next_node() */
}
int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
int supernodedepth, int *nodedepth)
{
int offset, depth;
int supernodeoffset = -FDT_ERR_INTERNAL;
FDT_CHECK_HEADER(fdt);
if (supernodedepth < 0)
return -FDT_ERR_NOTFOUND;
for (offset = 0, depth = 0;
(offset >= 0) && (offset <= nodeoffset);
offset = fdt_next_node(fdt, offset, &depth)) {
if (depth == supernodedepth)
supernodeoffset = offset;
if (offset == nodeoffset) {
if (nodedepth)
*nodedepth = depth;
if (supernodedepth > depth)
return -FDT_ERR_NOTFOUND;
else
return supernodeoffset;
}
}
if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0))
return -FDT_ERR_BADOFFSET;
else if (offset == -FDT_ERR_BADOFFSET)
return -FDT_ERR_BADSTRUCTURE;
return offset; /* error from fdt_next_node() */
}
int fdt_node_depth(const void *fdt, int nodeoffset)
{
int nodedepth;
int err;
err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth);
if (err)
return (err < 0) ? err : -FDT_ERR_INTERNAL;
return nodedepth;
}
int fdt_parent_offset(const void *fdt, int nodeoffset)
{
int nodedepth = fdt_node_depth(fdt, nodeoffset);
if (nodedepth < 0)
return nodedepth;
return fdt_supernode_atdepth_offset(fdt, nodeoffset,
nodedepth - 1, NULL);
}
int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
const char *propname,
const void *propval, int proplen)
{
int offset;
const void *val;
int len;
FDT_CHECK_HEADER(fdt);
/* FIXME: The algorithm here is pretty horrible: we scan each
* property of a node in fdt_getprop(), then if that didn't
* find what we want, we scan over them again making our way
* to the next node. Still it's the easiest to implement
* approach; performance can come later. */
for (offset = fdt_next_node(fdt, startoffset, NULL);
offset >= 0;
offset = fdt_next_node(fdt, offset, NULL)) {
val = fdt_getprop(fdt, offset, propname, &len);
if (val && (len == proplen)
&& (memcmp(val, propval, len) == 0))
return offset;
}
return offset; /* error from fdt_next_node() */
}
int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
{
int offset;
if ((phandle == 0) || (phandle == -1))
return -FDT_ERR_BADPHANDLE;
FDT_CHECK_HEADER(fdt);
/* FIXME: The algorithm here is pretty horrible: we
* potentially scan each property of a node in
* fdt_get_phandle(), then if that didn't find what
* we want, we scan over them again making our way to the next
* node. Still it's the easiest to implement approach;
* performance can come later. */
for (offset = fdt_next_node(fdt, -1, NULL);
offset >= 0;
offset = fdt_next_node(fdt, offset, NULL)) {
if (fdt_get_phandle(fdt, offset) == phandle)
return offset;
}
return offset; /* error from fdt_next_node() */
}
int fdt_stringlist_contains(const char *strlist, int listlen, const char *str)
{
int len = strlen(str);
const char *p;
while (listlen >= len) {
if (memcmp(str, strlist, len+1) == 0)
return 1;
p = memchr(strlist, '\0', listlen);
if (!p)
return 0; /* malformed strlist.. */
listlen -= (p-strlist) + 1;
strlist = p + 1;
}
return 0;
}
int fdt_node_check_compatible(const void *fdt, int nodeoffset,
const char *compatible)
{
const void *prop;
int len;
prop = fdt_getprop(fdt, nodeoffset, "compatible", &len);
if (!prop)
return len;
if (fdt_stringlist_contains(prop, len, compatible))
return 0;
else
return 1;
}
int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
const char *compatible)
{
int offset, err;
FDT_CHECK_HEADER(fdt);
/* FIXME: The algorithm here is pretty horrible: we scan each
* property of a node in fdt_node_check_compatible(), then if
* that didn't find what we want, we scan over them again
* making our way to the next node. Still it's the easiest to
* implement approach; performance can come later. */
for (offset = fdt_next_node(fdt, startoffset, NULL);
offset >= 0;
offset = fdt_next_node(fdt, offset, NULL)) {
err = fdt_node_check_compatible(fdt, offset, compatible);
if ((err < 0) && (err != -FDT_ERR_NOTFOUND))
return err;
else if (err == 0)
return offset;
}
return offset; /* error from fdt_next_node() */
}
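
The read-only calls in this file compose in the usual lookup pattern: resolve a path to a node offset, then fetch a property from that node. A short sketch (editorial, with /chosen and bootargs as example names that need not exist in any given blob):

/* Returns the bootargs string, or NULL with *lenp holding a -FDT_ERR_* code. */
#include <libfdt.h>

static const char *get_bootargs(const void *fdt, int *lenp)
{
    int node = fdt_path_offset(fdt, "/chosen");

    if (node < 0) {
        if (lenp)
            *lenp = node;
        return NULL;
    }
    return fdt_getprop(fdt, node, "bootargs", lenp);
}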

View file

@@ -0,0 +1,492 @@
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
static int _fdt_blocks_misordered(const void *fdt,
int mem_rsv_size, int struct_size)
{
return (fdt_off_mem_rsvmap(fdt) < FDT_ALIGN(sizeof(struct fdt_header), 8))
|| (fdt_off_dt_struct(fdt) <
(fdt_off_mem_rsvmap(fdt) + mem_rsv_size))
|| (fdt_off_dt_strings(fdt) <
(fdt_off_dt_struct(fdt) + struct_size))
|| (fdt_totalsize(fdt) <
(fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt)));
}
static int _fdt_rw_check_header(void *fdt)
{
FDT_CHECK_HEADER(fdt);
if (fdt_version(fdt) < 17)
return -FDT_ERR_BADVERSION;
if (_fdt_blocks_misordered(fdt, sizeof(struct fdt_reserve_entry),
fdt_size_dt_struct(fdt)))
return -FDT_ERR_BADLAYOUT;
if (fdt_version(fdt) > 17)
fdt_set_version(fdt, 17);
return 0;
}
#define FDT_RW_CHECK_HEADER(fdt) \
{ \
int err; \
if ((err = _fdt_rw_check_header(fdt)) != 0) \
return err; \
}
static inline int _fdt_data_size(void *fdt)
{
return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
}
static int _fdt_splice(void *fdt, void *splicepoint, int oldlen, int newlen)
{
char *p = splicepoint;
char *end = (char *)fdt + _fdt_data_size(fdt);
if (((p + oldlen) < p) || ((p + oldlen) > end))
return -FDT_ERR_BADOFFSET;
if ((end - oldlen + newlen) > ((char *)fdt + fdt_totalsize(fdt)))
return -FDT_ERR_NOSPACE;
memmove(p + newlen, p + oldlen, end - p - oldlen);
return 0;
}
static int _fdt_splice_mem_rsv(void *fdt, struct fdt_reserve_entry *p,
int oldn, int newn)
{
int delta = (newn - oldn) * sizeof(*p);
int err;
err = _fdt_splice(fdt, p, oldn * sizeof(*p), newn * sizeof(*p));
if (err)
return err;
fdt_set_off_dt_struct(fdt, fdt_off_dt_struct(fdt) + delta);
fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta);
return 0;
}
static int _fdt_splice_struct(void *fdt, void *p,
int oldlen, int newlen)
{
int delta = newlen - oldlen;
int err;
if ((err = _fdt_splice(fdt, p, oldlen, newlen)))
return err;
fdt_set_size_dt_struct(fdt, fdt_size_dt_struct(fdt) + delta);
fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta);
return 0;
}
static int _fdt_splice_string(void *fdt, int newlen)
{
void *p = (char *)fdt
+ fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt);
int err;
if ((err = _fdt_splice(fdt, p, 0, newlen)))
return err;
fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) + newlen);
return 0;
}
static int _fdt_find_add_string(void *fdt, const char *s)
{
char *strtab = (char *)fdt + fdt_off_dt_strings(fdt);
const char *p;
char *new;
int len = strlen(s) + 1;
int err;
p = _fdt_find_string(strtab, fdt_size_dt_strings(fdt), s);
if (p)
/* found it */
return (p - strtab);
new = strtab + fdt_size_dt_strings(fdt);
err = _fdt_splice_string(fdt, len);
if (err)
return err;
memcpy(new, s, len);
return (new - strtab);
}
int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size)
{
struct fdt_reserve_entry *re;
int err;
FDT_RW_CHECK_HEADER(fdt);
re = _fdt_mem_rsv_w(fdt, fdt_num_mem_rsv(fdt));
err = _fdt_splice_mem_rsv(fdt, re, 0, 1);
if (err)
return err;
re->address = cpu_to_fdt64(address);
re->size = cpu_to_fdt64(size);
return 0;
}
int fdt_del_mem_rsv(void *fdt, int n)
{
struct fdt_reserve_entry *re = _fdt_mem_rsv_w(fdt, n);
int err;
FDT_RW_CHECK_HEADER(fdt);
if (n >= fdt_num_mem_rsv(fdt))
return -FDT_ERR_NOTFOUND;
err = _fdt_splice_mem_rsv(fdt, re, 1, 0);
if (err)
return err;
return 0;
}
static int _fdt_resize_property(void *fdt, int nodeoffset, const char *name,
int len, struct fdt_property **prop)
{
int oldlen;
int err;
*prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
if (! (*prop))
return oldlen;
if ((err = _fdt_splice_struct(fdt, (*prop)->data, FDT_TAGALIGN(oldlen),
FDT_TAGALIGN(len))))
return err;
(*prop)->len = cpu_to_fdt32(len);
return 0;
}
static int _fdt_add_property(void *fdt, int nodeoffset, const char *name,
int len, struct fdt_property **prop)
{
int proplen;
int nextoffset;
int namestroff;
int err;
if ((nextoffset = _fdt_check_node_offset(fdt, nodeoffset)) < 0)
return nextoffset;
namestroff = _fdt_find_add_string(fdt, name);
if (namestroff < 0)
return namestroff;
*prop = _fdt_offset_ptr_w(fdt, nextoffset);
proplen = sizeof(**prop) + FDT_TAGALIGN(len);
err = _fdt_splice_struct(fdt, *prop, 0, proplen);
if (err)
return err;
(*prop)->tag = cpu_to_fdt32(FDT_PROP);
(*prop)->nameoff = cpu_to_fdt32(namestroff);
(*prop)->len = cpu_to_fdt32(len);
return 0;
}
int fdt_set_name(void *fdt, int nodeoffset, const char *name)
{
char *namep;
int oldlen, newlen;
int err;
FDT_RW_CHECK_HEADER(fdt);
namep = (char *)(uintptr_t)fdt_get_name(fdt, nodeoffset, &oldlen);
if (!namep)
return oldlen;
newlen = strlen(name);
err = _fdt_splice_struct(fdt, namep, FDT_TAGALIGN(oldlen+1),
FDT_TAGALIGN(newlen+1));
if (err)
return err;
memcpy(namep, name, newlen+1);
return 0;
}
int fdt_setprop(void *fdt, int nodeoffset, const char *name,
const void *val, int len)
{
struct fdt_property *prop;
int err;
FDT_RW_CHECK_HEADER(fdt);
err = _fdt_resize_property(fdt, nodeoffset, name, len, &prop);
if (err == -FDT_ERR_NOTFOUND)
err = _fdt_add_property(fdt, nodeoffset, name, len, &prop);
if (err)
return err;
memcpy(prop->data, val, len);
return 0;
}
int fdt_appendprop(void *fdt, int nodeoffset, const char *name,
const void *val, int len)
{
struct fdt_property *prop;
int err, oldlen, newlen;
FDT_RW_CHECK_HEADER(fdt);
prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
if (prop) {
newlen = len + oldlen;
err = _fdt_splice_struct(fdt, prop->data,
FDT_TAGALIGN(oldlen),
FDT_TAGALIGN(newlen));
if (err)
return err;
prop->len = cpu_to_fdt32(newlen);
memcpy(prop->data + oldlen, val, len);
} else {
err = _fdt_add_property(fdt, nodeoffset, name, len, &prop);
if (err)
return err;
memcpy(prop->data, val, len);
}
return 0;
}
int fdt_delprop(void *fdt, int nodeoffset, const char *name)
{
struct fdt_property *prop;
int len, proplen;
FDT_RW_CHECK_HEADER(fdt);
prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
if (! prop)
return len;
proplen = sizeof(*prop) + FDT_TAGALIGN(len);
return _fdt_splice_struct(fdt, prop, proplen, 0);
}
int fdt_add_subnode_namelen(void *fdt, int parentoffset,
const char *name, int namelen)
{
struct fdt_node_header *nh;
int offset, nextoffset;
int nodelen;
int err;
uint32_t tag;
fdt32_t *endtag;
FDT_RW_CHECK_HEADER(fdt);
offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen);
if (offset >= 0)
return -FDT_ERR_EXISTS;
else if (offset != -FDT_ERR_NOTFOUND)
return offset;
/* Try to place the new node after the parent's properties */
fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */
do {
offset = nextoffset;
tag = fdt_next_tag(fdt, offset, &nextoffset);
} while ((tag == FDT_PROP) || (tag == FDT_NOP));
nh = _fdt_offset_ptr_w(fdt, offset);
nodelen = sizeof(*nh) + FDT_TAGALIGN(namelen+1) + FDT_TAGSIZE;
err = _fdt_splice_struct(fdt, nh, 0, nodelen);
if (err)
return err;
nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
memset(nh->name, 0, FDT_TAGALIGN(namelen+1));
memcpy(nh->name, name, namelen);
endtag = (fdt32_t *)((char *)nh + nodelen - FDT_TAGSIZE);
*endtag = cpu_to_fdt32(FDT_END_NODE);
return offset;
}
int fdt_add_subnode(void *fdt, int parentoffset, const char *name)
{
return fdt_add_subnode_namelen(fdt, parentoffset, name, strlen(name));
}
int fdt_del_node(void *fdt, int nodeoffset)
{
int endoffset;
FDT_RW_CHECK_HEADER(fdt);
endoffset = _fdt_node_end_offset(fdt, nodeoffset);
if (endoffset < 0)
return endoffset;
return _fdt_splice_struct(fdt, _fdt_offset_ptr_w(fdt, nodeoffset),
endoffset - nodeoffset, 0);
}
static void _fdt_packblocks(const char *old, char *new,
int mem_rsv_size, int struct_size)
{
int mem_rsv_off, struct_off, strings_off;
mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8);
struct_off = mem_rsv_off + mem_rsv_size;
strings_off = struct_off + struct_size;
memmove(new + mem_rsv_off, old + fdt_off_mem_rsvmap(old), mem_rsv_size);
fdt_set_off_mem_rsvmap(new, mem_rsv_off);
memmove(new + struct_off, old + fdt_off_dt_struct(old), struct_size);
fdt_set_off_dt_struct(new, struct_off);
fdt_set_size_dt_struct(new, struct_size);
memmove(new + strings_off, old + fdt_off_dt_strings(old),
fdt_size_dt_strings(old));
fdt_set_off_dt_strings(new, strings_off);
fdt_set_size_dt_strings(new, fdt_size_dt_strings(old));
}
int fdt_open_into(const void *fdt, void *buf, int bufsize)
{
int err;
int mem_rsv_size, struct_size;
int newsize;
const char *fdtstart = fdt;
const char *fdtend = fdtstart + fdt_totalsize(fdt);
char *tmp;
FDT_CHECK_HEADER(fdt);
mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
* sizeof(struct fdt_reserve_entry);
if (fdt_version(fdt) >= 17) {
struct_size = fdt_size_dt_struct(fdt);
} else {
struct_size = 0;
while (fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END)
;
if (struct_size < 0)
return struct_size;
}
if (!_fdt_blocks_misordered(fdt, mem_rsv_size, struct_size)) {
/* no further work necessary */
err = fdt_move(fdt, buf, bufsize);
if (err)
return err;
fdt_set_version(buf, 17);
fdt_set_size_dt_struct(buf, struct_size);
fdt_set_totalsize(buf, bufsize);
return 0;
}
/* Need to reorder */
newsize = FDT_ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size
+ struct_size + fdt_size_dt_strings(fdt);
if (bufsize < newsize)
return -FDT_ERR_NOSPACE;
/* First attempt to build converted tree at beginning of buffer */
tmp = buf;
/* But if that overlaps with the old tree... */
if (((tmp + newsize) > fdtstart) && (tmp < fdtend)) {
/* Try right after the old tree instead */
tmp = (char *)(uintptr_t)fdtend;
if ((tmp + newsize) > ((char *)buf + bufsize))
return -FDT_ERR_NOSPACE;
}
_fdt_packblocks(fdt, tmp, mem_rsv_size, struct_size);
memmove(buf, tmp, newsize);
fdt_set_magic(buf, FDT_MAGIC);
fdt_set_totalsize(buf, bufsize);
fdt_set_version(buf, 17);
fdt_set_last_comp_version(buf, 16);
fdt_set_boot_cpuid_phys(buf, fdt_boot_cpuid_phys(fdt));
return 0;
}
int fdt_pack(void *fdt)
{
int mem_rsv_size;
FDT_RW_CHECK_HEADER(fdt);
mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
* sizeof(struct fdt_reserve_entry);
_fdt_packblocks(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt));
fdt_set_totalsize(fdt, _fdt_data_size(fdt));
return 0;
}
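
The read-write entry points above are normally used by copying the blob into a larger scratch buffer, editing it there, and packing it again. A sketch of that sequence (editorial; the scratch size and the edited node/property are illustrative):

#include <libfdt.h>

static int edit_and_pack(const void *orig, void *scratch, int scratch_size)
{
    int err, node;
    fdt32_t val = cpu_to_fdt32(1);               /* cells are stored big-endian */

    err = fdt_open_into(orig, scratch, scratch_size);   /* copy + leave free space */
    if (err < 0)
        return err;

    node = fdt_path_offset(scratch, "/chosen");          /* illustrative node */
    if (node < 0)
        return node;

    err = fdt_setprop(scratch, node, "linux,example", &val, sizeof(val));
    if (err < 0)
        return err;

    return fdt_pack(scratch);                    /* shrink totalsize back down */
}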

View file

@@ -0,0 +1,96 @@
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
struct fdt_errtabent {
const char *str;
};
#define FDT_ERRTABENT(val) \
[(val)] = { .str = #val, }
static struct fdt_errtabent fdt_errtable[] = {
FDT_ERRTABENT(FDT_ERR_NOTFOUND),
FDT_ERRTABENT(FDT_ERR_EXISTS),
FDT_ERRTABENT(FDT_ERR_NOSPACE),
FDT_ERRTABENT(FDT_ERR_BADOFFSET),
FDT_ERRTABENT(FDT_ERR_BADPATH),
FDT_ERRTABENT(FDT_ERR_BADSTATE),
FDT_ERRTABENT(FDT_ERR_TRUNCATED),
FDT_ERRTABENT(FDT_ERR_BADMAGIC),
FDT_ERRTABENT(FDT_ERR_BADVERSION),
FDT_ERRTABENT(FDT_ERR_BADSTRUCTURE),
FDT_ERRTABENT(FDT_ERR_BADLAYOUT),
};
#define FDT_ERRTABSIZE (sizeof(fdt_errtable) / sizeof(fdt_errtable[0]))
const char *fdt_strerror(int errval)
{
if (errval > 0)
return "<valid offset/length>";
else if (errval == 0)
return "<no error>";
else if (errval > -FDT_ERRTABSIZE) {
const char *s = fdt_errtable[-errval].str;
if (s)
return s;
}
return "<unknown error>";
}
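
fdt_strerror() is the usual way to report libfdt's negative return codes; non-negative values are valid offsets or lengths. A tiny editorial sketch of that pattern, with printf() used only for illustration:

#include <stdio.h>
#include <libfdt.h>

static int check(const char *what, int ret)
{
    if (ret < 0)
        printf("%s: %s\n", what, fdt_strerror(ret));
    return ret;
}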

View file

@@ -0,0 +1,256 @@
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
static int _fdt_sw_check_header(void *fdt)
{
if (fdt_magic(fdt) != FDT_SW_MAGIC)
return -FDT_ERR_BADMAGIC;
/* FIXME: should check more details about the header state */
return 0;
}
#define FDT_SW_CHECK_HEADER(fdt) \
{ \
int err; \
if ((err = _fdt_sw_check_header(fdt)) != 0) \
return err; \
}
static void *_fdt_grab_space(void *fdt, size_t len)
{
int offset = fdt_size_dt_struct(fdt);
int spaceleft;
spaceleft = fdt_totalsize(fdt) - fdt_off_dt_struct(fdt)
- fdt_size_dt_strings(fdt);
if ((offset + len < offset) || (offset + len > spaceleft))
return NULL;
fdt_set_size_dt_struct(fdt, offset + len);
return _fdt_offset_ptr_w(fdt, offset);
}
int fdt_create(void *buf, int bufsize)
{
void *fdt = buf;
if (bufsize < sizeof(struct fdt_header))
return -FDT_ERR_NOSPACE;
memset(buf, 0, bufsize);
fdt_set_magic(fdt, FDT_SW_MAGIC);
fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION);
fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION);
fdt_set_totalsize(fdt, bufsize);
fdt_set_off_mem_rsvmap(fdt, FDT_ALIGN(sizeof(struct fdt_header),
sizeof(struct fdt_reserve_entry)));
fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt));
fdt_set_off_dt_strings(fdt, bufsize);
return 0;
}
int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size)
{
struct fdt_reserve_entry *re;
int offset;
FDT_SW_CHECK_HEADER(fdt);
if (fdt_size_dt_struct(fdt))
return -FDT_ERR_BADSTATE;
offset = fdt_off_dt_struct(fdt);
if ((offset + sizeof(*re)) > fdt_totalsize(fdt))
return -FDT_ERR_NOSPACE;
re = (struct fdt_reserve_entry *)((char *)fdt + offset);
re->address = cpu_to_fdt64(addr);
re->size = cpu_to_fdt64(size);
fdt_set_off_dt_struct(fdt, offset + sizeof(*re));
return 0;
}
int fdt_finish_reservemap(void *fdt)
{
return fdt_add_reservemap_entry(fdt, 0, 0);
}
int fdt_begin_node(void *fdt, const char *name)
{
struct fdt_node_header *nh;
int namelen = strlen(name) + 1;
FDT_SW_CHECK_HEADER(fdt);
nh = _fdt_grab_space(fdt, sizeof(*nh) + FDT_TAGALIGN(namelen));
if (! nh)
return -FDT_ERR_NOSPACE;
nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE);
memcpy(nh->name, name, namelen);
return 0;
}
int fdt_end_node(void *fdt)
{
fdt32_t *en;
FDT_SW_CHECK_HEADER(fdt);
en = _fdt_grab_space(fdt, FDT_TAGSIZE);
if (! en)
return -FDT_ERR_NOSPACE;
*en = cpu_to_fdt32(FDT_END_NODE);
return 0;
}
static int _fdt_find_add_string(void *fdt, const char *s)
{
char *strtab = (char *)fdt + fdt_totalsize(fdt);
const char *p;
int strtabsize = fdt_size_dt_strings(fdt);
int len = strlen(s) + 1;
int struct_top, offset;
p = _fdt_find_string(strtab - strtabsize, strtabsize, s);
if (p)
return p - strtab;
/* Add it */
offset = -strtabsize - len;
struct_top = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
if (fdt_totalsize(fdt) + offset < struct_top)
return 0; /* no more room :( */
memcpy(strtab + offset, s, len);
fdt_set_size_dt_strings(fdt, strtabsize + len);
return offset;
}
int fdt_property(void *fdt, const char *name, const void *val, int len)
{
struct fdt_property *prop;
int nameoff;
FDT_SW_CHECK_HEADER(fdt);
nameoff = _fdt_find_add_string(fdt, name);
if (nameoff == 0)
return -FDT_ERR_NOSPACE;
prop = _fdt_grab_space(fdt, sizeof(*prop) + FDT_TAGALIGN(len));
if (! prop)
return -FDT_ERR_NOSPACE;
prop->tag = cpu_to_fdt32(FDT_PROP);
prop->nameoff = cpu_to_fdt32(nameoff);
prop->len = cpu_to_fdt32(len);
memcpy(prop->data, val, len);
return 0;
}
int fdt_finish(void *fdt)
{
char *p = (char *)fdt;
fdt32_t *end;
int oldstroffset, newstroffset;
uint32_t tag;
int offset, nextoffset;
FDT_SW_CHECK_HEADER(fdt);
/* Add terminator */
end = _fdt_grab_space(fdt, sizeof(*end));
if (! end)
return -FDT_ERR_NOSPACE;
*end = cpu_to_fdt32(FDT_END);
/* Relocate the string table */
oldstroffset = fdt_totalsize(fdt) - fdt_size_dt_strings(fdt);
newstroffset = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt));
fdt_set_off_dt_strings(fdt, newstroffset);
/* Walk the structure, correcting string offsets */
offset = 0;
while ((tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END) {
if (tag == FDT_PROP) {
struct fdt_property *prop =
_fdt_offset_ptr_w(fdt, offset);
int nameoff;
nameoff = fdt32_to_cpu(prop->nameoff);
nameoff += fdt_size_dt_strings(fdt);
prop->nameoff = cpu_to_fdt32(nameoff);
}
offset = nextoffset;
}
if (nextoffset < 0)
return nextoffset;
/* Finally, adjust the header */
fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt));
fdt_set_magic(fdt, FDT_MAGIC);
return 0;
}
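
The sequential-write calls in this file are meant to be issued strictly in order: fdt_create(), the reserve map, nested begin/end node pairs with their properties, then fdt_finish(). A sketch of that flow (editorial; the "model" property and its value are invented for the example):

#include <libfdt.h>
#include <string.h>

static int build_sw_blob(void *buf, int bufsize)
{
    static const char model[] = "example-board";
    int err;

    err = fdt_create(buf, bufsize);
    if (!err)
        err = fdt_finish_reservemap(buf);        /* empty reserve map */
    if (!err)
        err = fdt_begin_node(buf, "");           /* root node */
    if (!err)
        err = fdt_property(buf, "model", model, strlen(model) + 1);
    if (!err)
        err = fdt_end_node(buf);
    if (!err)
        err = fdt_finish(buf);                   /* relocate strings, write FDT_MAGIC */
    return err;
}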

View file

@@ -0,0 +1,118 @@
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name,
const void *val, int len)
{
void *propval;
int proplen;
propval = fdt_getprop_w(fdt, nodeoffset, name, &proplen);
if (! propval)
return proplen;
if (proplen != len)
return -FDT_ERR_NOSPACE;
memcpy(propval, val, len);
return 0;
}
static void _fdt_nop_region(void *start, int len)
{
fdt32_t *p;
for (p = start; (char *)p < ((char *)start + len); p++)
*p = cpu_to_fdt32(FDT_NOP);
}
int fdt_nop_property(void *fdt, int nodeoffset, const char *name)
{
struct fdt_property *prop;
int len;
prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
if (! prop)
return len;
_fdt_nop_region(prop, len + sizeof(*prop));
return 0;
}
int _fdt_node_end_offset(void *fdt, int offset)
{
int depth = 0;
while ((offset >= 0) && (depth >= 0))
offset = fdt_next_node(fdt, offset, &depth);
return offset;
}
int fdt_nop_node(void *fdt, int nodeoffset)
{
int endoffset;
endoffset = _fdt_node_end_offset(fdt, nodeoffset);
if (endoffset < 0)
return endoffset;
_fdt_nop_region(fdt_offset_ptr_w(fdt, nodeoffset, 0),
endoffset - nodeoffset);
return 0;
}
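
The write-in-place helpers never move data, so they are safe on a blob already sitting at its final location: fdt_setprop_inplace() requires the new value to be exactly the old length, and fdt_nop_node() simply overwrites a node with FDT_NOP tags. A hedged sketch (the /example path and "cell" property are illustrative only):

#include <libfdt.h>

static int patch_in_place(void *fdt)
{
    int node, len;
    const void *old;
    fdt32_t val = cpu_to_fdt32(42);

    node = fdt_path_offset(fdt, "/example");     /* illustrative path */
    if (node < 0)
        return node;

    old = fdt_getprop(fdt, node, "cell", &len);
    if (!old)
        return len;
    if (len != sizeof(val))                      /* in-place edits cannot resize */
        return -FDT_ERR_NOSPACE;

    return fdt_setprop_inplace(fdt, node, "cell", &val, sizeof(val));
}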

File diff suppressed because it is too large

View file

@@ -0,0 +1,111 @@
#ifndef _LIBFDT_ENV_H
#define _LIBFDT_ENV_H
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
* Copyright 2012 Kim Phillips, Freescale Semiconductor.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#ifdef __CHECKER__
#define __force __attribute__((force))
#define __bitwise __attribute__((bitwise))
#else
#define __force
#define __bitwise
#endif
typedef uint16_t __bitwise fdt16_t;
typedef uint32_t __bitwise fdt32_t;
typedef uint64_t __bitwise fdt64_t;
#define EXTRACT_BYTE(x, n) ((unsigned long long)((uint8_t *)&x)[n])
#define CPU_TO_FDT16(x) ((EXTRACT_BYTE(x, 0) << 8) | EXTRACT_BYTE(x, 1))
#define CPU_TO_FDT32(x) ((EXTRACT_BYTE(x, 0) << 24) | (EXTRACT_BYTE(x, 1) << 16) | \
(EXTRACT_BYTE(x, 2) << 8) | EXTRACT_BYTE(x, 3))
#define CPU_TO_FDT64(x) ((EXTRACT_BYTE(x, 0) << 56) | (EXTRACT_BYTE(x, 1) << 48) | \
(EXTRACT_BYTE(x, 2) << 40) | (EXTRACT_BYTE(x, 3) << 32) | \
(EXTRACT_BYTE(x, 4) << 24) | (EXTRACT_BYTE(x, 5) << 16) | \
(EXTRACT_BYTE(x, 6) << 8) | EXTRACT_BYTE(x, 7))
static inline uint16_t fdt16_to_cpu(fdt16_t x)
{
return (__force uint16_t)CPU_TO_FDT16(x);
}
static inline fdt16_t cpu_to_fdt16(uint16_t x)
{
return (__force fdt16_t)CPU_TO_FDT16(x);
}
static inline uint32_t fdt32_to_cpu(fdt32_t x)
{
return (__force uint32_t)CPU_TO_FDT32(x);
}
static inline fdt32_t cpu_to_fdt32(uint32_t x)
{
return (__force fdt32_t)CPU_TO_FDT32(x);
}
static inline uint64_t fdt64_to_cpu(fdt64_t x)
{
return (__force uint64_t)CPU_TO_FDT64(x);
}
static inline fdt64_t cpu_to_fdt64(uint64_t x)
{
return (__force fdt64_t)CPU_TO_FDT64(x);
}
#undef CPU_TO_FDT64
#undef CPU_TO_FDT32
#undef CPU_TO_FDT16
#undef EXTRACT_BYTE
#endif /* _LIBFDT_ENV_H */
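
The EXTRACT_BYTE-based converters above work byte by byte, so they produce big-endian storage regardless of the host's endianness. A small worked example (editorial, host-side, printf() only for illustration):

#include <stdio.h>
#include "libfdt_env.h"

int main(void)
{
    fdt32_t be = cpu_to_fdt32(0xd00dfeedU);
    const unsigned char *p = (const unsigned char *)&be;

    /* Prints "d0 0d fe ed" on both little- and big-endian hosts. */
    printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
    return fdt32_to_cpu(be) == 0xd00dfeedU ? 0 : 1;
}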

View file

@@ -0,0 +1,95 @@
#ifndef _LIBFDT_INTERNAL_H
#define _LIBFDT_INTERNAL_H
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <fdt.h>
#define FDT_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define FDT_TAGALIGN(x) (FDT_ALIGN((x), FDT_TAGSIZE))
#define FDT_CHECK_HEADER(fdt) \
{ \
int err; \
if ((err = fdt_check_header(fdt)) != 0) \
return err; \
}
int _fdt_check_node_offset(const void *fdt, int offset);
int _fdt_check_prop_offset(const void *fdt, int offset);
const char *_fdt_find_string(const char *strtab, int tabsize, const char *s);
int _fdt_node_end_offset(void *fdt, int nodeoffset);
static inline const void *_fdt_offset_ptr(const void *fdt, int offset)
{
return (const char *)fdt + fdt_off_dt_struct(fdt) + offset;
}
static inline void *_fdt_offset_ptr_w(void *fdt, int offset)
{
return (void *)(uintptr_t)_fdt_offset_ptr(fdt, offset);
}
static inline const struct fdt_reserve_entry *_fdt_mem_rsv(const void *fdt, int n)
{
const struct fdt_reserve_entry *rsv_table =
(const struct fdt_reserve_entry *)
((const char *)fdt + fdt_off_mem_rsvmap(fdt));
return rsv_table + n;
}
static inline struct fdt_reserve_entry *_fdt_mem_rsv_w(void *fdt, int n)
{
return (void *)(uintptr_t)_fdt_mem_rsv(fdt, n);
}
#define FDT_SW_MAGIC (~FDT_MAGIC)
#endif /* _LIBFDT_INTERNAL_H */

View file

@@ -0,0 +1,60 @@
LIBFDT_1.2 {
global:
fdt_next_node;
fdt_check_header;
fdt_move;
fdt_string;
fdt_num_mem_rsv;
fdt_get_mem_rsv;
fdt_subnode_offset_namelen;
fdt_subnode_offset;
fdt_path_offset;
fdt_get_name;
fdt_get_property_namelen;
fdt_get_property;
fdt_getprop_namelen;
fdt_getprop;
fdt_get_phandle;
fdt_get_alias_namelen;
fdt_get_alias;
fdt_get_path;
fdt_supernode_atdepth_offset;
fdt_node_depth;
fdt_parent_offset;
fdt_node_offset_by_prop_value;
fdt_node_offset_by_phandle;
fdt_node_check_compatible;
fdt_node_offset_by_compatible;
fdt_setprop_inplace;
fdt_nop_property;
fdt_nop_node;
fdt_create;
fdt_add_reservemap_entry;
fdt_finish_reservemap;
fdt_begin_node;
fdt_property;
fdt_end_node;
fdt_finish;
fdt_open_into;
fdt_pack;
fdt_add_mem_rsv;
fdt_del_mem_rsv;
fdt_set_name;
fdt_setprop;
fdt_delprop;
fdt_add_subnode_namelen;
fdt_add_subnode;
fdt_del_node;
fdt_strerror;
fdt_offset_ptr;
fdt_next_tag;
fdt_appendprop;
fdt_create_empty_tree;
fdt_first_property_offset;
fdt_get_property_by_offset;
fdt_getprop_by_offset;
fdt_next_property_offset;
local:
*;
};

View file

@@ -0,0 +1,27 @@
/* const.h: Macros for dealing with constants. */
#ifndef _LINUX_CONST_H
#define _LINUX_CONST_H
/* Some constant macros are used in both assembler and
* C code. Therefore we cannot annotate them always with
* 'UL' and other type specifiers unilaterally. We
* use the following macros to deal with this.
*
* Similarly, _AT() will cast an expression with a type in C, but
* leave it unchanged in asm.
*/
#ifdef __ASSEMBLY__
#define _AC(X,Y) X
#define _AT(T,X) X
#else
#define __AC(X,Y) (X##Y)
#define _AC(X,Y) __AC(X,Y)
#define _AT(T,X) ((T)(X))
#endif
#define _BITUL(x) (_AC(1,UL) << (x))
#define _BITULL(x) (_AC(1,ULL) << (x))
#endif /* !(_LINUX_CONST_H) */
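
The point of _AC()/_AT() is that one header can define constants usable from both assembler and C: the C build gets the type suffix or cast, the assembler build gets the bare number. A typical, purely illustrative use in such a shared header (the names below are made up; the include path for this header depends on the tree):

#include <linux/const.h>     /* path varies; this is the kernel-style location */

/* Usable from .S files (expands to 0x1000) and from C (expands to 0x1000UL). */
#define EXAMPLE_PAGE_SIZE    _AC(0x1000, UL)
#define EXAMPLE_PAGE_MASK    (~(EXAMPLE_PAGE_SIZE - 1))

/* _BITUL() builds single-bit masks the same way: (1UL << 3) in C, 1 << 3 in asm. */
#define EXAMPLE_FLAG_ENABLE  _BITUL(3)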

View file

@@ -0,0 +1,949 @@
/*
* pci_regs.h
*
* PCI standard defines
* Copyright 1994, Drew Eckhardt
* Copyright 1997--1999 Martin Mares <mj@ucw.cz>
*
* For more information, please consult the following manuals (look at
* http://www.pcisig.com/ for how to get them):
*
* PCI BIOS Specification
* PCI Local Bus Specification
* PCI to PCI Bridge Specification
* PCI System Design Guide
*
* For HyperTransport information, please consult the following manuals
* from http://www.hypertransport.org
*
* The HyperTransport I/O Link Specification
*/
#ifndef LINUX_PCI_REGS_H
#define LINUX_PCI_REGS_H
/*
* Under PCI, each device has 256 bytes of configuration address space,
* of which the first 64 bytes are standardized as follows:
*/
#define PCI_STD_HEADER_SIZEOF 64
#define PCI_VENDOR_ID 0x00 /* 16 bits */
#define PCI_DEVICE_ID 0x02 /* 16 bits */
#define PCI_COMMAND 0x04 /* 16 bits */
#define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */
#define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */
#define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */
#define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */
#define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */
#define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */
#define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */
#define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */
#define PCI_COMMAND_SERR 0x100 /* Enable SERR */
#define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */
#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
#define PCI_STATUS 0x06 /* 16 bits */
#define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */
#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
#define PCI_STATUS_66MHZ 0x20 /* Support 66 MHz PCI 2.1 bus */
#define PCI_STATUS_UDF 0x40 /* Support User Definable Features [obsolete] */
#define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */
#define PCI_STATUS_PARITY 0x100 /* Detected parity error */
#define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */
#define PCI_STATUS_DEVSEL_FAST 0x000
#define PCI_STATUS_DEVSEL_MEDIUM 0x200
#define PCI_STATUS_DEVSEL_SLOW 0x400
#define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */
#define PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */
#define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */
#define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */
#define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */
#define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */
#define PCI_REVISION_ID 0x08 /* Revision ID */
#define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */
#define PCI_CLASS_DEVICE 0x0a /* Device class */
#define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */
#define PCI_LATENCY_TIMER 0x0d /* 8 bits */
#define PCI_HEADER_TYPE 0x0e /* 8 bits */
#define PCI_HEADER_TYPE_NORMAL 0
#define PCI_HEADER_TYPE_BRIDGE 1
#define PCI_HEADER_TYPE_CARDBUS 2
#define PCI_BIST 0x0f /* 8 bits */
#define PCI_BIST_CODE_MASK 0x0f /* Return result */
#define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */
#define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */
/*
* Base addresses specify locations in memory or I/O space.
* Decoded size can be determined by writing a value of
* 0xffffffff to the register, and reading it back. Only
* 1 bits are decoded.
*/
#define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */
#define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */
#define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */
#define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */
#define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */
#define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */
#define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */
#define PCI_BASE_ADDRESS_SPACE_IO 0x01
#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00
#define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06
#define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */
#define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M [obsolete] */
#define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */
#define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? */
#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL)
#define PCI_BASE_ADDRESS_IO_MASK (~0x03UL)
/* bit 1 is reserved if address_space = 1 */
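/*
 * A minimal sketch of the sizing procedure described above;
 * pci_config_readl()/pci_config_writel() are assumed config-space
 * accessors, not defined in this header:
 *
 *	u32 old = pci_config_readl(dev, PCI_BASE_ADDRESS_0);
 *	u32 probe, size;
 *
 *	pci_config_writel(dev, PCI_BASE_ADDRESS_0, ~0U);
 *	probe = pci_config_readl(dev, PCI_BASE_ADDRESS_0);
 *	pci_config_writel(dev, PCI_BASE_ADDRESS_0, old);
 *
 *	if (old & PCI_BASE_ADDRESS_SPACE_IO)
 *		size = ~(probe & (u32)PCI_BASE_ADDRESS_IO_MASK) + 1;
 *	else
 *		size = ~(probe & (u32)PCI_BASE_ADDRESS_MEM_MASK) + 1;
 *
 * e.g. a memory BAR that reads back 0xffff0000 decodes to a 64 KiB region.
 */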
/* Header type 0 (normal devices) */
#define PCI_CARDBUS_CIS 0x28
#define PCI_SUBSYSTEM_VENDOR_ID 0x2c
#define PCI_SUBSYSTEM_ID 0x2e
#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
#define PCI_ROM_ADDRESS_ENABLE 0x01
#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
/* 0x35-0x3b are reserved */
#define PCI_INTERRUPT_LINE 0x3c /* 8 bits */
#define PCI_INTERRUPT_PIN 0x3d /* 8 bits */
#define PCI_MIN_GNT 0x3e /* 8 bits */
#define PCI_MAX_LAT 0x3f /* 8 bits */
/* Header type 1 (PCI-to-PCI bridges) */
#define PCI_PRIMARY_BUS 0x18 /* Primary bus number */
#define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */
#define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */
#define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */
#define PCI_IO_BASE 0x1c /* I/O range behind the bridge */
#define PCI_IO_LIMIT 0x1d
#define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */
#define PCI_IO_RANGE_TYPE_16 0x00
#define PCI_IO_RANGE_TYPE_32 0x01
#define PCI_IO_RANGE_MASK (~0x0fUL) /* Standard 4K I/O windows */
#define PCI_IO_1K_RANGE_MASK (~0x03UL) /* Intel 1K I/O windows */
#define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */
#define PCI_MEMORY_BASE 0x20 /* Memory range behind */
#define PCI_MEMORY_LIMIT 0x22
#define PCI_MEMORY_RANGE_TYPE_MASK 0x0fUL
#define PCI_MEMORY_RANGE_MASK (~0x0fUL)
#define PCI_PREF_MEMORY_BASE 0x24 /* Prefetchable memory range behind */
#define PCI_PREF_MEMORY_LIMIT 0x26
#define PCI_PREF_RANGE_TYPE_MASK 0x0fUL
#define PCI_PREF_RANGE_TYPE_32 0x00
#define PCI_PREF_RANGE_TYPE_64 0x01
#define PCI_PREF_RANGE_MASK (~0x0fUL)
#define PCI_PREF_BASE_UPPER32 0x28 /* Upper half of prefetchable memory range */
#define PCI_PREF_LIMIT_UPPER32 0x2c
#define PCI_IO_BASE_UPPER16 0x30 /* Upper half of I/O addresses */
#define PCI_IO_LIMIT_UPPER16 0x32
/* 0x34 same as for htype 0 */
/* 0x35-0x3b is reserved */
#define PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */
/* 0x3c-0x3d are same as for htype 0 */
#define PCI_BRIDGE_CONTROL 0x3e
#define PCI_BRIDGE_CTL_PARITY 0x01 /* Enable parity detection on secondary interface */
#define PCI_BRIDGE_CTL_SERR 0x02 /* The same for SERR forwarding */
#define PCI_BRIDGE_CTL_ISA 0x04 /* Enable ISA mode */
#define PCI_BRIDGE_CTL_VGA 0x08 /* Forward VGA addresses */
#define PCI_BRIDGE_CTL_MASTER_ABORT 0x20 /* Report master aborts */
#define PCI_BRIDGE_CTL_BUS_RESET 0x40 /* Secondary bus reset */
#define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */
/* Header type 2 (CardBus bridges) */
#define PCI_CB_CAPABILITY_LIST 0x14
/* 0x15 reserved */
#define PCI_CB_SEC_STATUS 0x16 /* Secondary status */
#define PCI_CB_PRIMARY_BUS 0x18 /* PCI bus number */
#define PCI_CB_CARD_BUS 0x19 /* CardBus bus number */
#define PCI_CB_SUBORDINATE_BUS 0x1a /* Subordinate bus number */
#define PCI_CB_LATENCY_TIMER 0x1b /* CardBus latency timer */
#define PCI_CB_MEMORY_BASE_0 0x1c
#define PCI_CB_MEMORY_LIMIT_0 0x20
#define PCI_CB_MEMORY_BASE_1 0x24
#define PCI_CB_MEMORY_LIMIT_1 0x28
#define PCI_CB_IO_BASE_0 0x2c
#define PCI_CB_IO_BASE_0_HI 0x2e
#define PCI_CB_IO_LIMIT_0 0x30
#define PCI_CB_IO_LIMIT_0_HI 0x32
#define PCI_CB_IO_BASE_1 0x34
#define PCI_CB_IO_BASE_1_HI 0x36
#define PCI_CB_IO_LIMIT_1 0x38
#define PCI_CB_IO_LIMIT_1_HI 0x3a
#define PCI_CB_IO_RANGE_MASK (~0x03UL)
/* 0x3c-0x3d are same as for htype 0 */
#define PCI_CB_BRIDGE_CONTROL 0x3e
#define PCI_CB_BRIDGE_CTL_PARITY 0x01 /* Similar to standard bridge control register */
#define PCI_CB_BRIDGE_CTL_SERR 0x02
#define PCI_CB_BRIDGE_CTL_ISA 0x04
#define PCI_CB_BRIDGE_CTL_VGA 0x08
#define PCI_CB_BRIDGE_CTL_MASTER_ABORT 0x20
#define PCI_CB_BRIDGE_CTL_CB_RESET 0x40 /* CardBus reset */
#define PCI_CB_BRIDGE_CTL_16BIT_INT 0x80 /* Enable interrupt for 16-bit cards */
#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100 /* Prefetch enable for both memory regions */
#define PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200
#define PCI_CB_BRIDGE_CTL_POST_WRITES 0x400
#define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40
#define PCI_CB_SUBSYSTEM_ID 0x42
#define PCI_CB_LEGACY_MODE_BASE 0x44 /* 16-bit PC Card legacy mode base address (ExCa) */
/* 0x48-0x7f reserved */
/* Capability lists */
#define PCI_CAP_LIST_ID 0 /* Capability ID */
#define PCI_CAP_ID_PM 0x01 /* Power Management */
#define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */
#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */
#define PCI_CAP_ID_SLOTID 0x04 /* Slot Identification */
#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */
#define PCI_CAP_ID_CHSWP 0x06 /* CompactPCI HotSwap */
#define PCI_CAP_ID_PCIX 0x07 /* PCI-X */
#define PCI_CAP_ID_HT 0x08 /* HyperTransport */
#define PCI_CAP_ID_VNDR 0x09 /* Vendor-Specific */
#define PCI_CAP_ID_DBG 0x0A /* Debug port */
#define PCI_CAP_ID_CCRC 0x0B /* CompactPCI Central Resource Control */
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
#define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */
#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */
#define PCI_CAP_ID_SECDEV 0x0F /* Secure Device */
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
#define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. */
#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */
#define PCI_CAP_ID_EA 0x14 /* PCI Enhanced Allocation */
#define PCI_CAP_ID_MAX PCI_CAP_ID_EA
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
#define PCI_CAP_SIZEOF 4
/* Power Management Registers */
#define PCI_PM_PMC 2 /* PM Capabilities Register */
#define PCI_PM_CAP_VER_MASK 0x0007 /* Version */
#define PCI_PM_CAP_PME_CLOCK 0x0008 /* PME clock required */
#define PCI_PM_CAP_RESERVED 0x0010 /* Reserved field */
#define PCI_PM_CAP_DSI 0x0020 /* Device specific initialization */
#define PCI_PM_CAP_AUX_POWER 0x01C0 /* Auxiliary power support mask */
#define PCI_PM_CAP_D1 0x0200 /* D1 power state support */
#define PCI_PM_CAP_D2 0x0400 /* D2 power state support */
#define PCI_PM_CAP_PME 0x0800 /* PME pin supported */
#define PCI_PM_CAP_PME_MASK 0xF800 /* PME Mask of all supported states */
#define PCI_PM_CAP_PME_D0 0x0800 /* PME# from D0 */
#define PCI_PM_CAP_PME_D1 0x1000 /* PME# from D1 */
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
#define PCI_PM_CTRL 4 /* PM control and status register */
#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */
#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */
#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */
#define PCI_PM_CTRL_PME_STATUS 0x8000 /* PME pin status */
#define PCI_PM_PPB_EXTENSIONS 6 /* PPB support extensions (??) */
#define PCI_PM_PPB_B2_B3 0x40 /* Stop clock when in D3hot (??) */
#define PCI_PM_BPCC_ENABLE 0x80 /* Bus power/clock control enable (??) */
#define PCI_PM_DATA_REGISTER 7 /* (??) */
#define PCI_PM_SIZEOF 8
/* AGP registers */
#define PCI_AGP_VERSION 2 /* BCD version number */
#define PCI_AGP_RFU 3 /* Rest of capability flags */
#define PCI_AGP_STATUS 4 /* Status register */
#define PCI_AGP_STATUS_RQ_MASK 0xff000000 /* Maximum number of requests - 1 */
#define PCI_AGP_STATUS_SBA 0x0200 /* Sideband addressing supported */
#define PCI_AGP_STATUS_64BIT 0x0020 /* 64-bit addressing supported */
#define PCI_AGP_STATUS_FW 0x0010 /* FW transfers supported */
#define PCI_AGP_STATUS_RATE4 0x0004 /* 4x transfer rate supported */
#define PCI_AGP_STATUS_RATE2 0x0002 /* 2x transfer rate supported */
#define PCI_AGP_STATUS_RATE1 0x0001 /* 1x transfer rate supported */
#define PCI_AGP_COMMAND 8 /* Control register */
#define PCI_AGP_COMMAND_RQ_MASK 0xff000000 /* Master: Maximum number of requests */
#define PCI_AGP_COMMAND_SBA 0x0200 /* Sideband addressing enabled */
#define PCI_AGP_COMMAND_AGP 0x0100 /* Allow processing of AGP transactions */
#define PCI_AGP_COMMAND_64BIT 0x0020 /* Allow processing of 64-bit addresses */
#define PCI_AGP_COMMAND_FW 0x0010 /* Force FW transfers */
#define PCI_AGP_COMMAND_RATE4 0x0004 /* Use 4x rate */
#define PCI_AGP_COMMAND_RATE2 0x0002 /* Use 2x rate */
#define PCI_AGP_COMMAND_RATE1 0x0001 /* Use 1x rate */
#define PCI_AGP_SIZEOF 12
/* Vital Product Data */
#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) */
#define PCI_VPD_ADDR_MASK 0x7fff /* Address mask */
#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
#define PCI_VPD_DATA 4 /* 32-bits of data returned here */
#define PCI_CAP_VPD_SIZEOF 8
/* Slot Identification */
#define PCI_SID_ESR 2 /* Expansion Slot Register */
#define PCI_SID_ESR_NSLOTS 0x1f /* Number of expansion slots available */
#define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */
#define PCI_SID_CHASSIS_NR 3 /* Chassis Number */
/* Message Signalled Interrupts registers */
#define PCI_MSI_FLAGS 2 /* Message Control */
#define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */
#define PCI_MSI_FLAGS_QMASK 0x000e /* Maximum queue size available */
#define PCI_MSI_FLAGS_QSIZE 0x0070 /* Message queue size configured */
#define PCI_MSI_FLAGS_64BIT 0x0080 /* 64-bit addresses allowed */
#define PCI_MSI_FLAGS_MASKBIT 0x0100 /* Per-vector masking capable */
#define PCI_MSI_RFU 3 /* Rest of capability flags */
#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
/* MSI-X registers */
#define PCI_MSIX_FLAGS 2 /* Message Control */
#define PCI_MSIX_FLAGS_QSIZE 0x07FF /* Table size */
#define PCI_MSIX_FLAGS_MASKALL 0x4000 /* Mask all vectors for this function */
#define PCI_MSIX_FLAGS_ENABLE 0x8000 /* MSI-X enable */
#define PCI_MSIX_TABLE 4 /* Table offset */
#define PCI_MSIX_TABLE_BIR 0x00000007 /* BAR index */
#define PCI_MSIX_TABLE_OFFSET 0xfffffff8 /* Offset into specified BAR */
#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
/* MSI-X Table entry format */
#define PCI_MSIX_ENTRY_SIZE 16
#define PCI_MSIX_ENTRY_LOWER_ADDR 0
#define PCI_MSIX_ENTRY_UPPER_ADDR 4
#define PCI_MSIX_ENTRY_DATA 8
#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
/* CompactPCI Hotswap Register */
#define PCI_CHSWP_CSR 2 /* Control and Status Register */
#define PCI_CHSWP_DHA 0x01 /* Device Hiding Arm */
#define PCI_CHSWP_EIM 0x02 /* ENUM# Signal Mask */
#define PCI_CHSWP_PIE 0x04 /* Pending Insert or Extract */
#define PCI_CHSWP_LOO 0x08 /* LED On / Off */
#define PCI_CHSWP_PI 0x30 /* Programming Interface */
#define PCI_CHSWP_EXT 0x40 /* ENUM# status - extraction */
#define PCI_CHSWP_INS 0x80 /* ENUM# status - insertion */
/* PCI Advanced Feature registers */
#define PCI_AF_LENGTH 2
#define PCI_AF_CAP 3
#define PCI_AF_CAP_TP 0x01
#define PCI_AF_CAP_FLR 0x02
#define PCI_AF_CTRL 4
#define PCI_AF_CTRL_FLR 0x01
#define PCI_AF_STATUS 5
#define PCI_AF_STATUS_TP 0x01
#define PCI_CAP_AF_SIZEOF 6 /* size of AF registers */
/* PCI Enhanced Allocation registers */
#define PCI_EA_NUM_ENT 2 /* Number of Capability Entries */
#define PCI_EA_NUM_ENT_MASK 0x3f /* Num Entries Mask */
#define PCI_EA_FIRST_ENT 4 /* First EA Entry in List */
#define PCI_EA_FIRST_ENT_BRIDGE 8 /* First EA Entry for Bridges */
#define PCI_EA_ES 0x00000007 /* Entry Size */
#define PCI_EA_BEI 0x000000f0 /* BAR Equivalent Indicator */
/* 0-5 map to BARs 0-5 respectively */
#define PCI_EA_BEI_BAR0 0
#define PCI_EA_BEI_BAR5 5
#define PCI_EA_BEI_BRIDGE 6 /* Resource behind bridge */
#define PCI_EA_BEI_ENI 7 /* Equivalent Not Indicated */
#define PCI_EA_BEI_ROM 8 /* Expansion ROM */
/* 9-14 map to VF BARs 0-5 respectively */
#define PCI_EA_BEI_VF_BAR0 9
#define PCI_EA_BEI_VF_BAR5 14
#define PCI_EA_BEI_RESERVED 15 /* Reserved - Treat like ENI */
#define PCI_EA_PP 0x0000ff00 /* Primary Properties */
#define PCI_EA_SP 0x00ff0000 /* Secondary Properties */
#define PCI_EA_P_MEM 0x00 /* Non-Prefetch Memory */
#define PCI_EA_P_MEM_PREFETCH 0x01 /* Prefetchable Memory */
#define PCI_EA_P_IO 0x02 /* I/O Space */
#define PCI_EA_P_VF_MEM_PREFETCH 0x03 /* VF Prefetchable Memory */
#define PCI_EA_P_VF_MEM 0x04 /* VF Non-Prefetch Memory */
#define PCI_EA_P_BRIDGE_MEM 0x05 /* Bridge Non-Prefetch Memory */
#define PCI_EA_P_BRIDGE_MEM_PREFETCH 0x06 /* Bridge Prefetchable Memory */
#define PCI_EA_P_BRIDGE_IO 0x07 /* Bridge I/O Space */
/* 0x08-0xfc reserved */
#define PCI_EA_P_MEM_RESERVED 0xfd /* Reserved Memory */
#define PCI_EA_P_IO_RESERVED 0xfe /* Reserved I/O Space */
#define PCI_EA_P_UNAVAILABLE 0xff /* Entry Unavailable */
#define PCI_EA_WRITABLE 0x40000000 /* Writable: 1 = RW, 0 = HwInit */
#define PCI_EA_ENABLE 0x80000000 /* Enable for this entry */
#define PCI_EA_BASE 4 /* Base Address Offset */
#define PCI_EA_MAX_OFFSET 8 /* MaxOffset (resource length) */
/* bit 0 is reserved */
#define PCI_EA_IS_64 0x00000002 /* 64-bit field flag */
#define PCI_EA_FIELD_MASK 0xfffffffc /* For Base & Max Offset */
/* PCI-X registers (Type 0 (non-bridge) devices) */
#define PCI_X_CMD 2 /* Modes & Features */
#define PCI_X_CMD_DPERR_E 0x0001 /* Data Parity Error Recovery Enable */
#define PCI_X_CMD_ERO 0x0002 /* Enable Relaxed Ordering */
#define PCI_X_CMD_READ_512 0x0000 /* 512 byte maximum read byte count */
#define PCI_X_CMD_READ_1K 0x0004 /* 1Kbyte maximum read byte count */
#define PCI_X_CMD_READ_2K 0x0008 /* 2Kbyte maximum read byte count */
#define PCI_X_CMD_READ_4K 0x000c /* 4Kbyte maximum read byte count */
#define PCI_X_CMD_MAX_READ 0x000c /* Max Memory Read Byte Count */
/* Max # of outstanding split transactions */
#define PCI_X_CMD_SPLIT_1 0x0000 /* Max 1 */
#define PCI_X_CMD_SPLIT_2 0x0010 /* Max 2 */
#define PCI_X_CMD_SPLIT_3 0x0020 /* Max 3 */
#define PCI_X_CMD_SPLIT_4 0x0030 /* Max 4 */
#define PCI_X_CMD_SPLIT_8 0x0040 /* Max 8 */
#define PCI_X_CMD_SPLIT_12 0x0050 /* Max 12 */
#define PCI_X_CMD_SPLIT_16 0x0060 /* Max 16 */
#define PCI_X_CMD_SPLIT_32 0x0070 /* Max 32 */
#define PCI_X_CMD_MAX_SPLIT 0x0070 /* Max Outstanding Split Transactions */
#define PCI_X_CMD_VERSION(x) (((x) >> 12) & 3) /* Version */
#define PCI_X_STATUS 4 /* PCI-X capabilities */
#define PCI_X_STATUS_DEVFN 0x000000ff /* A copy of devfn */
#define PCI_X_STATUS_BUS 0x0000ff00 /* A copy of bus nr */
#define PCI_X_STATUS_64BIT 0x00010000 /* 64-bit device */
#define PCI_X_STATUS_133MHZ 0x00020000 /* 133 MHz capable */
#define PCI_X_STATUS_SPL_DISC 0x00040000 /* Split Completion Discarded */
#define PCI_X_STATUS_UNX_SPL 0x00080000 /* Unexpected Split Completion */
#define PCI_X_STATUS_COMPLEX 0x00100000 /* Device Complexity */
#define PCI_X_STATUS_MAX_READ 0x00600000 /* Designed Max Memory Read Count */
#define PCI_X_STATUS_MAX_SPLIT 0x03800000 /* Designed Max Outstanding Split Transactions */
#define PCI_X_STATUS_MAX_CUM 0x1c000000 /* Designed Max Cumulative Read Size */
#define PCI_X_STATUS_SPL_ERR 0x20000000 /* Rcvd Split Completion Error Msg */
#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */
#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */
#define PCI_X_ECC_CSR 8 /* ECC control and status */
#define PCI_CAP_PCIX_SIZEOF_V0 8 /* size of registers for Version 0 */
#define PCI_CAP_PCIX_SIZEOF_V1 24 /* size for Version 1 */
#define PCI_CAP_PCIX_SIZEOF_V2 PCI_CAP_PCIX_SIZEOF_V1 /* Same for v2 */
/* PCI-X registers (Type 1 (bridge) devices) */
#define PCI_X_BRIDGE_SSTATUS 2 /* Secondary Status */
#define PCI_X_SSTATUS_64BIT 0x0001 /* Secondary AD interface is 64 bits */
#define PCI_X_SSTATUS_133MHZ 0x0002 /* 133 MHz capable */
#define PCI_X_SSTATUS_FREQ 0x03c0 /* Secondary Bus Mode and Frequency */
#define PCI_X_SSTATUS_VERS 0x3000 /* PCI-X Capability Version */
#define PCI_X_SSTATUS_V1 0x1000 /* Mode 2, not Mode 1 */
#define PCI_X_SSTATUS_V2 0x2000 /* Mode 1 or Modes 1 and 2 */
#define PCI_X_SSTATUS_266MHZ 0x4000 /* 266 MHz capable */
#define PCI_X_SSTATUS_533MHZ 0x8000 /* 533 MHz capable */
#define PCI_X_BRIDGE_STATUS 4 /* Bridge Status */
/* PCI Bridge Subsystem ID registers */
#define PCI_SSVID_VENDOR_ID 4 /* PCI Bridge subsystem vendor ID */
#define PCI_SSVID_DEVICE_ID 6 /* PCI Bridge subsystem device ID */
/* PCI Express capability registers */
#define PCI_EXP_FLAGS 2 /* Capabilities register */
#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */
#define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */
#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
#define PCI_EXP_DEVCAP 4 /* Device capabilities */
#define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */
#define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */
#define PCI_EXP_DEVCAP_EXT_TAG 0x00000020 /* Extended tags */
#define PCI_EXP_DEVCAP_L0S 0x000001c0 /* L0s Acceptable Latency */
#define PCI_EXP_DEVCAP_L1 0x00000e00 /* L1 Acceptable Latency */
#define PCI_EXP_DEVCAP_ATN_BUT 0x00001000 /* Attention Button Present */
#define PCI_EXP_DEVCAP_ATN_IND 0x00002000 /* Attention Indicator Present */
#define PCI_EXP_DEVCAP_PWR_IND 0x00004000 /* Power Indicator Present */
#define PCI_EXP_DEVCAP_RBER 0x00008000 /* Role-Based Error Reporting */
#define PCI_EXP_DEVCAP_PWR_VAL 0x03fc0000 /* Slot Power Limit Value */
#define PCI_EXP_DEVCAP_PWR_SCL 0x0c000000 /* Slot Power Limit Scale */
#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
#define PCI_EXP_DEVCTL 8 /* Device Control */
#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */
#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */
#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
#define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */
#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
#define PCI_EXP_DEVCTL_READRQ_128B 0x0000 /* 128 Bytes */
#define PCI_EXP_DEVCTL_READRQ_256B 0x1000 /* 256 Bytes */
#define PCI_EXP_DEVCTL_READRQ_512B 0x2000 /* 512 Bytes */
#define PCI_EXP_DEVCTL_READRQ_1024B 0x3000 /* 1024 Bytes */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
#define PCI_EXP_DEVSTA 10 /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
#define PCI_EXP_DEVSTA_NFED 0x0002 /* Non-Fatal Error Detected */
#define PCI_EXP_DEVSTA_FED 0x0004 /* Fatal Error Detected */
#define PCI_EXP_DEVSTA_URD 0x0008 /* Unsupported Request Detected */
#define PCI_EXP_DEVSTA_AUXPD 0x0010 /* AUX Power Detected */
#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
#define PCI_EXP_LNKCAP_L1EL 0x00038000 /* L1 Exit Latency */
#define PCI_EXP_LNKCAP_CLKPM 0x00040000 /* Clock Power Management */
#define PCI_EXP_LNKCAP_SDERC 0x00080000 /* Surprise Down Error Reporting Capable */
#define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */
#define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */
#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */
#define PCI_EXP_LNKCTL 16 /* Link Control */
#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
#define PCI_EXP_LNKCTL_ASPM_L0S 0x0001 /* L0s Enable */
#define PCI_EXP_LNKCTL_ASPM_L1 0x0002 /* L1 Enable */
#define PCI_EXP_LNKCTL_RCB 0x0008 /* Read Completion Boundary */
#define PCI_EXP_LNKCTL_LD 0x0010 /* Link Disable */
#define PCI_EXP_LNKCTL_RL 0x0020 /* Retrain Link */
#define PCI_EXP_LNKCTL_CCC 0x0040 /* Common Clock Configuration */
#define PCI_EXP_LNKCTL_ES 0x0080 /* Extended Synch */
#define PCI_EXP_LNKCTL_CLKREQ_EN 0x0100 /* Enable clkreq */
#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */
#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */
#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */
#define PCI_EXP_LNKSTA 18 /* Link Status */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
#define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */
#define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */
#define PCI_EXP_LNKSTA_NLW_X4 0x0040 /* Current Link Width x4 */
#define PCI_EXP_LNKSTA_NLW_X8 0x0080 /* Current Link Width x8 */
#define PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */
#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */
#define PCI_EXP_LNKSTA_SLC 0x1000 /* Slot Clock Configuration */
#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints end here */
#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
#define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */
#define PCI_EXP_SLTCAP_AIP 0x00000008 /* Attention Indicator Present */
#define PCI_EXP_SLTCAP_PIP 0x00000010 /* Power Indicator Present */
#define PCI_EXP_SLTCAP_HPS 0x00000020 /* Hot-Plug Surprise */
#define PCI_EXP_SLTCAP_HPC 0x00000040 /* Hot-Plug Capable */
#define PCI_EXP_SLTCAP_SPLV 0x00007f80 /* Slot Power Limit Value */
#define PCI_EXP_SLTCAP_SPLS 0x00018000 /* Slot Power Limit Scale */
#define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */
#define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */
#define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */
#define PCI_EXP_SLTCTL 24 /* Slot Control */
#define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */
#define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */
#define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */
#define PCI_EXP_SLTCTL_PDCE 0x0008 /* Presence Detect Changed Enable */
#define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */
#define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */
#define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */
#define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */
#define PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */
#define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */
#define PCI_EXP_SLTCTL_PIC 0x0300 /* Power Indicator Control */
#define PCI_EXP_SLTCTL_PWR_IND_ON 0x0100 /* Power Indicator on */
#define PCI_EXP_SLTCTL_PWR_IND_BLINK 0x0200 /* Power Indicator blinking */
#define PCI_EXP_SLTCTL_PWR_IND_OFF 0x0300 /* Power Indicator off */
#define PCI_EXP_SLTCTL_PCC 0x0400 /* Power Controller Control */
#define PCI_EXP_SLTCTL_PWR_ON 0x0000 /* Power On */
#define PCI_EXP_SLTCTL_PWR_OFF 0x0400 /* Power Off */
#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
#define PCI_EXP_SLTSTA 26 /* Slot Status */
#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */
#define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */
#define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */
#define PCI_EXP_SLTSTA_PDC 0x0008 /* Presence Detect Changed */
#define PCI_EXP_SLTSTA_CC 0x0010 /* Command Completed */
#define PCI_EXP_SLTSTA_MRLSS 0x0020 /* MRL Sensor State */
#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
#define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */
#define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */
#define PCI_EXP_RTCTL 28 /* Root Control */
#define PCI_EXP_RTCTL_SECEE 0x0001 /* System Error on Correctable Error */
#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
#define PCI_EXP_RTCAP 30 /* Root Capabilities */
#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
#define PCI_EXP_RTSTA 32 /* Root Status */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
* The Device Capabilities 2, Device Status 2, Device Control 2,
* Link Capabilities 2, Link Status 2, Link Control 2,
* Slot Capabilities 2, Slot Status 2, and Slot Control 2 registers
* are only present on devices with PCIe Capability version 2.
* Use pcie_capability_read_word() and similar interfaces to use them
* safely.
*/
#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */
#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */
#define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */
#define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */
#define PCI_EXP_DEVCTL2_OBFF_MSGA_EN 0x2000 /* Enable OBFF Message type A */
#define PCI_EXP_DEVCTL2_OBFF_MSGB_EN 0x4000 /* Enable OBFF Message type B */
#define PCI_EXP_DEVCTL2_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
#define PCI_EXP_DEVSTA2 42 /* Device Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints end here */
#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5.0GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8.0GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */
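/*
 * A minimal sketch of the version check described in the comment above;
 * pcie_cap_readw()/pcie_cap_readl() are assumed helpers that read a 16- or
 * 32-bit register at the given offset within the PCI Express capability:
 *
 *	u16 flags = pcie_cap_readw(dev, PCI_EXP_FLAGS);
 *	u32 devcap2 = 0;
 *
 *	if ((flags & PCI_EXP_FLAGS_VERS) >= 2)
 *		devcap2 = pcie_cap_readl(dev, PCI_EXP_DEVCAP2);
 *	// otherwise the v2 registers above are not present at all
 */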
/* Extended Capabilities (PCI-X 2.0 and Express) */
#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
#define PCI_EXT_CAP_VER(header) ((header >> 16) & 0xf)
#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
#define PCI_EXT_CAP_ID_ERR 0x01 /* Advanced Error Reporting */
#define PCI_EXT_CAP_ID_VC 0x02 /* Virtual Channel Capability */
#define PCI_EXT_CAP_ID_DSN 0x03 /* Device Serial Number */
#define PCI_EXT_CAP_ID_PWR 0x04 /* Power Budgeting */
#define PCI_EXT_CAP_ID_RCLD 0x05 /* Root Complex Link Declaration */
#define PCI_EXT_CAP_ID_RCILC 0x06 /* Root Complex Internal Link Control */
#define PCI_EXT_CAP_ID_RCEC 0x07 /* Root Complex Event Collector */
#define PCI_EXT_CAP_ID_MFVC 0x08 /* Multi-Function VC Capability */
#define PCI_EXT_CAP_ID_VC9 0x09 /* same as _VC */
#define PCI_EXT_CAP_ID_RCRB 0x0A /* Root Complex RB? */
#define PCI_EXT_CAP_ID_VNDR 0x0B /* Vendor-Specific */
#define PCI_EXT_CAP_ID_CAC 0x0C /* Config Access - obsolete */
#define PCI_EXT_CAP_ID_ACS 0x0D /* Access Control Services */
#define PCI_EXT_CAP_ID_ARI 0x0E /* Alternate Routing ID */
#define PCI_EXT_CAP_ID_ATS 0x0F /* Address Translation Services */
#define PCI_EXT_CAP_ID_SRIOV 0x10 /* Single Root I/O Virtualization */
#define PCI_EXT_CAP_ID_MRIOV 0x11 /* Multi Root I/O Virtualization */
#define PCI_EXT_CAP_ID_MCAST 0x12 /* Multicast */
#define PCI_EXT_CAP_ID_PRI 0x13 /* Page Request Interface */
#define PCI_EXT_CAP_ID_AMD_XXX 0x14 /* Reserved for AMD */
#define PCI_EXT_CAP_ID_REBAR 0x15 /* Resizable BAR */
#define PCI_EXT_CAP_ID_DPA 0x16 /* Dynamic Power Allocation */
#define PCI_EXT_CAP_ID_TPH 0x17 /* TPH Requester */
#define PCI_EXT_CAP_ID_LTR 0x18 /* Latency Tolerance Reporting */
#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe Capability */
#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
#define PCI_ERR_UNC_FCP 0x00002000 /* Flow Control Protocol */
#define PCI_ERR_UNC_COMP_TIME 0x00004000 /* Completion Timeout */
#define PCI_ERR_UNC_COMP_ABORT 0x00008000 /* Completer Abort */
#define PCI_ERR_UNC_UNX_COMP 0x00010000 /* Unexpected Completion */
#define PCI_ERR_UNC_RX_OVER 0x00020000 /* Receiver Overflow */
#define PCI_ERR_UNC_MALF_TLP 0x00040000 /* Malformed TLP */
#define PCI_ERR_UNC_ECRC 0x00080000 /* ECRC Error Status */
#define PCI_ERR_UNC_UNSUP 0x00100000 /* Unsupported Request */
#define PCI_ERR_UNC_ACSV 0x00200000 /* ACS Violation */
#define PCI_ERR_UNC_INTN 0x00400000 /* internal error */
#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
/* Same bits as above */
#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
/* Same bits as above */
#define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */
#define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */
#define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */
#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
#define PCI_ERR_COR_REP_ROLL 0x00000100 /* REPLAY_NUM Rollover */
#define PCI_ERR_COR_REP_TIMER 0x00001000 /* Replay Timer Timeout */
#define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */
#define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */
#define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */
#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
/* Same bits as above */
#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
#define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */
#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
/* Correctable Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001
/* Non-fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002
/* Fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004
#define PCI_ERR_ROOT_STATUS 48
#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
/* Multi ERR_COR Received */
#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002
/* ERR_FATAL/NONFATAL Received */
#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004
/* Multi ERR_FATAL/NONFATAL Received */
#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008
#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First Fatal */
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
/* Virtual Channel */
#define PCI_VC_PORT_CAP1 4
#define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */
#define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */
#define PCI_VC_CAP1_ARB_SIZE 0x00000c00
#define PCI_VC_PORT_CAP2 8
#define PCI_VC_CAP2_32_PHASE 0x00000002
#define PCI_VC_CAP2_64_PHASE 0x00000004
#define PCI_VC_CAP2_128_PHASE 0x00000008
#define PCI_VC_CAP2_ARB_OFF 0xff000000
#define PCI_VC_PORT_CTRL 12
#define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001
#define PCI_VC_PORT_STATUS 14
#define PCI_VC_PORT_STATUS_TABLE 0x00000001
#define PCI_VC_RES_CAP 16
#define PCI_VC_RES_CAP_32_PHASE 0x00000002
#define PCI_VC_RES_CAP_64_PHASE 0x00000004
#define PCI_VC_RES_CAP_128_PHASE 0x00000008
#define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010
#define PCI_VC_RES_CAP_256_PHASE 0x00000020
#define PCI_VC_RES_CAP_ARB_OFF 0xff000000
#define PCI_VC_RES_CTRL 20
#define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000
#define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000
#define PCI_VC_RES_CTRL_ID 0x07000000
#define PCI_VC_RES_CTRL_ENABLE 0x80000000
#define PCI_VC_RES_STATUS 26
#define PCI_VC_RES_STATUS_TABLE 0x00000001
#define PCI_VC_RES_STATUS_NEGO 0x00000002
#define PCI_CAP_VC_BASE_SIZEOF 0x10
#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
/* Power Budgeting */
#define PCI_PWR_DSR 4 /* Data Select Register */
#define PCI_PWR_DATA 8 /* Data Register */
#define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */
#define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */
#define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */
#define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
#define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */
#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
#define PCI_PWR_CAP 12 /* Capability */
#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
#define PCI_EXT_CAP_PWR_SIZEOF 16
/* Vendor-Specific (VSEC, PCI_EXT_CAP_ID_VNDR) */
#define PCI_VNDR_HEADER 4 /* Vendor-Specific Header */
#define PCI_VNDR_HEADER_ID(x) ((x) & 0xffff)
#define PCI_VNDR_HEADER_REV(x) (((x) >> 16) & 0xf)
#define PCI_VNDR_HEADER_LEN(x) (((x) >> 20) & 0xfff)
/*
* HyperTransport sub capability types
*
* Unfortunately there are both 3 bit and 5 bit capability types defined
* in the HT spec, catering for that is a little messy. You probably don't
* want to use these directly, just use pci_find_ht_capability() and it
* will do the right thing for you.
*/
#define HT_3BIT_CAP_MASK 0xE0
#define HT_CAPTYPE_SLAVE 0x00 /* Slave/Primary link configuration */
#define HT_CAPTYPE_HOST 0x20 /* Host/Secondary link configuration */
#define HT_5BIT_CAP_MASK 0xF8
#define HT_CAPTYPE_IRQ 0x80 /* IRQ Configuration */
#define HT_CAPTYPE_REMAPPING_40 0xA0 /* 40 bit address remapping */
#define HT_CAPTYPE_REMAPPING_64 0xA2 /* 64 bit address remapping */
#define HT_CAPTYPE_UNITID_CLUMP 0x90 /* Unit ID clumping */
#define HT_CAPTYPE_EXTCONF 0x98 /* Extended Configuration Space Access */
#define HT_CAPTYPE_MSI_MAPPING 0xA8 /* MSI Mapping Capability */
#define HT_MSI_FLAGS 0x02 /* Offset to flags */
#define HT_MSI_FLAGS_ENABLE 0x1 /* Mapping enable */
#define HT_MSI_FLAGS_FIXED 0x2 /* Fixed mapping only */
#define HT_MSI_FIXED_ADDR 0x00000000FEE00000ULL /* Fixed addr */
#define HT_MSI_ADDR_LO 0x04 /* Offset to low addr bits */
#define HT_MSI_ADDR_LO_MASK 0xFFF00000 /* Low address bit mask */
#define HT_MSI_ADDR_HI 0x08 /* Offset to high addr bits */
#define HT_CAPTYPE_DIRECT_ROUTE 0xB0 /* Direct routing configuration */
#define HT_CAPTYPE_VCSET 0xB8 /* Virtual Channel configuration */
#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */
#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 HyperTransport configuration */
#define HT_CAPTYPE_PM 0xE0 /* HyperTransport power management configuration */
#define HT_CAP_SIZEOF_LONG 28 /* slave & primary */
#define HT_CAP_SIZEOF_SHORT 24 /* host & secondary */
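/*
 * A minimal sketch of the 3-bit vs. 5-bit type handling mentioned above;
 * pci_config_readb() is an assumed accessor and "pos" the start of a
 * HyperTransport capability (the type lives in the byte at offset 3):
 *
 *	u8 cap = pci_config_readb(dev, pos + 3);
 *	u8 type = cap & HT_3BIT_CAP_MASK;
 *
 *	if (type != HT_CAPTYPE_SLAVE && type != HT_CAPTYPE_HOST)
 *		type = cap & HT_5BIT_CAP_MASK;
 *
 *	if (type == HT_CAPTYPE_MSI_MAPPING)
 *		// found an MSI mapping capability
 */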
/* Alternative Routing-ID Interpretation */
#define PCI_ARI_CAP 0x04 /* ARI Capability Register */
#define PCI_ARI_CAP_MFVC 0x0001 /* MFVC Function Groups Capability */
#define PCI_ARI_CAP_ACS 0x0002 /* ACS Function Groups Capability */
#define PCI_ARI_CAP_NFN(x) (((x) >> 8) & 0xff) /* Next Function Number */
#define PCI_ARI_CTRL 0x06 /* ARI Control Register */
#define PCI_ARI_CTRL_MFVC 0x0001 /* MFVC Function Groups Enable */
#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
#define PCI_EXT_CAP_ARI_SIZEOF 8
/* Address Translation Service */
#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */
#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */
#define PCI_ATS_CTRL 0x06 /* ATS Control Register */
#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */
#define PCI_EXT_CAP_ATS_SIZEOF 8
/* Page Request Interface */
#define PCI_PRI_CTRL 0x04 /* PRI control register */
#define PCI_PRI_CTRL_ENABLE 0x01 /* Enable */
#define PCI_PRI_CTRL_RESET 0x02 /* Reset */
#define PCI_PRI_STATUS 0x06 /* PRI status register */
#define PCI_PRI_STATUS_RF 0x001 /* Response Failure */
#define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */
#define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */
#define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
#define PCI_EXT_CAP_PRI_SIZEOF 16
/* Process Address Space ID */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
#define PCI_PASID_CTRL 0x06 /* PASID control register */
#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
#define PCI_EXT_CAP_PASID_SIZEOF 8
/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
#define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */
#define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */
#define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */
#define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */
#define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */
#define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */
#define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */
#define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */
#define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */
#define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */
#define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
#define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */
#define PCI_SRIOV_FUNC_LINK 0x12 /* Function Dependency Link */
#define PCI_SRIOV_VF_OFFSET 0x14 /* First VF Offset */
#define PCI_SRIOV_VF_STRIDE 0x16 /* Following VF Stride */
#define PCI_SRIOV_VF_DID 0x1a /* VF Device ID */
#define PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */
#define PCI_SRIOV_SYS_PGSIZE 0x20 /* System Page Size */
#define PCI_SRIOV_BAR 0x24 /* VF BAR0 */
#define PCI_SRIOV_NUM_BARS 6 /* Number of VF BARs */
#define PCI_SRIOV_VFM 0x3c /* VF Migration State Array Offset*/
#define PCI_SRIOV_VFM_BIR(x) ((x) & 7) /* State BIR */
#define PCI_SRIOV_VFM_OFFSET(x) ((x) & ~7) /* State Offset */
#define PCI_SRIOV_VFM_UA 0x0 /* Inactive.Unavailable */
#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
#define PCI_EXT_CAP_SRIOV_SIZEOF 64
#define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
#define PCI_EXT_CAP_LTR_SIZEOF 8
/* Access Control Service */
#define PCI_ACS_CAP 0x04 /* ACS Capability Register */
#define PCI_ACS_SV 0x01 /* Source Validation */
#define PCI_ACS_TB 0x02 /* Translation Blocking */
#define PCI_ACS_RR 0x04 /* P2P Request Redirect */
#define PCI_ACS_CR 0x08 /* P2P Completion Redirect */
#define PCI_ACS_UF 0x10 /* Upstream Forwarding */
#define PCI_ACS_EC 0x20 /* P2P Egress Control */
#define PCI_ACS_DT 0x40 /* Direct Translated P2P */
#define PCI_ACS_EGRESS_BITS 0x05 /* ACS Egress Control Vector Size */
#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
#define PCI_VSEC_HDR 4 /* extended cap - vendor-specific */
#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */
/* SATA capability */
#define PCI_SATA_REGS 4 /* SATA REGs specifier */
#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */
#define PCI_SATA_REGS_INLINE 0xF /* REGS in config space */
#define PCI_SATA_SIZEOF_SHORT 8
#define PCI_SATA_SIZEOF_LONG 16
/* Resizable BARs */
#define PCI_REBAR_CTRL 8 /* control register */
#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */
#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */
/* Dynamic Power Allocation */
#define PCI_DPA_CAP 4 /* capability register */
#define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */
#define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */
/* TPH Requester */
#define PCI_TPH_CAP 4 /* capability register */
#define PCI_TPH_CAP_LOC_MASK 0x600 /* location mask */
#define PCI_TPH_LOC_NONE 0x000 /* no location */
#define PCI_TPH_LOC_CAP 0x200 /* in capability */
#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */
#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
#endif /* LINUX_PCI_REGS_H */

View file

@ -0,0 +1,108 @@
/*
* ARM Power State and Coordination Interface (PSCI) header
*
* This header holds common PSCI defines and macros shared
* by: ARM kernel, ARM64 kernel, KVM ARM/ARM64 and user space.
*
* Copyright (C) 2014 Linaro Ltd.
* Author: Anup Patel <anup.patel@linaro.org>
*/
#ifndef _UAPI_LINUX_PSCI_H
#define _UAPI_LINUX_PSCI_H
/*
* PSCI v0.1 interface
*
* The PSCI v0.1 function numbers are implementation defined.
*
* Only PSCI return values such as: SUCCESS, NOT_SUPPORTED,
* INVALID_PARAMS, and DENIED defined below are applicable
* to PSCI v0.1.
*/
/* PSCI v0.2 interface */
#define PSCI_0_2_FN_BASE 0x84000000
#define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n))
#define PSCI_0_2_64BIT 0x40000000
#define PSCI_0_2_FN64_BASE \
(PSCI_0_2_FN_BASE + PSCI_0_2_64BIT)
#define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n))
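/*
 * For example, PSCI_0_2_FN(3) (CPU_ON below) evaluates to 0x84000003 for the
 * 32-bit calling convention, while PSCI_0_2_FN64(3) evaluates to 0xc4000003
 * for the 64-bit (SMC64) convention.
 */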
#define PSCI_0_2_FN_PSCI_VERSION PSCI_0_2_FN(0)
#define PSCI_0_2_FN_CPU_SUSPEND PSCI_0_2_FN(1)
#define PSCI_0_2_FN_CPU_OFF PSCI_0_2_FN(2)
#define PSCI_0_2_FN_CPU_ON PSCI_0_2_FN(3)
#define PSCI_0_2_FN_AFFINITY_INFO PSCI_0_2_FN(4)
#define PSCI_0_2_FN_MIGRATE PSCI_0_2_FN(5)
#define PSCI_0_2_FN_MIGRATE_INFO_TYPE PSCI_0_2_FN(6)
#define PSCI_0_2_FN_MIGRATE_INFO_UP_CPU PSCI_0_2_FN(7)
#define PSCI_0_2_FN_SYSTEM_OFF PSCI_0_2_FN(8)
#define PSCI_0_2_FN_SYSTEM_RESET PSCI_0_2_FN(9)
#define PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1)
#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3)
#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4)
#define PSCI_0_2_FN64_MIGRATE PSCI_0_2_FN64(5)
#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10)
#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14)
#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
#define PSCI_0_2_POWER_STATE_ID_SHIFT 0
#define PSCI_0_2_POWER_STATE_TYPE_SHIFT 16
#define PSCI_0_2_POWER_STATE_TYPE_MASK \
(0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
#define PSCI_0_2_POWER_STATE_AFFL_SHIFT 24
#define PSCI_0_2_POWER_STATE_AFFL_MASK \
(0x3 << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
/* PSCI extended power state encoding for CPU_SUSPEND function */
#define PSCI_1_0_EXT_POWER_STATE_ID_MASK 0xfffffff
#define PSCI_1_0_EXT_POWER_STATE_ID_SHIFT 0
#define PSCI_1_0_EXT_POWER_STATE_TYPE_SHIFT 30
#define PSCI_1_0_EXT_POWER_STATE_TYPE_MASK \
(0x1 << PSCI_1_0_EXT_POWER_STATE_TYPE_SHIFT)
/* PSCI v0.2 affinity level state returned by AFFINITY_INFO */
#define PSCI_0_2_AFFINITY_LEVEL_ON 0
#define PSCI_0_2_AFFINITY_LEVEL_OFF 1
#define PSCI_0_2_AFFINITY_LEVEL_ON_PENDING 2
/* PSCI v0.2 multicore support in Trusted OS returned by MIGRATE_INFO_TYPE */
#define PSCI_0_2_TOS_UP_MIGRATE 0
#define PSCI_0_2_TOS_UP_NO_MIGRATE 1
#define PSCI_0_2_TOS_MP 2
/* PSCI version decoding (independent of PSCI version) */
#define PSCI_VERSION_MAJOR_SHIFT 16
#define PSCI_VERSION_MINOR_MASK \
((1U << PSCI_VERSION_MAJOR_SHIFT) - 1)
#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK
#define PSCI_VERSION_MAJOR(ver) \
(((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
#define PSCI_VERSION_MINOR(ver) \
((ver) & PSCI_VERSION_MINOR_MASK)
/* PSCI features decoding (>=1.0) */
#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1
#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK \
(0x1 << PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT)
/* PSCI return values (inclusive of all PSCI versions) */
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_NOT_SUPPORTED -1
#define PSCI_RET_INVALID_PARAMS -2
#define PSCI_RET_DENIED -3
#define PSCI_RET_ALREADY_ON -4
#define PSCI_RET_ON_PENDING -5
#define PSCI_RET_INTERNAL_FAILURE -6
#define PSCI_RET_NOT_PRESENT -7
#define PSCI_RET_DISABLED -8
#define PSCI_RET_INVALID_ADDRESS -9
#endif /* _UAPI_LINUX_PSCI_H */

View file

@ -0,0 +1,73 @@
/*
* Edu PCI device.
*
* Copyright (C) 2016 Red Hat, Inc.
*
* Authors:
* Peter Xu <peterx@redhat.com>,
*
* This work is licensed under the terms of the GNU LGPL, version 2 or
* later.
*/
#include "pci-edu.h"
#include "asm/barrier.h"
/*
 * Return true if the device is alive.  The EDU liveness register reads back
 * the bitwise inverse of the last value written, so a healthy device
 * satisfies written == ~read.
 */
static inline bool edu_check_alive(struct pci_edu_dev *dev)
{
	static uint32_t live_count = 1;
	uint32_t value;

	edu_reg_writel(dev, EDU_REG_ALIVE, live_count++);
	value = edu_reg_readl(dev, EDU_REG_ALIVE);

	return (live_count - 1 == ~value);
}

bool edu_init(struct pci_edu_dev *dev)
{
	pcidevaddr_t dev_addr;

	dev_addr = pci_find_dev(PCI_VENDOR_ID_QEMU, PCI_DEVICE_ID_EDU);
	if (dev_addr == PCIDEVADDR_INVALID)
		return false;

	pci_dev_init(&dev->pci_dev, dev_addr);
	pci_enable_defaults(&dev->pci_dev);
	dev->reg_base = ioremap(dev->pci_dev.resource[EDU_BAR], PAGE_SIZE);
	assert(edu_check_alive(dev));

	return true;
}

void edu_dma(struct pci_edu_dev *dev, iova_t iova,
	     size_t size, unsigned int dev_offset, bool from_device)
{
	uint64_t from, to;
	uint32_t cmd = EDU_CMD_DMA_START;

	assert(size <= EDU_DMA_SIZE_MAX);
	assert(dev_offset < EDU_DMA_SIZE_MAX);

	printf("edu device DMA start %s addr 0x%" PRIx64 " size 0x%lx off 0x%x\n",
	       from_device ? "FROM" : "TO",
	       iova, (ulong)size, dev_offset);

	if (from_device) {
		from = dev_offset + EDU_DMA_START;
		to = iova;
		cmd |= EDU_CMD_DMA_FROM;
	} else {
		from = iova;
		to = EDU_DMA_START + dev_offset;
		cmd |= EDU_CMD_DMA_TO;
	}

	edu_reg_writeq(dev, EDU_REG_DMA_SRC, from);
	edu_reg_writeq(dev, EDU_REG_DMA_DST, to);
	edu_reg_writeq(dev, EDU_REG_DMA_COUNT, size);
	edu_reg_writel(dev, EDU_REG_DMA_CMD, cmd);

	/* Wait until the DMA has finished */
	while (edu_reg_readl(dev, EDU_REG_DMA_CMD) & EDU_CMD_DMA_START)
		cpu_relax();
}
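
/*
 * A minimal usage sketch: probe the device and bounce a buffer through the
 * EDU-internal DMA buffer.  It assumes the buffer's physical address is
 * also a valid IOVA for the device (no IOMMU translation in the way):
 *
 *	struct pci_edu_dev dev;
 *	static char buf[64];
 *
 *	if (!edu_init(&dev))
 *		return;
 *
 *	// copy buf into the device buffer at offset 0 ...
 *	edu_dma(&dev, virt_to_phys(buf), sizeof(buf), 0, false);
 *	// ... then read it back from the device
 *	edu_dma(&dev, virt_to_phys(buf), sizeof(buf), 0, true);
 */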

View file

@ -0,0 +1,86 @@
/*
* Edu PCI device header.
*
* Copyright (C) 2016 Red Hat, Inc.
*
* Authors:
* Peter Xu <peterx@redhat.com>,
*
* This work is licensed under the terms of the GNU LGPL, version 2 or
* later.
*
* The EDU device is a virtual device provided by QEMU. Please refer to
* docs/specs/edu.txt in the QEMU repository for the EDU device manual.
*/
#ifndef __PCI_EDU_H__
#define __PCI_EDU_H__
#include "pci.h"
#include "asm/io.h"
#define PCI_VENDOR_ID_QEMU 0x1234
#define PCI_DEVICE_ID_EDU 0x11e8
/* The only bar used by EDU device */
#define EDU_BAR 0
#define EDU_MAGIC 0xed
#define EDU_VERSION 0x100
#define EDU_DMA_BUF_SIZE (1 << 20)
#define EDU_INPUT_BUF_SIZE 256
#define EDU_REG_ID 0x0
#define EDU_REG_ALIVE 0x4
#define EDU_REG_FACTORIAL 0x8
#define EDU_REG_STATUS 0x20
#define EDU_REG_INTR_STATUS 0x24
#define EDU_REG_INTR_RAISE 0x60
#define EDU_REG_INTR_ACK 0x64
#define EDU_REG_DMA_SRC 0x80
#define EDU_REG_DMA_DST 0x88
#define EDU_REG_DMA_COUNT 0x90
#define EDU_REG_DMA_CMD 0x98
#define EDU_CMD_DMA_START 0x01
#define EDU_CMD_DMA_FROM 0x02
#define EDU_CMD_DMA_TO 0x00
#define EDU_STATUS_FACTORIAL 0x1
#define EDU_STATUS_INT_ENABLE 0x80
#define EDU_DMA_START 0x40000
#define EDU_DMA_SIZE_MAX 4096
struct pci_edu_dev {
	struct pci_dev pci_dev;
	volatile void *reg_base;
};
#define edu_reg(d, r) (volatile void *)((d)->reg_base + (r))

static inline uint64_t edu_reg_readq(struct pci_edu_dev *dev, int reg)
{
	return __raw_readq(edu_reg(dev, reg));
}

static inline uint32_t edu_reg_readl(struct pci_edu_dev *dev, int reg)
{
	return __raw_readl(edu_reg(dev, reg));
}

static inline void edu_reg_writeq(struct pci_edu_dev *dev, int reg,
				  uint64_t val)
{
	__raw_writeq(val, edu_reg(dev, reg));
}

static inline void edu_reg_writel(struct pci_edu_dev *dev, int reg,
				  uint32_t val)
{
	__raw_writel(val, edu_reg(dev, reg));
}
bool edu_init(struct pci_edu_dev *dev);
void edu_dma(struct pci_edu_dev *dev, iova_t iova,
	     size_t size, unsigned int dev_offset, bool from_device);
#endif

View file

@ -0,0 +1,320 @@
/*
* Generic PCI host controller as described in PCI Bus Binding to Open Firmware
*
* Copyright (C) 2016, Red Hat Inc, Alexander Gordeev <agordeev@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#include "devicetree.h"
#include "alloc.h"
#include "pci.h"
#include "asm/pci.h"
#include "asm/io.h"
#include "pci-host-generic.h"
#include <linux/pci_regs.h>
static struct pci_host_bridge *pci_host_bridge;
static int of_flags_to_pci_type(u32 of_flags)
{
	static int type_map[] = {
		[1] = PCI_BASE_ADDRESS_SPACE_IO,
		[2] = PCI_BASE_ADDRESS_MEM_TYPE_32,
		[3] = PCI_BASE_ADDRESS_MEM_TYPE_64
	};
	int idx = (of_flags >> 24) & 0x03;
	int res;

	assert(idx > 0);
	res = type_map[idx];

	if (of_flags & 0x40000000)
		res |= PCI_BASE_ADDRESS_MEM_PREFETCH;

	return res;
}

static int pci_bar_type(u32 bar)
{
	if (bar & PCI_BASE_ADDRESS_SPACE)
		return PCI_BASE_ADDRESS_SPACE_IO;
	else
		return bar & (PCI_BASE_ADDRESS_MEM_TYPE_MASK |
			      PCI_BASE_ADDRESS_MEM_PREFETCH);
}
/*
* Probe DT for a generic PCI host controller
* See kernel Documentation/devicetree/bindings/pci/host-generic-pci.txt
* and function gen_pci_probe() in drivers/pci/host/pci-host-generic.c
*/
static struct pci_host_bridge *pci_dt_probe(void)
{
	struct pci_host_bridge *host;
	const void *fdt = dt_fdt();
	const struct fdt_property *prop;
	struct dt_pbus_reg base;
	struct dt_device dt_dev;
	struct dt_bus dt_bus;
	struct pci_addr_space *as;
	fdt32_t *data;
	u32 bus, bus_max;
	u32 nac, nsc, nac_root, nsc_root;
	int nr_range_cells, nr_addr_spaces;
	int ret, node, len, i;

	if (!dt_available()) {
		printf("No device tree found\n");
		return NULL;
	}

	dt_bus_init_defaults(&dt_bus);
	dt_device_init(&dt_dev, &dt_bus, NULL);

	node = fdt_path_offset(fdt, "/");
	assert(node >= 0);

	ret = dt_get_nr_cells(node, &nac_root, &nsc_root);
	assert(ret == 0);
	assert(nac_root == 1 || nac_root == 2);

	node = fdt_node_offset_by_compatible(fdt, node,
					     "pci-host-ecam-generic");
	if (node == -FDT_ERR_NOTFOUND) {
		printf("No PCIe ECAM compatible controller found\n");
		return NULL;
	}
	assert(node >= 0);

	prop = fdt_get_property(fdt, node, "device_type", &len);
	assert(prop && len == 4 && !strcmp((char *)prop->data, "pci"));

	dt_device_bind_node(&dt_dev, node);
	ret = dt_pbus_get_base(&dt_dev, &base);
	assert(ret == 0);

	prop = fdt_get_property(fdt, node, "bus-range", &len);
	if (prop == NULL) {
		assert(len == -FDT_ERR_NOTFOUND);
		bus = 0x00;
		bus_max = 0xff;
	} else {
		data = (fdt32_t *)prop->data;
		bus = fdt32_to_cpu(data[0]);
		bus_max = fdt32_to_cpu(data[1]);
		assert(bus <= bus_max);
	}
assert(bus_max < base.size / (1 << PCI_ECAM_BUS_SHIFT));
ret = dt_get_nr_cells(node, &nac, &nsc);
assert(ret == 0);
assert(nac == 3 && nsc == 2);
prop = fdt_get_property(fdt, node, "ranges", &len);
assert(prop != NULL);
nr_range_cells = nac + nsc + nac_root;
nr_addr_spaces = (len / 4) / nr_range_cells;
assert(nr_addr_spaces);
host = malloc(sizeof(*host) +
sizeof(host->addr_space[0]) * nr_addr_spaces);
assert(host != NULL);
host->start = base.addr;
host->size = base.size;
host->bus = bus;
host->bus_max = bus_max;
host->nr_addr_spaces = nr_addr_spaces;
data = (fdt32_t *)prop->data;
as = &host->addr_space[0];
for (i = 0; i < nr_addr_spaces; i++) {
/*
* The PCI binding encodes the PCI address with three
* cells as follows:
*
* phys.hi cell: npt000ss bbbbbbbb dddddfff rrrrrrrr
* phys.mid cell: hhhhhhhh hhhhhhhh hhhhhhhh hhhhhhhh
* phys.lo cell: llllllll llllllll llllllll llllllll
*
 * The PCI device bus address and flags are encoded into phys.hi, and the
 * 64-bit PCI address is encoded into phys.mid and phys.lo.
*/
as->type = of_flags_to_pci_type(fdt32_to_cpu(data[0]));
as->pci_start = ((u64)fdt32_to_cpu(data[1]) << 32) |
fdt32_to_cpu(data[2]);
if (nr_range_cells == 6) {
as->start = fdt32_to_cpu(data[3]);
as->size = ((u64)fdt32_to_cpu(data[4]) << 32) |
fdt32_to_cpu(data[5]);
} else {
as->start = ((u64)fdt32_to_cpu(data[3]) << 32) |
fdt32_to_cpu(data[4]);
as->size = ((u64)fdt32_to_cpu(data[5]) << 32) |
fdt32_to_cpu(data[6]);
}
data += nr_range_cells;
as++;
}
return host;
}
static bool pci_alloc_resource(struct pci_dev *dev, int bar_num, u64 *addr)
{
struct pci_host_bridge *host = pci_host_bridge;
struct pci_addr_space *as = &host->addr_space[0];
u32 bar;
u64 size, pci_addr;
int type, i;
*addr = INVALID_PHYS_ADDR;
size = pci_bar_size(dev, bar_num);
if (!size)
return false;
bar = pci_bar_get(dev, bar_num);
type = pci_bar_type(bar);
if (type & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
type &= ~PCI_BASE_ADDRESS_MEM_PREFETCH;
for (i = 0; i < host->nr_addr_spaces; i++) {
if (as->type == type)
break;
as++;
}
if (i >= host->nr_addr_spaces) {
printf("%s: warning: can't satisfy request for ", __func__);
pci_dev_print_id(dev);
printf(" ");
pci_bar_print(dev, bar_num);
printf("\n");
return false;
}
pci_addr = ALIGN(as->pci_start + as->allocated, size);
size += pci_addr - (as->pci_start + as->allocated);
assert(as->allocated + size <= as->size);
*addr = pci_addr;
as->allocated += size;
return true;
}
bool pci_probe(void)
{
struct pci_dev pci_dev;
pcidevaddr_t dev;
u8 header;
u32 cmd;
int i;
assert(!pci_host_bridge);
pci_host_bridge = pci_dt_probe();
if (!pci_host_bridge)
return false;
for (dev = 0; dev < PCI_DEVFN_MAX; dev++) {
if (!pci_dev_exists(dev))
continue;
pci_dev_init(&pci_dev, dev);
/* We are only interested in normal PCI devices */
header = pci_config_readb(dev, PCI_HEADER_TYPE);
if ((header & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_NORMAL)
continue;
cmd = PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
for (i = 0; i < PCI_BAR_NUM; i++) {
u64 addr;
if (pci_alloc_resource(&pci_dev, i, &addr)) {
pci_bar_set_addr(&pci_dev, i, addr);
if (pci_bar_is_memory(&pci_dev, i))
cmd |= PCI_COMMAND_MEMORY;
else
cmd |= PCI_COMMAND_IO;
}
if (pci_bar_is64(&pci_dev, i))
i++;
}
pci_config_writew(dev, PCI_COMMAND, cmd);
}
return true;
}
/*
 * This function is called from pci_translate_addr() to map an address on
 * this host bridge's PCI buses to the corresponding CPU physical address.
*/
phys_addr_t pci_host_bridge_get_paddr(u64 pci_addr)
{
struct pci_host_bridge *host = pci_host_bridge;
struct pci_addr_space *as = &host->addr_space[0];
int i;
for (i = 0; i < host->nr_addr_spaces; i++) {
if (pci_addr >= as->pci_start &&
pci_addr < as->pci_start + as->size)
return as->start + (pci_addr - as->pci_start);
as++;
}
return INVALID_PHYS_ADDR;
}
static void __iomem *pci_get_dev_conf(struct pci_host_bridge *host, int devfn)
{
return (void __iomem *)(unsigned long)
host->start + (devfn << PCI_ECAM_DEVFN_SHIFT);
}
u8 pci_config_readb(pcidevaddr_t dev, u8 off)
{
void __iomem *conf = pci_get_dev_conf(pci_host_bridge, dev);
return readb(conf + off);
}
u16 pci_config_readw(pcidevaddr_t dev, u8 off)
{
void __iomem *conf = pci_get_dev_conf(pci_host_bridge, dev);
return readw(conf + off);
}
u32 pci_config_readl(pcidevaddr_t dev, u8 off)
{
void __iomem *conf = pci_get_dev_conf(pci_host_bridge, dev);
return readl(conf + off);
}
void pci_config_writeb(pcidevaddr_t dev, u8 off, u8 val)
{
void __iomem *conf = pci_get_dev_conf(pci_host_bridge, dev);
writeb(val, conf + off);
}
void pci_config_writew(pcidevaddr_t dev, u8 off, u16 val)
{
void __iomem *conf = pci_get_dev_conf(pci_host_bridge, dev);
writew(val, conf + off);
}
void pci_config_writel(pcidevaddr_t dev, u8 off, u32 val)
{
void __iomem *conf = pci_get_dev_conf(pci_host_bridge, dev);
writel(val, conf + off);
}

View file

@ -0,0 +1,46 @@
#ifndef PCI_HOST_GENERIC_H
#define PCI_HOST_GENERIC_H
/*
* PCI host bridge supporting structures and constants
*
* Copyright (C) 2016, Red Hat Inc, Alexander Gordeev <agordeev@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
struct pci_addr_space {
phys_addr_t pci_start;
phys_addr_t start;
phys_addr_t size;
phys_addr_t allocated;
int type;
};
struct pci_host_bridge {
phys_addr_t start;
phys_addr_t size;
int bus;
int bus_max;
int nr_addr_spaces;
struct pci_addr_space addr_space[];
};
/*
* The following constants are derived from Linux, see this source:
*
* drivers/pci/host/pci-host-generic.c
* struct gen_pci_cfg_bus_ops::bus_shift
* int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
*
 * Documentation/devicetree/bindings/pci/host-generic-pci.txt describes how
 * the ECAM Configuration Space is memory-mapped by concatenating the various
* components to form an offset:
*
* cfg_offset(bus, device, function, register) =
* bus << 20 | device << 15 | function << 12 | register
*/
#define PCI_ECAM_BUS_SHIFT 20
#define PCI_ECAM_DEVFN_SHIFT 12
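/*
 * For example (illustrative values): bus 0, device 3, function 0,
 * register 0x10 yields cfg_offset = (3 << 15) | (0 << 12) | 0x10 = 0x18010.
 */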
#endif

View file

@ -0,0 +1,194 @@
/*
* QEMU "pci-testdev" PCI test device
*
* Copyright (C) 2016, Red Hat Inc, Alexander Gordeev <agordeev@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "pci.h"
#include "asm/io.h"
struct pci_testdev_ops {
u8 (*io_readb)(const volatile void *addr);
u16 (*io_readw)(const volatile void *addr);
u32 (*io_readl)(const volatile void *addr);
void (*io_writeb)(u8 value, volatile void *addr);
void (*io_writew)(u16 value, volatile void *addr);
void (*io_writel)(u32 value, volatile void *addr);
};
static u8 pio_readb(const volatile void *addr)
{
return inb((unsigned long)addr);
}
static u16 pio_readw(const volatile void *addr)
{
return inw((unsigned long)addr);
}
static u32 pio_readl(const volatile void *addr)
{
return inl((unsigned long)addr);
}
static void pio_writeb(u8 value, volatile void *addr)
{
outb(value, (unsigned long)addr);
}
static void pio_writew(u16 value, volatile void *addr)
{
outw(value, (unsigned long)addr);
}
static void pio_writel(u32 value, volatile void *addr)
{
outl(value, (unsigned long)addr);
}
static struct pci_testdev_ops pci_testdev_io_ops = {
.io_readb = pio_readb,
.io_readw = pio_readw,
.io_readl = pio_readl,
.io_writeb = pio_writeb,
.io_writew = pio_writew,
.io_writel = pio_writel
};
static u8 mmio_readb(const volatile void *addr)
{
return *(const volatile u8 __force *)addr;
}
static u16 mmio_readw(const volatile void *addr)
{
return *(const volatile u16 __force *)addr;
}
static u32 mmio_readl(const volatile void *addr)
{
return *(const volatile u32 __force *)addr;
}
static void mmio_writeb(u8 value, volatile void *addr)
{
*(volatile u8 __force *)addr = value;
}
static void mmio_writew(u16 value, volatile void *addr)
{
*(volatile u16 __force *)addr = value;
}
static void mmio_writel(u32 value, volatile void *addr)
{
*(volatile u32 __force *)addr = value;
}
static struct pci_testdev_ops pci_testdev_mem_ops = {
.io_readb = mmio_readb,
.io_readw = mmio_readw,
.io_readl = mmio_readl,
.io_writeb = mmio_writeb,
.io_writew = mmio_writew,
.io_writel = mmio_writel
};
static bool pci_testdev_one(struct pci_test_dev_hdr *test,
int test_nr,
struct pci_testdev_ops *ops)
{
u8 width;
u32 count, sig, off;
const int nr_writes = 16;
int i;
ops->io_writeb(test_nr, &test->test);
count = ops->io_readl(&test->count);
if (count != 0)
return false;
width = ops->io_readb(&test->width);
if (width != 1 && width != 2 && width != 4)
return false;
sig = ops->io_readl(&test->data);
off = ops->io_readl(&test->offset);
for (i = 0; i < nr_writes; i++) {
switch (width) {
case 1: ops->io_writeb(sig, (void *)test + off); break;
case 2: ops->io_writew(sig, (void *)test + off); break;
case 4: ops->io_writel(sig, (void *)test + off); break;
}
}
count = ops->io_readl(&test->count);
if (!count)
return true;
return (int)count == nr_writes;
}
void pci_testdev_print(struct pci_test_dev_hdr *test,
struct pci_testdev_ops *ops)
{
bool io = (ops == &pci_testdev_io_ops);
int i;
printf("pci-testdev %3s: ", io ? "io" : "mem");
for (i = 0;; ++i) {
char c = ops->io_readb(&test->name[i]);
if (!c)
break;
printf("%c", c);
}
printf("\n");
}
static int pci_testdev_all(struct pci_test_dev_hdr *test,
struct pci_testdev_ops *ops)
{
int i;
for (i = 0;; i++) {
if (!pci_testdev_one(test, i, ops))
break;
pci_testdev_print(test, ops);
}
return i;
}
int pci_testdev(void)
{
struct pci_dev pci_dev;
pcidevaddr_t dev;
phys_addr_t addr;
void __iomem *mem, *io;
int nr_tests = 0;
bool ret;
dev = pci_find_dev(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_TEST);
if (dev == PCIDEVADDR_INVALID) {
printf("'pci-testdev' device is not found, "
"check QEMU '-device pci-testdev' parameter\n");
return -1;
}
pci_dev_init(&pci_dev, dev);
ret = pci_bar_is_valid(&pci_dev, 0) && pci_bar_is_valid(&pci_dev, 1);
assert(ret);
addr = pci_bar_get_addr(&pci_dev, 0);
mem = ioremap(addr, PAGE_SIZE);
addr = pci_bar_get_addr(&pci_dev, 1);
io = (void *)(unsigned long)addr;
nr_tests += pci_testdev_all(mem, &pci_testdev_mem_ops);
nr_tests += pci_testdev_all(io, &pci_testdev_io_ops);
return nr_tests;
}

View file

@ -0,0 +1,374 @@
/*
* Copyright (C) 2013, Red Hat Inc, Michael S. Tsirkin <mst@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include <linux/pci_regs.h>
#include "pci.h"
#include "asm/pci.h"
void pci_cap_walk(struct pci_dev *dev, pci_cap_handler_t handler)
{
uint8_t cap_offset;
uint8_t cap_id;
int count = 0;
cap_offset = pci_config_readb(dev->bdf, PCI_CAPABILITY_LIST);
while (cap_offset) {
cap_id = pci_config_readb(dev->bdf, cap_offset);
assert(cap_id < PCI_CAP_ID_MAX + 1);
handler(dev, cap_offset, cap_id);
cap_offset = pci_config_readb(dev->bdf, cap_offset + 1);
/* Guard against an infinite loop while walking the capability list */
assert(++count <= 255);
}
}
void pci_msi_set_enable(struct pci_dev *dev, bool enabled)
{
uint16_t msi_control;
uint16_t offset;
offset = dev->msi_offset;
msi_control = pci_config_readw(dev->bdf, offset + PCI_MSI_FLAGS);
if (enabled)
msi_control |= PCI_MSI_FLAGS_ENABLE;
else
msi_control &= ~PCI_MSI_FLAGS_ENABLE;
pci_config_writew(dev->bdf, offset + PCI_MSI_FLAGS, msi_control);
}
bool pci_setup_msi(struct pci_dev *dev, uint64_t msi_addr, uint32_t msi_data)
{
uint16_t msi_control;
uint16_t offset;
pcidevaddr_t addr;
assert(dev);
if (!dev->msi_offset) {
printf("MSI: dev 0x%x does not support MSI.\n", dev->bdf);
return false;
}
addr = dev->bdf;
offset = dev->msi_offset;
msi_control = pci_config_readw(addr, offset + PCI_MSI_FLAGS);
pci_config_writel(addr, offset + PCI_MSI_ADDRESS_LO,
msi_addr & 0xffffffff);
if (msi_control & PCI_MSI_FLAGS_64BIT) {
pci_config_writel(addr, offset + PCI_MSI_ADDRESS_HI,
(uint32_t)(msi_addr >> 32));
pci_config_writel(addr, offset + PCI_MSI_DATA_64, msi_data);
} else {
pci_config_writel(addr, offset + PCI_MSI_DATA_32, msi_data);
}
pci_msi_set_enable(dev, true);
return true;
}
void pci_cmd_set_clr(struct pci_dev *dev, uint16_t set, uint16_t clr)
{
uint16_t val = pci_config_readw(dev->bdf, PCI_COMMAND);
/* No overlap is allowed */
assert((set & clr) == 0);
val |= set;
val &= ~clr;
pci_config_writew(dev->bdf, PCI_COMMAND, val);
}
bool pci_dev_exists(pcidevaddr_t dev)
{
return (pci_config_readw(dev, PCI_VENDOR_ID) != 0xffff &&
pci_config_readw(dev, PCI_DEVICE_ID) != 0xffff);
}
/* Scan the bus looking for a specific device. Only bus 0 is scanned for now. */
pcidevaddr_t pci_find_dev(uint16_t vendor_id, uint16_t device_id)
{
pcidevaddr_t dev;
for (dev = 0; dev < PCI_DEVFN_MAX; ++dev) {
if (pci_config_readw(dev, PCI_VENDOR_ID) == vendor_id &&
pci_config_readw(dev, PCI_DEVICE_ID) == device_id)
return dev;
}
return PCIDEVADDR_INVALID;
}
uint32_t pci_bar_mask(uint32_t bar)
{
return (bar & PCI_BASE_ADDRESS_SPACE_IO) ?
PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK;
}
uint32_t pci_bar_get(struct pci_dev *dev, int bar_num)
{
return pci_config_readl(dev->bdf, PCI_BASE_ADDRESS_0 +
bar_num * 4);
}
static phys_addr_t __pci_bar_get_addr(struct pci_dev *dev, int bar_num)
{
uint32_t bar = pci_bar_get(dev, bar_num);
uint32_t mask = pci_bar_mask(bar);
uint64_t addr = bar & mask;
phys_addr_t phys_addr;
if (pci_bar_is64(dev, bar_num))
addr |= (uint64_t)pci_bar_get(dev, bar_num + 1) << 32;
phys_addr = pci_translate_addr(dev->bdf, addr);
assert(phys_addr != INVALID_PHYS_ADDR);
return phys_addr;
}
phys_addr_t pci_bar_get_addr(struct pci_dev *dev, int bar_num)
{
return dev->resource[bar_num];
}
void pci_bar_set_addr(struct pci_dev *dev, int bar_num, phys_addr_t addr)
{
int off = PCI_BASE_ADDRESS_0 + bar_num * 4;
pci_config_writel(dev->bdf, off, (uint32_t)addr);
dev->resource[bar_num] = addr;
if (pci_bar_is64(dev, bar_num)) {
assert(bar_num + 1 < PCI_BAR_NUM);
pci_config_writel(dev->bdf, off + 4, (uint32_t)(addr >> 32));
dev->resource[bar_num + 1] = dev->resource[bar_num];
}
}
/*
* To determine the amount of address space needed by a PCI device,
* one must save the original value of the BAR, write a value of
* all 1's to the register, and then read it back. The amount of
* memory can be then determined by masking the information bits,
* performing a bitwise NOT, and incrementing the value by 1.
*
* The following pci_bar_size_helper() and pci_bar_size() functions
* implement the algorithm.
*/
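/*
 * Worked example (illustrative): if a 32-bit memory BAR reads back
 * 0xfffff000 after all 1's were written, masking the information bits
 * leaves 0xfffff000, and ~0xfffff000 + 1 = 0x1000, i.e. a 4 KiB BAR.
 */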
static uint32_t pci_bar_size_helper(struct pci_dev *dev, int bar_num)
{
int off = PCI_BASE_ADDRESS_0 + bar_num * 4;
uint16_t bdf = dev->bdf;
uint32_t bar, val;
bar = pci_config_readl(bdf, off);
pci_config_writel(bdf, off, ~0u);
val = pci_config_readl(bdf, off);
pci_config_writel(bdf, off, bar);
return val;
}
phys_addr_t pci_bar_size(struct pci_dev *dev, int bar_num)
{
uint32_t bar, size;
size = pci_bar_size_helper(dev, bar_num);
if (!size)
return 0;
bar = pci_bar_get(dev, bar_num);
size &= pci_bar_mask(bar);
if (pci_bar_is64(dev, bar_num)) {
phys_addr_t size64 = pci_bar_size_helper(dev, bar_num + 1);
size64 = (size64 << 32) | size;
return ~size64 + 1;
} else {
return ~size + 1;
}
}
bool pci_bar_is_memory(struct pci_dev *dev, int bar_num)
{
uint32_t bar = pci_bar_get(dev, bar_num);
return !(bar & PCI_BASE_ADDRESS_SPACE_IO);
}
bool pci_bar_is_valid(struct pci_dev *dev, int bar_num)
{
return dev->resource[bar_num] != INVALID_PHYS_ADDR;
}
bool pci_bar_is64(struct pci_dev *dev, int bar_num)
{
uint32_t bar = pci_bar_get(dev, bar_num);
if (bar & PCI_BASE_ADDRESS_SPACE_IO)
return false;
return (bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
PCI_BASE_ADDRESS_MEM_TYPE_64;
}
void pci_bar_print(struct pci_dev *dev, int bar_num)
{
phys_addr_t size, start, end;
uint32_t bar;
if (!pci_bar_is_valid(dev, bar_num))
return;
bar = pci_bar_get(dev, bar_num);
size = pci_bar_size(dev, bar_num);
start = pci_bar_get_addr(dev, bar_num);
end = start + size - 1;
if (pci_bar_is64(dev, bar_num)) {
printf("BAR#%d,%d [%" PRIx64 "-%" PRIx64 " ",
bar_num, bar_num + 1, start, end);
} else {
printf("BAR#%d [%02x-%02x ",
bar_num, (uint32_t)start, (uint32_t)end);
}
if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
printf("PIO");
} else {
printf("MEM");
switch (bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK) {
case PCI_BASE_ADDRESS_MEM_TYPE_32:
printf("32");
break;
case PCI_BASE_ADDRESS_MEM_TYPE_1M:
printf("1M");
break;
case PCI_BASE_ADDRESS_MEM_TYPE_64:
printf("64");
break;
default:
assert(0);
}
}
if (bar & PCI_BASE_ADDRESS_MEM_PREFETCH)
printf("/p");
printf("]");
}
void pci_dev_print_id(struct pci_dev *dev)
{
pcidevaddr_t bdf = dev->bdf;
printf("00.%02x.%1x %04x:%04x", bdf / 8, bdf % 8,
pci_config_readw(bdf, PCI_VENDOR_ID),
pci_config_readw(bdf, PCI_DEVICE_ID));
}
static void pci_cap_print(struct pci_dev *dev, int cap_offset, int cap_id)
{
switch (cap_id) {
case PCI_CAP_ID_MSI: {
uint16_t control = pci_config_readw(dev->bdf, cap_offset + PCI_MSI_FLAGS);
printf("\tMSI,%s-bit capability ", control & PCI_MSI_FLAGS_64BIT ? "64" : "32");
break;
}
default:
printf("\tcapability 0x%02x ", cap_id);
break;
}
printf("at offset 0x%02x\n", cap_offset);
}
void pci_dev_print(struct pci_dev *dev)
{
pcidevaddr_t bdf = dev->bdf;
uint8_t header = pci_config_readb(bdf, PCI_HEADER_TYPE);
uint8_t progif = pci_config_readb(bdf, PCI_CLASS_PROG);
uint8_t subclass = pci_config_readb(bdf, PCI_CLASS_DEVICE);
uint8_t class = pci_config_readb(bdf, PCI_CLASS_DEVICE + 1);
int i;
pci_dev_print_id(dev);
printf(" type %02x progif %02x class %02x subclass %02x\n",
header, progif, class, subclass);
pci_cap_walk(dev, pci_cap_print);
if ((header & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_NORMAL)
return;
for (i = 0; i < PCI_BAR_NUM; i++) {
if (pci_bar_is_valid(dev, i)) {
printf("\t");
pci_bar_print(dev, i);
printf("\n");
}
if (pci_bar_is64(dev, i))
i++;
}
}
void pci_print(void)
{
pcidevaddr_t devfn;
struct pci_dev pci_dev;
for (devfn = 0; devfn < PCI_DEVFN_MAX; ++devfn) {
if (pci_dev_exists(devfn)) {
pci_dev_init(&pci_dev, devfn);
pci_dev_print(&pci_dev);
}
}
}
void pci_dev_init(struct pci_dev *dev, pcidevaddr_t bdf)
{
int i;
memset(dev, 0, sizeof(*dev));
dev->bdf = bdf;
for (i = 0; i < PCI_BAR_NUM; i++) {
if (pci_bar_size(dev, i)) {
dev->resource[i] = __pci_bar_get_addr(dev, i);
if (pci_bar_is64(dev, i)) {
assert(i + 1 < PCI_BAR_NUM);
dev->resource[i + 1] = dev->resource[i];
i++;
}
} else {
dev->resource[i] = INVALID_PHYS_ADDR;
}
}
}
uint8_t pci_intx_line(struct pci_dev *dev)
{
return pci_config_readb(dev->bdf, PCI_INTERRUPT_LINE);
}
static void pci_cap_setup(struct pci_dev *dev, int cap_offset, int cap_id)
{
switch (cap_id) {
case PCI_CAP_ID_MSI:
dev->msi_offset = cap_offset;
break;
}
}
void pci_enable_defaults(struct pci_dev *dev)
{
/* Enable device DMA operations */
pci_cmd_set_clr(dev, PCI_COMMAND_MASTER, 0);
pci_cap_walk(dev, pci_cap_setup);
}

View file

@ -0,0 +1,102 @@
#ifndef PCI_H
#define PCI_H
/*
 * API for scanning a PCI bus for a given device, as well as for accessing
* BAR registers.
*
* Copyright (C) 2013, Red Hat Inc, Michael S. Tsirkin <mst@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
typedef uint16_t pcidevaddr_t;
enum {
PCIDEVADDR_INVALID = 0xffff,
};
#define PCI_BAR_NUM 6
#define PCI_DEVFN_MAX 256
#define PCI_BDF_GET_DEVFN(x) ((x) & 0xff)
#define PCI_BDF_GET_BUS(x) (((x) >> 8) & 0xff)
struct pci_dev {
uint16_t bdf;
uint16_t msi_offset;
phys_addr_t resource[PCI_BAR_NUM];
};
extern void pci_dev_init(struct pci_dev *dev, pcidevaddr_t bdf);
extern void pci_cmd_set_clr(struct pci_dev *dev, uint16_t set, uint16_t clr);
typedef void (*pci_cap_handler_t)(struct pci_dev *dev, int cap_offset, int cap_id);
extern void pci_cap_walk(struct pci_dev *dev, pci_cap_handler_t handler);
extern void pci_enable_defaults(struct pci_dev *dev);
extern bool pci_setup_msi(struct pci_dev *dev, uint64_t msi_addr,
uint32_t msi_data);
typedef phys_addr_t iova_t;
extern bool pci_probe(void);
extern void pci_print(void);
extern bool pci_dev_exists(pcidevaddr_t dev);
extern pcidevaddr_t pci_find_dev(uint16_t vendor_id, uint16_t device_id);
/*
* @bar_num in all BAR access functions below is the index of the 32-bit
* register starting from the PCI_BASE_ADDRESS_0 offset.
*
* In cases where the BAR size is 64-bit, a caller should still provide
* @bar_num in terms of 32-bit words. For example, if a device has a 64-bit
 * BAR#0 and a 32-bit BAR#1, then the caller should provide 2 to address BAR#1,
* not 1.
*
* It is expected the caller is aware of the device BAR layout and never
* tries to address the middle of a 64-bit register.
*/
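/*
 * Illustrative usage sketch (assumes BAR addresses have already been
 * assigned, e.g. by pci_probe() or firmware; the device IDs are just an
 * example):
 *
 *	struct pci_dev dev;
 *	pcidevaddr_t devaddr = pci_find_dev(PCI_VENDOR_ID_REDHAT,
 *					    PCI_DEVICE_ID_REDHAT_TEST);
 *
 *	if (devaddr != PCIDEVADDR_INVALID) {
 *		pci_dev_init(&dev, devaddr);
 *		pci_enable_defaults(&dev);
 *		phys_addr_t bar0 = pci_bar_get_addr(&dev, 0);
 *	}
 */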
extern phys_addr_t pci_bar_get_addr(struct pci_dev *dev, int bar_num);
extern void pci_bar_set_addr(struct pci_dev *dev, int bar_num, phys_addr_t addr);
extern phys_addr_t pci_bar_size(struct pci_dev *dev, int bar_num);
extern uint32_t pci_bar_get(struct pci_dev *dev, int bar_num);
extern uint32_t pci_bar_mask(uint32_t bar);
extern bool pci_bar_is64(struct pci_dev *dev, int bar_num);
extern bool pci_bar_is_memory(struct pci_dev *dev, int bar_num);
extern bool pci_bar_is_valid(struct pci_dev *dev, int bar_num);
extern void pci_bar_print(struct pci_dev *dev, int bar_num);
extern void pci_dev_print_id(struct pci_dev *dev);
extern void pci_dev_print(struct pci_dev *dev);
extern uint8_t pci_intx_line(struct pci_dev *dev);
void pci_msi_set_enable(struct pci_dev *dev, bool enabled);
extern int pci_testdev(void);
/*
* pci-testdev is a driver for the pci-testdev qemu pci device. The
* device enables testing mmio and portio exits, and measuring their
* speed.
*/
#define PCI_VENDOR_ID_REDHAT 0x1b36
#define PCI_DEVICE_ID_REDHAT_TEST 0x0005
/*
* pci-testdev supports at least three types of tests (via mmio and
* portio BARs): no-eventfd, wildcard-eventfd and datamatch-eventfd
*/
#define PCI_TESTDEV_BAR_MEM 0
#define PCI_TESTDEV_BAR_IO 1
#define PCI_TESTDEV_NUM_BARS 2
#define PCI_TESTDEV_NUM_TESTS 3
struct pci_test_dev_hdr {
uint8_t test;
uint8_t width;
uint8_t pad0[2];
uint32_t offset;
uint32_t data;
uint32_t count;
uint8_t name[];
};
#define PCI_HEADER_TYPE_MASK 0x7f
#endif /* PCI_H */

View file

@ -0,0 +1,261 @@
#include "libcflat.h"
#define BUFSZ 2000
typedef struct pstream {
char *buffer;
int remain;
int added;
} pstream_t;
typedef struct strprops {
char pad;
int npad;
} strprops_t;
static void addchar(pstream_t *p, char c)
{
if (p->remain) {
*p->buffer++ = c;
--p->remain;
}
++p->added;
}
void print_str(pstream_t *p, const char *s, strprops_t props)
{
const char *s_orig = s;
int npad = props.npad;
if (npad > 0) {
npad -= strlen(s_orig);
while (npad > 0) {
addchar(p, props.pad);
--npad;
}
}
while (*s)
addchar(p, *s++);
if (npad < 0) {
props.pad = ' '; /* ignore '0' flag with '-' flag */
npad += strlen(s_orig);
while (npad < 0) {
addchar(p, props.pad);
++npad;
}
}
}
static char digits[16] = "0123456789abcdef";
void print_int(pstream_t *ps, long long n, int base, strprops_t props)
{
char buf[sizeof(long) * 3 + 2], *p = buf;
int s = 0, i;
if (n < 0) {
n = -n;
s = 1;
}
while (n) {
*p++ = digits[n % base];
n /= base;
}
if (s)
*p++ = '-';
if (p == buf)
*p++ = '0';
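/* The digits were produced least-significant first; reverse them. */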
for (i = 0; i < (p - buf) / 2; ++i) {
char tmp;
tmp = buf[i];
buf[i] = p[-1-i];
p[-1-i] = tmp;
}
*p = 0;
print_str(ps, buf, props);
}
void print_unsigned(pstream_t *ps, unsigned long long n, int base,
strprops_t props)
{
char buf[sizeof(long) * 3 + 1], *p = buf;
int i;
while (n) {
*p++ = digits[n % base];
n /= base;
}
if (p == buf)
*p++ = '0';
for (i = 0; i < (p - buf) / 2; ++i) {
char tmp;
tmp = buf[i];
buf[i] = p[-1-i];
p[-1-i] = tmp;
}
*p = 0;
print_str(ps, buf, props);
}
static int fmtnum(const char **fmt)
{
const char *f = *fmt;
int len = 0, num;
if (*f == '-')
++f, ++len;
while (*f >= '0' && *f <= '9')
++f, ++len;
num = atol(*fmt);
*fmt += len;
return num;
}
int vsnprintf(char *buf, int size, const char *fmt, va_list va)
{
pstream_t s;
s.buffer = buf;
s.remain = size - 1;
s.added = 0;
while (*fmt) {
char f = *fmt++;
int nlong = 0;
strprops_t props;
memset(&props, 0, sizeof(props));
props.pad = ' ';
if (f != '%') {
addchar(&s, f);
continue;
}
morefmt:
f = *fmt++;
switch (f) {
case '%':
addchar(&s, '%');
break;
case 'c':
addchar(&s, va_arg(va, int));
break;
case '\0':
--fmt;
break;
case '0':
props.pad = '0';
++fmt;
/* fall through */
case '1'...'9':
case '-':
--fmt;
props.npad = fmtnum(&fmt);
goto morefmt;
case 'l':
++nlong;
goto morefmt;
case 'd':
switch (nlong) {
case 0:
print_int(&s, va_arg(va, int), 10, props);
break;
case 1:
print_int(&s, va_arg(va, long), 10, props);
break;
default:
print_int(&s, va_arg(va, long long), 10, props);
break;
}
break;
case 'u':
switch (nlong) {
case 0:
print_unsigned(&s, va_arg(va, unsigned), 10, props);
break;
case 1:
print_unsigned(&s, va_arg(va, unsigned long), 10, props);
break;
default:
print_unsigned(&s, va_arg(va, unsigned long long), 10, props);
break;
}
break;
case 'x':
switch (nlong) {
case 0:
print_unsigned(&s, va_arg(va, unsigned), 16, props);
break;
case 1:
print_unsigned(&s, va_arg(va, unsigned long), 16, props);
break;
default:
print_unsigned(&s, va_arg(va, unsigned long long), 16, props);
break;
}
break;
case 'p':
print_str(&s, "0x", props);
print_unsigned(&s, (unsigned long)va_arg(va, void *), 16, props);
break;
case 's':
print_str(&s, va_arg(va, const char *), props);
break;
default:
addchar(&s, f);
break;
}
}
*s.buffer = 0;
++s.added;
return s.added;
}
int snprintf(char *buf, int size, const char *fmt, ...)
{
va_list va;
int r;
va_start(va, fmt);
r = vsnprintf(buf, size, fmt, va);
va_end(va);
return r;
}
int vprintf(const char *fmt, va_list va)
{
char buf[BUFSZ];
int r;
r = vsnprintf(buf, sizeof(buf), fmt, va);
puts(buf);
return r;
}
int printf(const char *fmt, ...)
{
va_list va;
char buf[BUFSZ];
int r;
va_start(va, fmt);
r = vsnprintf(buf, sizeof buf, fmt, va);
va_end(va);
puts(buf);
return r;
}

View file

@ -0,0 +1,145 @@
/*
* Test result reporting
*
* Copyright (c) Siemens AG, 2014
*
* Authors:
* Jan Kiszka <jan.kiszka@siemens.com>
* Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#include "asm/spinlock.h"
static unsigned int tests, failures, xfailures, skipped;
static char prefixes[256];
static struct spinlock lock;
void report_prefix_push(const char *prefix)
{
spin_lock(&lock);
strcat(prefixes, prefix);
strcat(prefixes, ": ");
spin_unlock(&lock);
}
void report_prefix_pop(void)
{
char *p, *q;
spin_lock(&lock);
if (!*prefixes) {
	spin_unlock(&lock);
	return;
}
for (p = prefixes, q = strstr(p, ": ") + 2;
*q;
p = q, q = strstr(p, ": ") + 2)
;
*p = '\0';
spin_unlock(&lock);
}
static void va_report(const char *msg_fmt,
bool pass, bool xfail, bool skip, va_list va)
{
char *prefix = skip ? "SKIP"
: xfail ? (pass ? "XPASS" : "XFAIL")
: (pass ? "PASS" : "FAIL");
spin_lock(&lock);
tests++;
printf("%s: ", prefix);
puts(prefixes);
vprintf(msg_fmt, va);
puts("\n");
if (skip)
skipped++;
else if (xfail && !pass)
xfailures++;
else if (xfail || !pass)
failures++;
spin_unlock(&lock);
}
void report(const char *msg_fmt, bool pass, ...)
{
va_list va;
va_start(va, pass);
va_report(msg_fmt, pass, false, false, va);
va_end(va);
}
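/*
 * Example (illustrative): report("range %d-%d", start < end, start, end)
 * prints a PASS or FAIL line with the formatted message and updates the
 * summary counters.
 */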
void report_xfail(const char *msg_fmt, bool xfail, bool pass, ...)
{
va_list va;
va_start(va, pass);
va_report(msg_fmt, pass, xfail, false, va);
va_end(va);
}
void report_skip(const char *msg_fmt, ...)
{
va_list va;
va_start(va, msg_fmt);
va_report(msg_fmt, false, false, true, va);
va_end(va);
}
void report_info(const char *msg_fmt, ...)
{
va_list va;
spin_lock(&lock);
puts("INFO: ");
puts(prefixes);
va_start(va, msg_fmt);
vprintf(msg_fmt, va);
va_end(va);
puts("\n");
spin_unlock(&lock);
}
int report_summary(void)
{
spin_lock(&lock);
printf("SUMMARY: %d tests", tests);
if (failures)
printf(", %d unexpected failures", failures);
if (xfailures)
printf(", %d expected failures", xfailures);
if (skipped)
printf(", %d skipped", skipped);
printf("\n");
if (tests == skipped)
/* Blame AUTOTOOLS for using 77 for skipped test and QEMU for
* mangling error codes in a way that gets 77 if we ... */
return 77 >> 1;
return failures > 0 ? 1 : 0;
spin_unlock(&lock);
}
void report_abort(const char *msg_fmt, ...)
{
va_list va;
spin_lock(&lock);
puts("ABORT: ");
puts(prefixes);
va_start(va, msg_fmt);
vprintf(msg_fmt, va);
va_end(va);
puts("\n");
spin_unlock(&lock);
report_summary();
abort();
}

View file

@ -0,0 +1,12 @@
#ifndef LIBCFLAT_SETJMP_H
#define LIBCFLAT_SETJMP_H 1
typedef struct jmp_buf_tag {
long int regs[8];
} jmp_buf[1];
extern int setjmp (struct jmp_buf_tag env[1]);
extern void longjmp (struct jmp_buf_tag env[1], int val)
__attribute__ ((__noreturn__));
#endif /* setjmp.h */

View file

@ -0,0 +1,96 @@
#include <libcflat.h>
#include <stack.h>
#define MAX_DEPTH 20
static void print_stack(const void **return_addrs, int depth,
bool top_is_return_address)
{
int i = 0;
printf("\tSTACK:");
/* @addr indicates a non-return address, as expected by the stack
* pretty printer script. */
if (depth > 0 && !top_is_return_address) {
printf(" @%lx", (unsigned long) return_addrs[0]);
i++;
}
for (; i < depth; i++) {
printf(" %lx", (unsigned long) return_addrs[i]);
}
printf("\n");
}
void dump_stack(void)
{
const void *return_addrs[MAX_DEPTH];
int depth;
depth = backtrace(return_addrs, MAX_DEPTH);
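/* Skip return_addrs[0], which points back into dump_stack() itself. */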
print_stack(&return_addrs[1], depth ? depth - 1 : 0, true);
}
void dump_frame_stack(const void *instruction, const void *frame)
{
const void *return_addrs[MAX_DEPTH];
int depth;
return_addrs[0] = instruction;
depth = backtrace_frame(frame, &return_addrs[1], MAX_DEPTH - 1);
print_stack(return_addrs, depth + 1, false);
}
#ifndef HAVE_ARCH_BACKTRACE
int backtrace(const void **return_addrs, int max_depth)
{
static int walking;
int depth = 0;
void *addr;
if (walking) {
printf("RECURSIVE STACK WALK!!!\n");
return 0;
}
walking = 1;
/* __builtin_return_address requires a compile-time constant argument */
#define GET_RETURN_ADDRESS(i) \
if (max_depth == i) \
goto done; \
addr = __builtin_return_address(i); \
if (!addr) \
goto done; \
return_addrs[i] = __builtin_extract_return_addr(addr); \
depth = i + 1;
GET_RETURN_ADDRESS(0)
GET_RETURN_ADDRESS(1)
GET_RETURN_ADDRESS(2)
GET_RETURN_ADDRESS(3)
GET_RETURN_ADDRESS(4)
GET_RETURN_ADDRESS(5)
GET_RETURN_ADDRESS(6)
GET_RETURN_ADDRESS(7)
GET_RETURN_ADDRESS(8)
GET_RETURN_ADDRESS(9)
GET_RETURN_ADDRESS(10)
GET_RETURN_ADDRESS(11)
GET_RETURN_ADDRESS(12)
GET_RETURN_ADDRESS(13)
GET_RETURN_ADDRESS(14)
GET_RETURN_ADDRESS(15)
GET_RETURN_ADDRESS(16)
GET_RETURN_ADDRESS(17)
GET_RETURN_ADDRESS(18)
GET_RETURN_ADDRESS(19)
GET_RETURN_ADDRESS(20)
#undef GET_RETURN_ADDRESS
done:
walking = 0;
return depth;
}
#endif /* HAVE_ARCH_BACKTRACE */

View file

@ -0,0 +1,21 @@
#ifndef _STACK_H_
#define _STACK_H_
#include <libcflat.h>
#include <asm/stack.h>
#ifdef HAVE_ARCH_BACKTRACE_FRAME
extern int backtrace_frame(const void *frame, const void **return_addrs,
int max_depth);
#else
static inline int
backtrace_frame(const void *frame __unused, const void **return_addrs __unused,
int max_depth __unused)
{
return 0;
}
#endif
extern int backtrace(const void **return_addrs, int max_depth);
#endif

View file

@ -0,0 +1,175 @@
#include "libcflat.h"
unsigned long strlen(const char *buf)
{
unsigned long len = 0;
while (*buf++)
++len;
return len;
}
char *strcat(char *dest, const char *src)
{
char *p = dest;
while (*p)
++p;
while ((*p++ = *src++) != 0)
;
return dest;
}
char *strcpy(char *dest, const char *src)
{
*dest = 0;
return strcat(dest, src);
}
int strncmp(const char *a, const char *b, size_t n)
{
for (; n--; ++a, ++b)
if (*a != *b || *a == '\0')
return *a - *b;
return 0;
}
int strcmp(const char *a, const char *b)
{
return strncmp(a, b, SIZE_MAX);
}
char *strchr(const char *s, int c)
{
while (*s != (char)c)
if (*s++ == '\0')
return NULL;
return (char *)s;
}
char *strstr(const char *s1, const char *s2)
{
size_t l1, l2;
l2 = strlen(s2);
if (!l2)
return (char *)s1;
l1 = strlen(s1);
while (l1 >= l2) {
l1--;
if (!memcmp(s1, s2, l2))
return (char *)s1;
s1++;
}
return NULL;
}
void *memset(void *s, int c, size_t n)
{
size_t i;
char *a = s;
for (i = 0; i < n; ++i)
a[i] = c;
return s;
}
void *memcpy(void *dest, const void *src, size_t n)
{
size_t i;
char *a = dest;
const char *b = src;
for (i = 0; i < n; ++i)
a[i] = b[i];
return dest;
}
int memcmp(const void *s1, const void *s2, size_t n)
{
const unsigned char *a = s1, *b = s2;
int ret = 0;
while (n--) {
ret = *a - *b;
if (ret)
break;
++a, ++b;
}
return ret;
}
void *memmove(void *dest, const void *src, size_t n)
{
const unsigned char *s = src;
unsigned char *d = dest;
if (d <= s) {
while (n--)
*d++ = *s++;
} else {
d += n, s += n;
while (n--)
*--d = *--s;
}
return dest;
}
void *memchr(const void *s, int c, size_t n)
{
const unsigned char *str = s, chr = (unsigned char)c;
while (n--)
if (*str++ == chr)
return (void *)(str - 1);
return NULL;
}
long atol(const char *ptr)
{
long acc = 0;
const char *s = ptr;
int neg, c;
while (*s == ' ' || *s == '\t')
s++;
if (*s == '-'){
neg = 1;
s++;
} else {
neg = 0;
if (*s == '+')
s++;
}
while (*s) {
if (*s < '0' || *s > '9')
break;
c = *s - '0';
acc = acc * 10 + c;
s++;
}
if (neg)
acc = -acc;
return acc;
}
extern char **environ;
char *getenv(const char *name)
{
char **envp = environ, *delim;
while (*envp) {
delim = strchr(*envp, '=');
if (delim && strncmp(name, *envp, delim - *envp) == 0)
return delim + 1;
++envp;
}
return NULL;
}

View file

@ -0,0 +1,17 @@
#ifndef __STRING_H
#define __STRING_H
extern unsigned long strlen(const char *buf);
extern char *strcat(char *dest, const char *src);
extern char *strcpy(char *dest, const char *src);
extern int strcmp(const char *a, const char *b);
extern int strncmp(const char *a, const char *b, size_t n);
extern char *strchr(const char *s, int c);
extern char *strstr(const char *haystack, const char *needle);
extern void *memset(void *s, int c, size_t n);
extern void *memcpy(void *dest, const void *src, size_t n);
extern int memcmp(const void *s1, const void *s2, size_t n);
extern void *memmove(void *dest, const void *src, size_t n);
extern void *memchr(const void *s, int c, size_t n);
#endif /* _STRING_H */

View file

@ -0,0 +1,18 @@
/*
* Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include <libcflat.h>
int parse_keyval(char *s, long *val)
{
char *p;
p = strchr(s, '=');
if (!p)
return -1;
*val = atol(p+1);
return p - s;
}

View file

@ -0,0 +1,23 @@
#ifndef _UTIL_H_
#define _UTIL_H_
/*
* Collection of utility functions to share between unit tests.
*
* Copyright (C) 2016, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
/*
* parse_keyval extracts the integer from a string formatted as
* string=integer. This is useful for passing expected values to
* the unit test on the command line, i.e. it helps parse QEMU
 * command lines that include something like '-append var1=1 var2=2'.
* @s is the input string, likely a command line parameter, and
* @val is a pointer to where the integer will be stored.
*
* Returns the offset of the '=', or -1 if no keyval pair is found.
*/
extern int parse_keyval(char *s, long *val);
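/*
 * Illustrative example: parse_keyval("ncpus=4", &val) stores 4 in val and
 * returns 5, the offset of the '='.
 */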
#endif

View file

@ -0,0 +1,177 @@
/*
* virtqueue support adapted from the Linux kernel.
*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#include "devicetree.h"
#include "alloc.h"
#include "asm/page.h"
#include "asm/io.h"
#include "virtio.h"
#include "virtio-mmio.h"
static void vm_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
u8 *p = buf;
unsigned i;
for (i = 0; i < len; ++i)
p[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
}
static void vm_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
const u8 *p = buf;
unsigned i;
for (i = 0; i < len; ++i)
writeb(p[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
}
static bool vm_notify(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
return true;
}
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev,
unsigned index,
void (*callback)(struct virtqueue *vq),
const char *name)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
struct vring_virtqueue *vq;
void *queue;
unsigned num = VIRTIO_MMIO_QUEUE_NUM_MIN;
vq = calloc(1, sizeof(*vq));
queue = memalign(PAGE_SIZE, VIRTIO_MMIO_QUEUE_SIZE_MIN);
assert(vq && queue);
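/* Legacy virtio-mmio queue setup: select the queue, then program its size,
 * alignment and page frame number. */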
writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
assert(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX) >= num);
if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN) != 0) {
printf("%s: virtqueue %d already setup! base=%p\n",
__func__, index, vm_dev->base);
return NULL;
}
writel(num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
writel(VIRTIO_MMIO_VRING_ALIGN,
vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
writel(virt_to_pfn(queue), vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
vring_init_virtqueue(vq, index, num, VIRTIO_MMIO_VRING_ALIGN,
vdev, queue, vm_notify, callback, name);
return &vq->vq;
}
static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
const char *names[])
{
unsigned i;
for (i = 0; i < nvqs; ++i) {
vqs[i] = vm_setup_vq(vdev, i,
callbacks ? callbacks[i] : NULL,
names ? names[i] : "");
if (vqs[i] == NULL)
return -1;
}
return 0;
}
static const struct virtio_config_ops vm_config_ops = {
.get = vm_get,
.set = vm_set,
.find_vqs = vm_find_vqs,
};
static void vm_device_init(struct virtio_mmio_device *vm_dev)
{
vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
vm_dev->vdev.config = &vm_config_ops;
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
}
/******************************************************
* virtio-mmio device tree support
******************************************************/
struct vm_dt_info {
u32 devid;
void *base;
};
static int vm_dt_match(const struct dt_device *dev, int fdtnode)
{
struct vm_dt_info *info = (struct vm_dt_info *)dev->info;
struct dt_pbus_reg base;
u32 magic;
int ret;
dt_device_bind_node((struct dt_device *)dev, fdtnode);
ret = dt_pbus_get_base(dev, &base);
assert(ret == 0);
info->base = ioremap(base.addr, base.size);
magic = readl(info->base + VIRTIO_MMIO_MAGIC_VALUE);
if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24))
return false;
return readl(info->base + VIRTIO_MMIO_DEVICE_ID) == info->devid;
}
static struct virtio_device *virtio_mmio_dt_bind(u32 devid)
{
struct virtio_mmio_device *vm_dev;
struct dt_device dt_dev;
struct dt_bus dt_bus;
struct vm_dt_info info;
int node;
if (!dt_available())
return NULL;
dt_bus_init_defaults(&dt_bus);
dt_bus.match = vm_dt_match;
info.devid = devid;
dt_device_init(&dt_dev, &dt_bus, &info);
node = dt_device_find_compatible(&dt_dev, "virtio,mmio");
assert(node >= 0 || node == -FDT_ERR_NOTFOUND);
if (node == -FDT_ERR_NOTFOUND)
return NULL;
vm_dev = calloc(1, sizeof(*vm_dev));
assert(vm_dev != NULL);
vm_dev->base = info.base;
vm_device_init(vm_dev);
return &vm_dev->vdev;
}
struct virtio_device *virtio_mmio_bind(u32 devid)
{
return virtio_mmio_dt_bind(devid);
}

View file

@ -0,0 +1,65 @@
#ifndef _VIRTIO_MMIO_H_
#define _VIRTIO_MMIO_H_
/*
* A minimal implementation of virtio-mmio. Adapted from the Linux Kernel.
*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#include "asm/page.h"
#include "virtio.h"
#define VIRTIO_MMIO_MAGIC_VALUE 0x000
#define VIRTIO_MMIO_VERSION 0x004
#define VIRTIO_MMIO_DEVICE_ID 0x008
#define VIRTIO_MMIO_VENDOR_ID 0x00c
#define VIRTIO_MMIO_HOST_FEATURES 0x010
#define VIRTIO_MMIO_HOST_FEATURES_SEL 0x014
#define VIRTIO_MMIO_GUEST_FEATURES 0x020
#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024
#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
#define VIRTIO_MMIO_QUEUE_SEL 0x030
#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034
#define VIRTIO_MMIO_QUEUE_NUM 0x038
#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c
#define VIRTIO_MMIO_QUEUE_PFN 0x040
#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050
#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060
#define VIRTIO_MMIO_INTERRUPT_ACK 0x064
#define VIRTIO_MMIO_STATUS 0x070
#define VIRTIO_MMIO_CONFIG 0x100
#define VIRTIO_MMIO_INT_VRING (1 << 0)
#define VIRTIO_MMIO_INT_CONFIG (1 << 1)
#define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE
/*
* The minimum queue size is 2*VIRTIO_MMIO_VRING_ALIGN, which
* means the largest queue num for the minimum queue size is 128, i.e.
* 2*VIRTIO_MMIO_VRING_ALIGN = vring_size(128, VIRTIO_MMIO_VRING_ALIGN),
* where vring_size is
*
* unsigned vring_size(unsigned num, unsigned long align)
* {
* return ((sizeof(struct vring_desc) * num + sizeof(u16) * (3 + num)
* + align - 1) & ~(align - 1))
* + sizeof(u16) * 3 + sizeof(struct vring_used_elem) * num;
* }
*/
#define VIRTIO_MMIO_QUEUE_SIZE_MIN (2*VIRTIO_MMIO_VRING_ALIGN)
#define VIRTIO_MMIO_QUEUE_NUM_MIN 128
#define to_virtio_mmio_device(vdev_ptr) \
container_of(vdev_ptr, struct virtio_mmio_device, vdev)
struct virtio_mmio_device {
struct virtio_device vdev;
void *base;
};
extern struct virtio_device *virtio_mmio_bind(u32 devid);
#endif /* _VIRTIO_MMIO_H_ */

View file

@ -0,0 +1,130 @@
/*
* virtqueue support adapted from the Linux kernel.
*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#include "asm/io.h"
#include "virtio.h"
#include "virtio-mmio.h"
void vring_init(struct vring *vr, unsigned int num, void *p,
unsigned long align)
{
vr->num = num;
vr->desc = p;
vr->avail = p + num*sizeof(struct vring_desc);
vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(u16)
+ align-1) & ~(align - 1));
}
void vring_init_virtqueue(struct vring_virtqueue *vq, unsigned index,
unsigned num, unsigned vring_align,
struct virtio_device *vdev, void *pages,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name)
{
unsigned i;
vring_init(&vq->vring, num, pages, vring_align);
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
vq->vq.num_free = num;
vq->vq.index = index;
vq->notify = notify;
vq->last_used_idx = 0;
vq->num_added = 0;
vq->free_head = 0;
for (i = 0; i < num-1; i++) {
vq->vring.desc[i].next = i+1;
vq->data[i] = NULL;
}
vq->data[i] = NULL;
}
int virtqueue_add_outbuf(struct virtqueue *_vq, char *buf, unsigned int len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned avail;
int head;
assert(buf != NULL);
assert(len != 0);
if (!vq->vq.num_free)
return -1;
--vq->vq.num_free;
head = vq->free_head;
vq->vring.desc[head].flags = 0;
vq->vring.desc[head].addr = virt_to_phys(buf);
vq->vring.desc[head].len = len;
vq->free_head = vq->vring.desc[head].next;
vq->data[head] = buf;
avail = (vq->vring.avail->idx & (vq->vring.num-1));
vq->vring.avail->ring[avail] = head;
wmb();
vq->vring.avail->idx++;
vq->num_added++;
return 0;
}
bool virtqueue_kick(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
mb();
return vq->notify(_vq);
}
void detach_buf(struct vring_virtqueue *vq, unsigned head)
{
unsigned i = head;
vq->data[head] = NULL;
while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
i = vq->vring.desc[i].next;
vq->vq.num_free++;
}
vq->vring.desc[i].next = vq->free_head;
vq->free_head = head;
vq->vq.num_free++;
}
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 last_used;
unsigned i;
void *ret;
rmb();
last_used = (vq->last_used_idx & (vq->vring.num-1));
i = vq->vring.used->ring[last_used].id;
*len = vq->vring.used->ring[last_used].len;
ret = vq->data[i];
detach_buf(vq, i);
vq->last_used_idx++;
return ret;
}
struct virtio_device *virtio_bind(u32 devid)
{
return virtio_mmio_bind(devid);
}

View file

@ -0,0 +1,150 @@
#ifndef _VIRTIO_H_
#define _VIRTIO_H_
/*
* A minimal implementation of virtio.
* Structures adapted from the Linux Kernel.
*
* Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#define VIRTIO_ID_CONSOLE 3
struct virtio_device_id {
u32 device;
u32 vendor;
};
struct virtio_device {
struct virtio_device_id id;
const struct virtio_config_ops *config;
};
struct virtqueue {
void (*callback)(struct virtqueue *vq);
const char *name;
struct virtio_device *vdev;
unsigned int index;
unsigned int num_free;
void *priv;
};
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
void (*get)(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len);
void (*set)(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len);
int (*find_vqs)(struct virtio_device *vdev, unsigned nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char *names[]);
};
static inline u8
virtio_config_readb(struct virtio_device *vdev, unsigned offset)
{
u8 val;
vdev->config->get(vdev, offset, &val, 1);
return val;
}
static inline u16
virtio_config_readw(struct virtio_device *vdev, unsigned offset)
{
u16 val;
vdev->config->get(vdev, offset, &val, 2);
return val;
}
static inline u32
virtio_config_readl(struct virtio_device *vdev, unsigned offset)
{
u32 val;
vdev->config->get(vdev, offset, &val, 4);
return val;
}
static inline void
virtio_config_writeb(struct virtio_device *vdev, unsigned offset, u8 val)
{
vdev->config->set(vdev, offset, &val, 1);
}
static inline void
virtio_config_writew(struct virtio_device *vdev, unsigned offset, u16 val)
{
vdev->config->set(vdev, offset, &val, 2);
}
static inline void
virtio_config_writel(struct virtio_device *vdev, unsigned offset, u32 val)
{
vdev->config->set(vdev, offset, &val, 4);
}
#define VRING_DESC_F_NEXT 1
#define VRING_DESC_F_WRITE 2
struct vring_desc {
u64 addr;
u32 len;
u16 flags;
u16 next;
};
struct vring_avail {
u16 flags;
u16 idx;
u16 ring[];
};
struct vring_used_elem {
u32 id;
u32 len;
};
struct vring_used {
u16 flags;
u16 idx;
struct vring_used_elem ring[];
};
struct vring {
unsigned int num;
struct vring_desc *desc;
struct vring_avail *avail;
struct vring_used *used;
};
struct vring_virtqueue {
struct virtqueue vq;
struct vring vring;
unsigned int free_head;
unsigned int num_added;
u16 last_used_idx;
bool (*notify)(struct virtqueue *vq);
void *data[];
};
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
extern void vring_init(struct vring *vr, unsigned int num, void *p,
unsigned long align);
extern void vring_init_virtqueue(struct vring_virtqueue *vq, unsigned index,
unsigned num, unsigned vring_align,
struct virtio_device *vdev, void *pages,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name);
extern int virtqueue_add_outbuf(struct virtqueue *vq, char *buf,
unsigned int len);
extern bool virtqueue_kick(struct virtqueue *vq);
extern void detach_buf(struct vring_virtqueue *vq, unsigned head);
extern void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len);
extern struct virtio_device *virtio_bind(u32 devid);
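/*
 * Illustrative usage sketch (error handling elided; the buffer and its
 * length are placeholders):
 *
 *	struct virtio_device *vdev = virtio_bind(VIRTIO_ID_CONSOLE);
 *	struct virtqueue *vq;
 *
 *	vdev->config->find_vqs(vdev, 1, &vq, NULL, NULL);
 *	virtqueue_add_outbuf(vq, buf, len);
 *	virtqueue_kick(vq);
 */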
#endif /* _VIRTIO_H_ */

View file

@ -0,0 +1,52 @@
#include "libcflat.h"
#include "acpi.h"
void* find_acpi_table_addr(u32 sig)
{
unsigned long addr;
struct rsdp_descriptor *rsdp;
struct rsdt_descriptor_rev1 *rsdt;
void *end;
int i;
/* FACS is special... */
if (sig == FACS_SIGNATURE) {
struct fadt_descriptor_rev1 *fadt;
fadt = find_acpi_table_addr(FACP_SIGNATURE);
if (!fadt) {
return NULL;
}
return (void*)(ulong)fadt->firmware_ctrl;
}
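/* Scan the BIOS area for the RSDP; its signature is the string
 * "RSD PTR " (0x2052545020445352 when read as a little-endian u64). */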
for (addr = 0xf0000; addr < 0x100000; addr += 16) {
rsdp = (void*)addr;
if (rsdp->signature == 0x2052545020445352LL)
break;
}
if (addr == 0x100000) {
printf("Can't find RSDP\n");
return 0;
}
if (sig == RSDP_SIGNATURE) {
return rsdp;
}
rsdt = (void*)(ulong)rsdp->rsdt_physical_address;
if (!rsdt || rsdt->signature != RSDT_SIGNATURE)
return 0;
if (sig == RSDT_SIGNATURE) {
return rsdt;
}
end = (void*)rsdt + rsdt->length;
for (i=0; (void*)&rsdt->table_offset_entry[i] < end; i++) {
struct acpi_table *t = (void*)(ulong)rsdt->table_offset_entry[i];
if (t && t->signature == sig) {
return t;
}
}
return NULL;
}

View file

@ -0,0 +1,104 @@
#ifndef KVM_ACPI_H
#define KVM_ACPI_H 1
#include "libcflat.h"
#define ACPI_SIGNATURE(c1, c2, c3, c4) \
((c1) | ((c2) << 8) | ((c3) << 16) | ((c4) << 24))
#define RSDP_SIGNATURE ACPI_SIGNATURE('R','S','D','P')
#define RSDT_SIGNATURE ACPI_SIGNATURE('R','S','D','T')
#define FACP_SIGNATURE ACPI_SIGNATURE('F','A','C','P')
#define FACS_SIGNATURE ACPI_SIGNATURE('F','A','C','S')
struct rsdp_descriptor { /* Root System Descriptor Pointer */
u64 signature; /* ACPI signature, contains "RSD PTR " */
u8 checksum; /* To make sum of struct == 0 */
u8 oem_id [6]; /* OEM identification */
u8 revision; /* Must be 0 for 1.0, 2 for 2.0 */
u32 rsdt_physical_address; /* 32-bit physical address of RSDT */
u32 length; /* XSDT Length in bytes including hdr */
u64 xsdt_physical_address; /* 64-bit physical address of XSDT */
u8 extended_checksum; /* Checksum of entire table */
u8 reserved [3]; /* Reserved field must be 0 */
};
#define ACPI_TABLE_HEADER_DEF /* ACPI common table header */ \
u32 signature; /* ACPI signature (4 ASCII characters) */ \
u32 length; /* Length of table, in bytes, including header */ \
u8 revision; /* ACPI Specification minor version # */ \
u8 checksum; /* To make sum of entire table == 0 */ \
u8 oem_id [6]; /* OEM identification */ \
u8 oem_table_id [8]; /* OEM table identification */ \
u32 oem_revision; /* OEM revision number */ \
u8 asl_compiler_id [4]; /* ASL compiler vendor ID */ \
u32 asl_compiler_revision; /* ASL compiler revision number */
struct acpi_table {
ACPI_TABLE_HEADER_DEF
char data[0];
};
struct rsdt_descriptor_rev1 {
ACPI_TABLE_HEADER_DEF
u32 table_offset_entry[0];
};
struct fadt_descriptor_rev1
{
ACPI_TABLE_HEADER_DEF /* ACPI common table header */
u32 firmware_ctrl; /* Physical address of FACS */
u32 dsdt; /* Physical address of DSDT */
u8 model; /* System Interrupt Model */
u8 reserved1; /* Reserved */
u16 sci_int; /* System vector of SCI interrupt */
u32 smi_cmd; /* Port address of SMI command port */
u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */
u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */
u8 S4bios_req; /* Value to write to SMI CMD to enter S4BIOS state */
u8 reserved2; /* Reserved - must be zero */
u32 pm1a_evt_blk; /* Port address of Power Mgt 1a acpi_event Reg Blk */
u32 pm1b_evt_blk; /* Port address of Power Mgt 1b acpi_event Reg Blk */
u32 pm1a_cnt_blk; /* Port address of Power Mgt 1a Control Reg Blk */
u32 pm1b_cnt_blk; /* Port address of Power Mgt 1b Control Reg Blk */
u32 pm2_cnt_blk; /* Port address of Power Mgt 2 Control Reg Blk */
u32 pm_tmr_blk; /* Port address of Power Mgt Timer Ctrl Reg Blk */
u32 gpe0_blk; /* Port addr of General Purpose acpi_event 0 Reg Blk */
u32 gpe1_blk; /* Port addr of General Purpose acpi_event 1 Reg Blk */
u8 pm1_evt_len; /* Byte length of ports at pm1_x_evt_blk */
u8 pm1_cnt_len; /* Byte length of ports at pm1_x_cnt_blk */
u8 pm2_cnt_len; /* Byte Length of ports at pm2_cnt_blk */
u8 pm_tmr_len; /* Byte Length of ports at pm_tm_blk */
u8 gpe0_blk_len; /* Byte Length of ports at gpe0_blk */
u8 gpe1_blk_len; /* Byte Length of ports at gpe1_blk */
u8 gpe1_base; /* Offset in gpe model where gpe1 events start */
u8 reserved3; /* Reserved */
u16 plvl2_lat; /* Worst case HW latency to enter/exit C2 state */
u16 plvl3_lat; /* Worst case HW latency to enter/exit C3 state */
u16 flush_size; /* Size of area read to flush caches */
u16 flush_stride; /* Stride used in flushing caches */
u8 duty_offset; /* Bit location of duty cycle field in p_cnt reg */
u8 duty_width; /* Bit width of duty cycle field in p_cnt reg */
u8 day_alrm; /* Index to day-of-month alarm in RTC CMOS RAM */
u8 mon_alrm; /* Index to month-of-year alarm in RTC CMOS RAM */
u8 century; /* Index to century in RTC CMOS RAM */
u8 reserved4; /* Reserved */
u8 reserved4a; /* Reserved */
u8 reserved4b; /* Reserved */
};
struct facs_descriptor_rev1
{
u32 signature; /* ACPI Signature */
u32 length; /* Length of structure, in bytes */
u32 hardware_signature; /* Hardware configuration signature */
u32 firmware_waking_vector; /* ACPI OS waking vector */
u32 global_lock; /* Global Lock */
u32 S4bios_f : 1; /* Indicates if S4BIOS support is present */
u32 reserved1 : 31; /* Must be 0 */
u8 reserved3 [40]; /* Reserved - must be zero */
};
void* find_acpi_table_addr(u32 sig);
#endif

View file

@ -0,0 +1,138 @@
#ifndef _ASM_X86_APICDEF_H
#define _ASM_X86_APICDEF_H
/*
* Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
*
* Alan Cox <Alan.Cox@linux.org>, 1995.
* Ingo Molnar <mingo@redhat.com>, 1999, 2000
*/
#define APIC_DEFAULT_PHYS_BASE 0xfee00000
#define APIC_BSP (1UL << 8)
#define APIC_EXTD (1UL << 10)
#define APIC_EN (1UL << 11)
#define APIC_ID 0x20
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
#define GET_APIC_VERSION(x) ((x) & 0xFFu)
#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu)
#ifdef CONFIG_X86_32
# define APIC_INTEGRATED(x) ((x) & 0xF0u)
#else
# define APIC_INTEGRATED(x) (1)
#endif
#define APIC_XAPIC(x) ((x) >= 0x14)
#define APIC_TASKPRI 0x80
#define APIC_TPRI_MASK 0xFFu
#define APIC_ARBPRI 0x90
#define APIC_ARBPRI_MASK 0xFFu
#define APIC_PROCPRI 0xA0
#define APIC_EOI 0xB0
#define APIC_EIO_ACK 0x0
#define APIC_RRR 0xC0
#define APIC_LDR 0xD0
#define APIC_LDR_MASK (0xFFu << 24)
#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu)
#define SET_APIC_LOGICAL_ID(x) (((x) << 24))
#define APIC_ALL_CPUS 0xFFu
#define APIC_DFR 0xE0
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
#define APIC_SPIV_APIC_ENABLED (1 << 8)
#define APIC_ISR 0x100
#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR 0x180
#define APIC_IRR 0x200
#define APIC_ESR 0x280
#define APIC_ESR_SEND_CS 0x00001
#define APIC_ESR_RECV_CS 0x00002
#define APIC_ESR_SEND_ACC 0x00004
#define APIC_ESR_RECV_ACC 0x00008
#define APIC_ESR_SENDILL 0x00020
#define APIC_ESR_RECVILL 0x00040
#define APIC_ESR_ILLREGA 0x00080
#define APIC_ICR 0x300
#define APIC_DEST_SELF 0x40000
#define APIC_DEST_ALLINC 0x80000
#define APIC_DEST_ALLBUT 0xC0000
#define APIC_ICR_RR_MASK 0x30000
#define APIC_ICR_RR_INVALID 0x00000
#define APIC_ICR_RR_INPROG 0x10000
#define APIC_ICR_RR_VALID 0x20000
#define APIC_INT_LEVELTRIG 0x08000
#define APIC_INT_ASSERT 0x04000
#define APIC_ICR_BUSY 0x01000
#define APIC_DEST_LOGICAL 0x00800
#define APIC_DEST_PHYSICAL 0x00000
#define APIC_DM_FIXED 0x00000
#define APIC_DM_LOWEST 0x00100
#define APIC_DM_SMI 0x00200
#define APIC_DM_REMRD 0x00300
#define APIC_DM_NMI 0x00400
#define APIC_DM_INIT 0x00500
#define APIC_DM_STARTUP 0x00600
#define APIC_DM_EXTINT 0x00700
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF)
#define SET_APIC_DEST_FIELD(x) ((x) << 24)
#define APIC_LVTT 0x320
#define APIC_LVTTHMR 0x330
#define APIC_LVTPC 0x340
#define APIC_LVT0 0x350
#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18)
#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3)
#define SET_APIC_TIMER_BASE(x) (((x) << 18))
#define APIC_TIMER_BASE_CLKIN 0x0
#define APIC_TIMER_BASE_TMBASE 0x1
#define APIC_TIMER_BASE_DIV 0x2
#define APIC_LVT_TIMER_ONESHOT (0 << 17)
#define APIC_LVT_TIMER_PERIODIC (1 << 17)
#define APIC_LVT_TIMER_TSCDEADLINE (2 << 17)
#define APIC_LVT_MASKED (1 << 16)
#define APIC_LVT_LEVEL_TRIGGER (1 << 15)
#define APIC_LVT_REMOTE_IRR (1 << 14)
#define APIC_INPUT_POLARITY (1 << 13)
#define APIC_SEND_PENDING (1 << 12)
#define APIC_MODE_MASK 0x700
#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7)
#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8))
#define APIC_MODE_FIXED 0x0
#define APIC_MODE_NMI 0x4
#define APIC_MODE_EXTINT 0x7
#define APIC_LVT1 0x360
#define APIC_LVTERR 0x370
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
#define APIC_SELF_IPI 0x3F0
#define APIC_TDR_DIV_TMBASE (1 << 2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
#define APIC_TDR_DIV_4 0x1
#define APIC_TDR_DIV_8 0x2
#define APIC_TDR_DIV_16 0x3
#define APIC_TDR_DIV_32 0x8
#define APIC_TDR_DIV_64 0x9
#define APIC_TDR_DIV_128 0xA
#define APIC_EILVT0 0x500
#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
#define APIC_EILVT_NR_AMD_10H 4
#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
#define APIC_EILVT_MSG_FIX 0x0
#define APIC_EILVT_MSG_SMI 0x2
#define APIC_EILVT_MSG_NMI 0x4
#define APIC_EILVT_MSG_EXT 0x7
#define APIC_EILVT_MASKED (1 << 16)
#define APIC_EILVT1 0x510
#define APIC_EILVT2 0x520
#define APIC_EILVT3 0x530
#define APIC_BASE_MSR 0x800
#endif /* _ASM_X86_APICDEF_H */

View file

@@ -0,0 +1,208 @@
#include "libcflat.h"
#include "apic.h"
#include "msr.h"
#include "processor.h"
static void *g_apic = (void *)0xfee00000;
static void *g_ioapic = (void *)0xfec00000;
struct apic_ops {
u32 (*reg_read)(unsigned reg);
void (*reg_write)(unsigned reg, u32 val);
void (*icr_write)(u32 val, u32 dest);
u32 (*id)(void);
};
static void outb(unsigned char data, unsigned short port)
{
asm volatile ("out %0, %1" : : "a"(data), "d"(port));
}
void eoi(void)
{
apic_write(APIC_EOI, 0);
}
static u32 xapic_read(unsigned reg)
{
return *(volatile u32 *)(g_apic + reg);
}
static void xapic_write(unsigned reg, u32 val)
{
*(volatile u32 *)(g_apic + reg) = val;
}
static void xapic_icr_write(u32 val, u32 dest)
{
while (xapic_read(APIC_ICR) & APIC_ICR_BUSY)
;
xapic_write(APIC_ICR2, dest << 24);
xapic_write(APIC_ICR, val);
}
static uint32_t xapic_id(void)
{
return xapic_read(APIC_ID) >> 24;
}
static const struct apic_ops xapic_ops = {
.reg_read = xapic_read,
.reg_write = xapic_write,
.icr_write = xapic_icr_write,
.id = xapic_id,
};
static const struct apic_ops *apic_ops = &xapic_ops;
static u32 x2apic_read(unsigned reg)
{
unsigned a, d;
asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(APIC_BASE_MSR + reg/16));
return a | (u64)d << 32;
}
static void x2apic_write(unsigned reg, u32 val)
{
asm volatile ("wrmsr" : : "a"(val), "d"(0), "c"(APIC_BASE_MSR + reg/16));
}
static void x2apic_icr_write(u32 val, u32 dest)
{
asm volatile ("wrmsr" : : "a"(val), "d"(dest),
"c"(APIC_BASE_MSR + APIC_ICR/16));
}
static uint32_t x2apic_id(void)
{
return x2apic_read(APIC_ID);
}
static const struct apic_ops x2apic_ops = {
.reg_read = x2apic_read,
.reg_write = x2apic_write,
.icr_write = x2apic_icr_write,
.id = x2apic_id,
};
u32 apic_read(unsigned reg)
{
return apic_ops->reg_read(reg);
}
void apic_write(unsigned reg, u32 val)
{
apic_ops->reg_write(reg, val);
}
bool apic_read_bit(unsigned reg, int n)
{
reg += (n >> 5) << 4;
n &= 31;
return (apic_read(reg) & (1 << n)) != 0;
}
void apic_icr_write(u32 val, u32 dest)
{
apic_ops->icr_write(val, dest);
}
uint32_t apic_id(void)
{
return apic_ops->id();
}
uint8_t apic_get_tpr(void)
{
unsigned long tpr;
#ifdef __x86_64__
asm volatile ("mov %%cr8, %0" : "=r"(tpr));
#else
tpr = apic_read(APIC_TASKPRI) >> 4;
#endif
return tpr;
}
void apic_set_tpr(uint8_t tpr)
{
#ifdef __x86_64__
asm volatile ("mov %0, %%cr8" : : "r"((unsigned long) tpr));
#else
apic_write(APIC_TASKPRI, tpr << 4);
#endif
}
int enable_x2apic(void)
{
unsigned a, b, c, d;
asm ("cpuid" : "=a"(a), "=b"(b), "=c"(c), "=d"(d) : "0"(1));
if (c & (1 << 21)) {
asm ("rdmsr" : "=a"(a), "=d"(d) : "c"(MSR_IA32_APICBASE));
a |= 1 << 10;
asm ("wrmsr" : : "a"(a), "d"(d), "c"(MSR_IA32_APICBASE));
apic_ops = &x2apic_ops;
return 1;
} else {
return 0;
}
}
void reset_apic(void)
{
u64 disabled = rdmsr(MSR_IA32_APICBASE) & ~(APIC_EN | APIC_EXTD);
wrmsr(MSR_IA32_APICBASE, disabled);
apic_ops = &xapic_ops;
wrmsr(MSR_IA32_APICBASE, disabled | APIC_EN);
}
u32 ioapic_read_reg(unsigned reg)
{
*(volatile u32 *)g_ioapic = reg;
return *(volatile u32 *)(g_ioapic + 0x10);
}
void ioapic_write_reg(unsigned reg, u32 value)
{
*(volatile u32 *)g_ioapic = reg;
*(volatile u32 *)(g_ioapic + 0x10) = value;
}
void ioapic_write_redir(unsigned line, ioapic_redir_entry_t e)
{
ioapic_write_reg(0x10 + line * 2 + 0, ((u32 *)&e)[0]);
ioapic_write_reg(0x10 + line * 2 + 1, ((u32 *)&e)[1]);
}
ioapic_redir_entry_t ioapic_read_redir(unsigned line)
{
ioapic_redir_entry_t e;
((u32 *)&e)[0] = ioapic_read_reg(0x10 + line * 2 + 0);
((u32 *)&e)[1] = ioapic_read_reg(0x10 + line * 2 + 1);
return e;
}
void set_mask(unsigned line, int mask)
{
ioapic_redir_entry_t e = ioapic_read_redir(line);
e.mask = mask;
ioapic_write_redir(line, e);
}
void enable_apic(void)
{
printf("enabling apic\n");
xapic_write(0xf0, 0x1ff); /* spurious vector register */
}
void mask_pic_interrupts(void)
{
outb(0xff, 0x21);
outb(0xff, 0xa1);
}
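The xAPIC/x2APIC split above is hidden behind the apic_ops dispatch table. As a minimal usage sketch (editor's illustration, not part of the imported file), a test could switch into x2APIC mode when CPUID advertises it and read its APIC ID through the same accessors; printf() comes from libcflat.h, already included at the top of this file:

static void apic_mode_example(void)
{
    /* enable_x2apic() checks CPUID.1:ECX bit 21 and flips apic_ops. */
    if (enable_x2apic())
        printf("x2APIC enabled, APIC ID %u\n", apic_id());
    else
        printf("xAPIC only, APIC ID %u\n", apic_id());
}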

View file

@@ -0,0 +1,50 @@
#ifndef CFLAT_APIC_H
#define CFLAT_APIC_H
#include <stdint.h>
#include "apic-defs.h"
typedef struct {
uint8_t vector;
uint8_t delivery_mode:3;
uint8_t dest_mode:1;
uint8_t delivery_status:1;
uint8_t polarity:1;
uint8_t remote_irr:1;
uint8_t trig_mode:1;
uint8_t mask:1;
uint8_t reserve:7;
uint8_t reserved[4];
uint8_t dest_id;
} ioapic_redir_entry_t;
typedef enum trigger_mode {
TRIGGER_EDGE = 0,
TRIGGER_LEVEL,
TRIGGER_MAX,
} trigger_mode_t;
void mask_pic_interrupts(void);
void eoi(void);
uint8_t apic_get_tpr(void);
void apic_set_tpr(uint8_t tpr);
void ioapic_write_redir(unsigned line, ioapic_redir_entry_t e);
void ioapic_write_reg(unsigned reg, uint32_t value);
ioapic_redir_entry_t ioapic_read_redir(unsigned line);
uint32_t ioapic_read_reg(unsigned reg);
void set_mask(unsigned line, int mask);
void enable_apic(void);
uint32_t apic_read(unsigned reg);
bool apic_read_bit(unsigned reg, int n);
void apic_write(unsigned reg, uint32_t val);
void apic_icr_write(uint32_t val, uint32_t dest);
uint32_t apic_id(void);
int enable_x2apic(void);
void reset_apic(void);
#endif

View file

@@ -0,0 +1,27 @@
#ifndef _ASM_X86_BARRIER_H_
#define _ASM_X86_BARRIER_H_
/*
* Copyright (C) 2016, Red Hat Inc, Alexander Gordeev <agordeev@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence":::"memory")
#define smp_rmb() barrier()
#define smp_wmb() barrier()
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
asm volatile("rep; nop" ::: "memory");
}
static inline void cpu_relax(void)
{
rep_nop();
}
#endif
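A small, hypothetical sketch of how these primitives are typically combined in a busy-wait loop (illustration only, not part of the imported header):

static inline void wait_for_flag(volatile int *flag)
{
    while (!*flag)
        cpu_relax();  /* PAUSE keeps the spin loop cheap on SMT/virtual CPUs */
    rmb();            /* conservatively order reads of data published with the flag */
}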

View file

@@ -0,0 +1,14 @@
#ifndef _ASMX86_BITOPS_H_
#define _ASMX86_BITOPS_H_
#ifndef _BITOPS_H_
#error only <bitops.h> can be included directly
#endif
#ifdef __x86_64__
#define BITS_PER_LONG 64
#else
#define BITS_PER_LONG 32
#endif
#endif

View file

@@ -0,0 +1,65 @@
#ifndef _ASM_X86_IO_H_
#define _ASM_X86_IO_H_
#define __iomem
#define inb inb
static inline uint8_t inb(unsigned long port)
{
unsigned char value;
asm volatile("inb %w1, %0" : "=a" (value) : "Nd" ((unsigned short)port));
return value;
}
#define inw inw
static inline uint16_t inw(unsigned long port)
{
unsigned short value;
asm volatile("inw %w1, %0" : "=a" (value) : "Nd" ((unsigned short)port));
return value;
}
#define inl inl
static inline uint32_t inl(unsigned long port)
{
unsigned int value;
asm volatile("inl %w1, %0" : "=a" (value) : "Nd" ((unsigned short)port));
return value;
}
#define outb outb
static inline void outb(uint8_t value, unsigned long port)
{
asm volatile("outb %b0, %w1" : : "a"(value), "Nd"((unsigned short)port));
}
#define outw outw
static inline void outw(uint16_t value, unsigned long port)
{
asm volatile("outw %w0, %w1" : : "a"(value), "Nd"((unsigned short)port));
}
#define outl outl
static inline void outl(uint32_t value, unsigned long port)
{
asm volatile("outl %0, %w1" : : "a"(value), "Nd"((unsigned short)port));
}
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(const void *virt)
{
return (unsigned long)virt;
}
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long phys)
{
return (void *)phys;
}
#define ioremap ioremap
void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
#include <asm-generic/io.h>
#endif

View file

@@ -0,0 +1,48 @@
#ifndef _ASM_X86_PAGE_H_
#define _ASM_X86_PAGE_H_
/*
* Copyright (C) 2016, Red Hat Inc, Alexander Gordeev <agordeev@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include <linux/const.h>
#include <bitops.h>
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#ifndef __ASSEMBLY__
#ifdef __x86_64__
#define LARGE_PAGE_SIZE (512 * PAGE_SIZE)
#else
#define LARGE_PAGE_SIZE (1024 * PAGE_SIZE)
#endif
#define PT_PRESENT_MASK (1ull << 0)
#define PT_WRITABLE_MASK (1ull << 1)
#define PT_USER_MASK (1ull << 2)
#define PT_ACCESSED_MASK (1ull << 5)
#define PT_DIRTY_MASK (1ull << 6)
#define PT_PAGE_SIZE_MASK (1ull << 7)
#define PT64_NX_MASK (1ull << 63)
#define PT_ADDR_MASK GENMASK_ULL(51, 12)
#ifdef __x86_64__
#define PAGE_LEVEL 4
#define PGDIR_WIDTH 9
#define PGDIR_MASK 511
#else
#define PAGE_LEVEL 2
#define PGDIR_WIDTH 10
#define PGDIR_MASK 1023
#endif
#define PGDIR_BITS(lvl) (((lvl) - 1) * PGDIR_WIDTH + PAGE_SHIFT)
#define PGDIR_OFFSET(va, lvl) (((va) >> PGDIR_BITS(lvl)) & PGDIR_MASK)
#endif /* !__ASSEMBLY__ */
#endif
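For illustration (not part of the imported header), the PGDIR_* helpers can be used to derive the page-table index consumed at each level of a walk; printf() is assumed to come from libcflat.h:

static void dump_pt_indices(unsigned long va)
{
    int lvl;

    for (lvl = PAGE_LEVEL; lvl >= 1; lvl--)
        printf("level %d: index %lu\n", lvl, PGDIR_OFFSET(va, lvl));
}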

View file

@@ -0,0 +1,59 @@
#ifndef ASM_PCI_H
#define ASM_PCI_H
/*
* Copyright (C) 2013, Red Hat Inc, Michael S. Tsirkin <mst@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#include "pci.h"
#include "x86/asm/io.h"
#define PCI_CONF1_ADDRESS(dev, reg) ((0x1 << 31) | (dev << 8) | reg)
static inline uint8_t pci_config_readb(pcidevaddr_t dev, uint8_t reg)
{
outl(PCI_CONF1_ADDRESS(dev, reg), 0xCF8);
return inb(0xCFC);
}
static inline uint16_t pci_config_readw(pcidevaddr_t dev, uint8_t reg)
{
outl(PCI_CONF1_ADDRESS(dev, reg), 0xCF8);
return inw(0xCFC);
}
static inline uint32_t pci_config_readl(pcidevaddr_t dev, uint8_t reg)
{
outl(PCI_CONF1_ADDRESS(dev, reg), 0xCF8);
return inl(0xCFC);
}
static inline void pci_config_writeb(pcidevaddr_t dev, uint8_t reg,
uint8_t val)
{
outl(PCI_CONF1_ADDRESS(dev, reg), 0xCF8);
outb(val, 0xCFC);
}
static inline void pci_config_writew(pcidevaddr_t dev, uint8_t reg,
uint16_t val)
{
outl(PCI_CONF1_ADDRESS(dev, reg), 0xCF8);
outw(val, 0xCFC);
}
static inline void pci_config_writel(pcidevaddr_t dev, uint8_t reg,
uint32_t val)
{
outl(PCI_CONF1_ADDRESS(dev, reg), 0xCF8);
outl(val, 0xCFC);
}
static inline
phys_addr_t pci_translate_addr(pcidevaddr_t dev __unused, uint64_t addr)
{
return addr;
}
#endif
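As a hypothetical usage sketch (not part of the imported header), the accessors above can read the standard vendor/device ID dword at configuration offset 0 for a given bus/device/function address; printf() comes from libcflat.h, included above:

static void pci_dump_ids(pcidevaddr_t dev)
{
    uint32_t id = pci_config_readl(dev, 0x00);  /* vendor in bits 15:0, device in 31:16 */

    printf("%02x:%02x.%x vendor %04x device %04x\n",
           dev >> 8, (dev >> 3) & 0x1f, dev & 0x7,
           id & 0xffff, id >> 16);
}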

View file

@@ -0,0 +1,11 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
struct spinlock {
int v;
};
void spin_lock(struct spinlock *lock);
void spin_unlock(struct spinlock *lock);
#endif

View file

@@ -0,0 +1,11 @@
#ifndef _X86ASM_STACK_H_
#define _X86ASM_STACK_H_
#ifndef _STACK_H_
#error Do not directly include <asm/stack.h>. Just use <stack.h>.
#endif
#define HAVE_ARCH_BACKTRACE_FRAME
#define HAVE_ARCH_BACKTRACE
#endif

View file

@@ -0,0 +1,37 @@
#include <libcflat.h>
#include "atomic.h"
#ifdef __i386__
u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new)
{
u32 low = new;
u32 high = new >> 32;
asm volatile("lock cmpxchg8b %1\n"
: "+A" (old),
"+m" (*(volatile long long *)&v->counter)
: "b" (low), "c" (high)
: "memory"
);
return old;
}
#else
u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new)
{
u64 ret;
u64 _old = old;
u64 _new = new;
asm volatile("lock cmpxchgq %2,%1"
: "=a" (ret), "+m" (*(volatile long *)&v->counter)
: "r" (_new), "0" (_old)
: "memory"
);
return ret;
}
#endif

View file

@@ -0,0 +1,166 @@
#ifndef __ATOMIC_H
#define __ATOMIC_H
#include "asm-generic/atomic.h"
typedef struct {
volatile int counter;
} atomic_t;
#ifdef __i386__
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return v->counter;
}
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
asm volatile("lock incl %0"
: "+m" (v->counter));
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic_dec(atomic_t *v)
{
asm volatile("lock decl %0"
: "+m" (v->counter));
}
typedef struct {
u64 __attribute__((aligned(8))) counter;
} atomic64_t;
#define ATOMIC64_INIT(val) { (val) }
/**
* atomic64_read - read atomic64 variable
* @ptr: pointer to type atomic64_t
*
* Atomically reads the value of @ptr and returns it.
*/
static inline u64 atomic64_read(atomic64_t *ptr)
{
u64 res;
/*
* Note, we inline this atomic64_t primitive because
* it only clobbers EAX/EDX and leaves the others
* untouched. We also (somewhat subtly) rely on the
* fact that cmpxchg8b returns the current 64-bit value
* of the memory location we are touching:
*/
asm volatile("mov %%ebx, %%eax\n\t"
"mov %%ecx, %%edx\n\t"
"lock cmpxchg8b %1\n"
: "=&A" (res)
: "m" (*ptr)
);
return res;
}
u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new);
#elif defined(__x86_64__)
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return v->counter;
}
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
asm volatile("lock incl %0"
: "=m" (v->counter)
: "m" (v->counter));
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic_dec(atomic_t *v)
{
asm volatile("lock decl %0"
: "=m" (v->counter)
: "m" (v->counter));
}
typedef struct {
long long counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
/**
* atomic64_read - read atomic64 variable
* @v: pointer of type atomic64_t
*
* Atomically reads the value of @v.
* Doesn't imply a read memory barrier.
*/
static inline long atomic64_read(const atomic64_t *v)
{
return v->counter;
}
u64 atomic64_cmpxchg(atomic64_t *v, u64 old, u64 new);
#endif
#endif
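A minimal sketch (editor's illustration, not part of the imported header) of the usual compare-and-swap retry loop built on atomic64_cmpxchg():

static u64 atomic64_add_return_sketch(atomic64_t *v, u64 delta)
{
    u64 old, new;

    do {
        old = atomic64_read(v);
        new = old + delta;
    } while (atomic64_cmpxchg(v, old, new) != old);  /* retry if another CPU raced us */

    return new;
}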

View file

@@ -0,0 +1,407 @@
#include "libcflat.h"
#include "desc.h"
#include "processor.h"
#include <setjmp.h>
void set_idt_entry(int vec, void *addr, int dpl)
{
idt_entry_t *e = &boot_idt[vec];
memset(e, 0, sizeof *e);
e->offset0 = (unsigned long)addr;
e->selector = read_cs();
e->ist = 0;
e->type = 14;
e->dpl = dpl;
e->p = 1;
e->offset1 = (unsigned long)addr >> 16;
#ifdef __x86_64__
e->offset2 = (unsigned long)addr >> 32;
#endif
}
void set_idt_dpl(int vec, u16 dpl)
{
idt_entry_t *e = &boot_idt[vec];
e->dpl = dpl;
}
void set_idt_sel(int vec, u16 sel)
{
idt_entry_t *e = &boot_idt[vec];
e->selector = sel;
}
struct ex_record {
unsigned long rip;
unsigned long handler;
};
extern struct ex_record exception_table_start, exception_table_end;
static const char* exception_mnemonic(int vector)
{
switch(vector) {
case 0: return "#DE";
case 1: return "#DB";
case 2: return "#NMI";
case 3: return "#BP";
case 4: return "#OF";
case 5: return "#BR";
case 6: return "#UD";
case 7: return "#NM";
case 8: return "#DF";
case 10: return "#TS";
case 11: return "#NP";
case 12: return "#SS";
case 13: return "#GP";
case 14: return "#PF";
case 16: return "#MF";
case 17: return "#AC";
case 18: return "#MC";
case 19: return "#XM";
default: return "#??";
}
}
static void unhandled_exception(struct ex_regs *regs, bool cpu)
{
printf("Unhandled %sexception %ld %s at ip %016lx\n",
cpu ? "cpu " : "", regs->vector,
exception_mnemonic(regs->vector), regs->rip);
if (regs->vector == 14)
printf("PF at 0x%lx addr 0x%lx\n", regs->rip, read_cr2());
printf("error_code=%04lx rflags=%08lx cs=%08lx\n"
"rax=%016lx rcx=%016lx rdx=%016lx rbx=%016lx\n"
"rbp=%016lx rsi=%016lx rdi=%016lx\n"
#ifdef __x86_64__
" r8=%016lx r9=%016lx r10=%016lx r11=%016lx\n"
"r12=%016lx r13=%016lx r14=%016lx r15=%016lx\n"
#endif
"cr0=%016lx cr2=%016lx cr3=%016lx cr4=%016lx\n"
#ifdef __x86_64__
"cr8=%016lx\n"
#endif
,
regs->error_code, regs->rflags, regs->cs,
regs->rax, regs->rcx, regs->rdx, regs->rbx,
regs->rbp, regs->rsi, regs->rdi,
#ifdef __x86_64__
regs->r8, regs->r9, regs->r10, regs->r11,
regs->r12, regs->r13, regs->r14, regs->r15,
#endif
read_cr0(), read_cr2(), read_cr3(), read_cr4()
#ifdef __x86_64__
, read_cr8()
#endif
);
dump_frame_stack((void*) regs->rip, (void*) regs->rbp);
abort();
}
static void check_exception_table(struct ex_regs *regs)
{
struct ex_record *ex;
unsigned ex_val;
ex_val = regs->vector | (regs->error_code << 16) |
(((regs->rflags >> 16) & 1) << 8);
asm("mov %0, %%gs:4" : : "r"(ex_val));
for (ex = &exception_table_start; ex != &exception_table_end; ++ex) {
if (ex->rip == regs->rip) {
regs->rip = ex->handler;
return;
}
}
unhandled_exception(regs, false);
}
static void (*exception_handlers[32])(struct ex_regs *regs);
void handle_exception(u8 v, void (*func)(struct ex_regs *regs))
{
if (v < 32)
exception_handlers[v] = func;
}
#ifndef __x86_64__
__attribute__((regparm(1)))
#endif
void do_handle_exception(struct ex_regs *regs)
{
if (regs->vector < 32 && exception_handlers[regs->vector]) {
exception_handlers[regs->vector](regs);
return;
}
unhandled_exception(regs, true);
}
#define EX(NAME, N) extern char NAME##_fault; \
asm (".pushsection .text \n\t" \
#NAME"_fault: \n\t" \
"push"W" $0 \n\t" \
"push"W" $"#N" \n\t" \
"jmp __handle_exception \n\t" \
".popsection")
#define EX_E(NAME, N) extern char NAME##_fault; \
asm (".pushsection .text \n\t" \
#NAME"_fault: \n\t" \
"push"W" $"#N" \n\t" \
"jmp __handle_exception \n\t" \
".popsection")
EX(de, 0);
EX(db, 1);
EX(nmi, 2);
EX(bp, 3);
EX(of, 4);
EX(br, 5);
EX(ud, 6);
EX(nm, 7);
EX_E(df, 8);
EX_E(ts, 10);
EX_E(np, 11);
EX_E(ss, 12);
EX_E(gp, 13);
EX_E(pf, 14);
EX(mf, 16);
EX_E(ac, 17);
EX(mc, 18);
EX(xm, 19);
asm (".pushsection .text \n\t"
"__handle_exception: \n\t"
#ifdef __x86_64__
"push %r15; push %r14; push %r13; push %r12 \n\t"
"push %r11; push %r10; push %r9; push %r8 \n\t"
#endif
"push %"R "di; push %"R "si; push %"R "bp; sub $"S", %"R "sp \n\t"
"push %"R "bx; push %"R "dx; push %"R "cx; push %"R "ax \n\t"
#ifdef __x86_64__
"mov %"R "sp, %"R "di \n\t"
#else
"mov %"R "sp, %"R "ax \n\t"
#endif
"call do_handle_exception \n\t"
"pop %"R "ax; pop %"R "cx; pop %"R "dx; pop %"R "bx \n\t"
"add $"S", %"R "sp; pop %"R "bp; pop %"R "si; pop %"R "di \n\t"
#ifdef __x86_64__
"pop %r8; pop %r9; pop %r10; pop %r11 \n\t"
"pop %r12; pop %r13; pop %r14; pop %r15 \n\t"
#endif
"add $"S", %"R "sp \n\t"
"add $"S", %"R "sp \n\t"
"iret"W" \n\t"
".popsection");
static void *idt_handlers[32] = {
[0] = &de_fault,
[1] = &db_fault,
[2] = &nmi_fault,
[3] = &bp_fault,
[4] = &of_fault,
[5] = &br_fault,
[6] = &ud_fault,
[7] = &nm_fault,
[8] = &df_fault,
[10] = &ts_fault,
[11] = &np_fault,
[12] = &ss_fault,
[13] = &gp_fault,
[14] = &pf_fault,
[16] = &mf_fault,
[17] = &ac_fault,
[18] = &mc_fault,
[19] = &xm_fault,
};
void setup_idt(void)
{
int i;
static bool idt_initialized = false;
if (idt_initialized) {
return;
}
idt_initialized = true;
for (i = 0; i < 32; i++)
if (idt_handlers[i])
set_idt_entry(i, idt_handlers[i], 0);
handle_exception(0, check_exception_table);
handle_exception(6, check_exception_table);
handle_exception(13, check_exception_table);
}
unsigned exception_vector(void)
{
unsigned char vector;
asm("movb %%gs:4, %0" : "=q"(vector));
return vector;
}
unsigned exception_error_code(void)
{
unsigned short error_code;
asm("mov %%gs:6, %0" : "=rm"(error_code));
return error_code;
}
bool exception_rflags_rf(void)
{
unsigned char rf_flag;
asm("movb %%gs:5, %b0" : "=q"(rf_flag));
return rf_flag & 1;
}
static char intr_alt_stack[4096];
#ifndef __x86_64__
/*
* GDT layout:
* 0x00 - NULL descriptor
* 0x08 - Code segment (ring 0)
* 0x10 - Data segment (ring 0)
* 0x18 - Not present code segment (ring 0)
* 0x20 - Code segment (ring 3)
* 0x28 - Data segment (ring 3)
* 0x30 - Interrupt task
* 0x38 to 0x78 - Free to use for test cases
* 0x80 - Primary task (CPU 0)
*/
void set_gdt_entry(int sel, u32 base, u32 limit, u8 access, u8 gran)
{
int num = sel >> 3;
/* Setup the descriptor base address */
gdt32[num].base_low = (base & 0xFFFF);
gdt32[num].base_middle = (base >> 16) & 0xFF;
gdt32[num].base_high = (base >> 24) & 0xFF;
/* Setup the descriptor limits */
gdt32[num].limit_low = (limit & 0xFFFF);
gdt32[num].granularity = ((limit >> 16) & 0x0F);
/* Finally, set up the granularity and access flags */
gdt32[num].granularity |= (gran & 0xF0);
gdt32[num].access = access;
}
void set_gdt_task_gate(u16 sel, u16 tss_sel)
{
set_gdt_entry(sel, tss_sel, 0, 0x85, 0); // task, present
}
void set_idt_task_gate(int vec, u16 sel)
{
idt_entry_t *e = &boot_idt[vec];
memset(e, 0, sizeof *e);
e->selector = sel;
e->ist = 0;
e->type = 5;
e->dpl = 0;
e->p = 1;
}
/*
* 0 - main task
* 1 - interrupt task
*/
tss32_t tss_intr;
void setup_tss32(void)
{
u16 desc_size = sizeof(tss32_t);
tss.cr3 = read_cr3();
tss_intr.cr3 = read_cr3();
tss_intr.ss0 = tss_intr.ss1 = tss_intr.ss2 = 0x10;
tss_intr.esp = tss_intr.esp0 = tss_intr.esp1 = tss_intr.esp2 =
(u32)intr_alt_stack + 4096;
tss_intr.cs = 0x08;
tss_intr.ds = tss_intr.es = tss_intr.fs = tss_intr.gs = tss_intr.ss = 0x10;
tss_intr.iomap_base = (u16)desc_size;
set_gdt_entry(TSS_INTR, (u32)&tss_intr, desc_size - 1, 0x89, 0x0f);
}
void set_intr_task_gate(int e, void *fn)
{
tss_intr.eip = (u32)fn;
set_idt_task_gate(e, TSS_INTR);
}
void setup_alt_stack(void)
{
setup_tss32();
}
void set_intr_alt_stack(int e, void *fn)
{
set_intr_task_gate(e, fn);
}
void print_current_tss_info(void)
{
u16 tr = str();
if (tr != TSS_MAIN && tr != TSS_INTR)
printf("Unknown TSS %x\n", tr);
else
printf("TR=%x (%s) Main TSS back link %x. Intr TSS back link %x\n",
tr, tr ? "interrupt" : "main", tss.prev, tss_intr.prev);
}
#else
void set_intr_alt_stack(int e, void *addr)
{
set_idt_entry(e, addr, 0);
boot_idt[e].ist = 1;
}
void setup_alt_stack(void)
{
tss.ist1 = (u64)intr_alt_stack + 4096;
}
#endif
static bool exception;
static jmp_buf *exception_jmpbuf;
static void exception_handler_longjmp(void)
{
longjmp(*exception_jmpbuf, 1);
}
static void exception_handler(struct ex_regs *regs)
{
/* longjmp must happen after iret, so do not do it now. */
exception = true;
regs->rip = (unsigned long)&exception_handler_longjmp;
}
bool test_for_exception(unsigned int ex, void (*trigger_func)(void *data),
void *data)
{
jmp_buf jmpbuf;
int ret;
handle_exception(ex, exception_handler);
ret = set_exception_jmpbuf(jmpbuf);
if (ret == 0)
trigger_func(data);
handle_exception(ex, NULL);
return ret;
}
void __set_exception_jmpbuf(jmp_buf *addr)
{
exception_jmpbuf = addr;
}
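A hypothetical example (not part of the imported file) of the test_for_exception() helper above; setup_idt() must have been called so the fault is routed through do_handle_exception(), and printf() and UD_VECTOR come from the headers already included in this file:

static void do_ud2(void *data)
{
    (void)data;
    asm volatile("ud2");
}

static void ud_example(void)
{
    setup_idt();
    printf("ud2 %s #UD\n",
           test_for_exception(UD_VECTOR, do_ud2, NULL) ? "raised" : "did not raise");
}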

View file

@@ -0,0 +1,164 @@
#ifndef __IDT_TEST__
#define __IDT_TEST__
#include <setjmp.h>
void setup_idt(void);
void setup_alt_stack(void);
struct ex_regs {
unsigned long rax, rcx, rdx, rbx;
unsigned long dummy, rbp, rsi, rdi;
#ifdef __x86_64__
unsigned long r8, r9, r10, r11;
unsigned long r12, r13, r14, r15;
#endif
unsigned long vector;
unsigned long error_code;
unsigned long rip;
unsigned long cs;
unsigned long rflags;
};
typedef struct {
u16 prev;
u16 res1;
u32 esp0;
u16 ss0;
u16 res2;
u32 esp1;
u16 ss1;
u16 res3;
u32 esp2;
u16 ss2;
u16 res4;
u32 cr3;
u32 eip;
u32 eflags;
u32 eax, ecx, edx, ebx, esp, ebp, esi, edi;
u16 es;
u16 res5;
u16 cs;
u16 res6;
u16 ss;
u16 res7;
u16 ds;
u16 res8;
u16 fs;
u16 res9;
u16 gs;
u16 res10;
u16 ldt;
u16 res11;
u16 t:1;
u16 res12:15;
u16 iomap_base;
} tss32_t;
typedef struct __attribute__((packed)) {
u32 res1;
u64 rsp0;
u64 rsp1;
u64 rsp2;
u64 res2;
u64 ist1;
u64 ist2;
u64 ist3;
u64 ist4;
u64 ist5;
u64 ist6;
u64 ist7;
u64 res3;
u16 res4;
u16 iomap_base;
} tss64_t;
#define ASM_TRY(catch) \
"movl $0, %%gs:4 \n\t" \
".pushsection .data.ex \n\t" \
".quad 1111f, " catch "\n\t" \
".popsection \n\t" \
"1111:"
#define DB_VECTOR 1
#define BP_VECTOR 3
#define UD_VECTOR 6
#define GP_VECTOR 13
#define KERNEL_CS 0x08
#define KERNEL_DS 0x10
#define NP_SEL 0x18
#define USER_CS 0x23
#define USER_DS 0x2b
#ifdef __x86_64__
#define KERNEL_CS64 KERNEL_CS
#define KERNEL_DS64 KERNEL_DS
#define KERNEL_CS32 0x30
#define KERNEL_DS32 0x38
#define KERNEL_CS16 0x40
#define KERNEL_DS16 0x48
#else
#define KERNEL_CS32 KERNEL_CS
#define KERNEL_DS32 KERNEL_DS
#endif
#define TSS_INTR 0x50
#define FIRST_SPARE_SEL 0x58
#define TSS_MAIN 0x80
typedef struct {
unsigned short offset0;
unsigned short selector;
unsigned short ist : 3;
unsigned short : 5;
unsigned short type : 4;
unsigned short : 1;
unsigned short dpl : 2;
unsigned short p : 1;
unsigned short offset1;
#ifdef __x86_64__
unsigned offset2;
unsigned reserved;
#endif
} idt_entry_t;
typedef struct {
u16 limit_low;
u16 base_low;
u8 base_middle;
u8 access;
u8 granularity;
u8 base_high;
} gdt_entry_t;
extern idt_entry_t boot_idt[256];
#ifndef __x86_64__
extern gdt_entry_t gdt32[];
extern tss32_t tss;
extern tss32_t tss_intr;
void set_gdt_task_gate(u16 tss_sel, u16 sel);
void set_idt_task_gate(int vec, u16 sel);
void set_intr_task_gate(int vec, void *fn);
void setup_tss32(void);
#else
extern tss64_t tss;
#endif
unsigned exception_vector(void);
unsigned exception_error_code(void);
bool exception_rflags_rf(void);
void set_idt_entry(int vec, void *addr, int dpl);
void set_idt_sel(int vec, u16 sel);
void set_idt_dpl(int vec, u16 dpl);
void set_gdt_entry(int sel, u32 base, u32 limit, u8 access, u8 gran);
void set_intr_alt_stack(int e, void *fn);
void print_current_tss_info(void);
void handle_exception(u8 v, void (*func)(struct ex_regs *regs));
bool test_for_exception(unsigned int ex, void (*trigger_func)(void *data),
void *data);
void __set_exception_jmpbuf(jmp_buf *addr);
#define set_exception_jmpbuf(jmpbuf) \
(setjmp(jmpbuf) ? : (__set_exception_jmpbuf(&(jmpbuf)), 0))
#endif
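The ASM_TRY() macro above records a fixup entry so that a faulting instruction resumes at the given label and the vector can later be read back with exception_vector(). A hedged sketch of the pattern the x86 tests use (not part of the imported header; it assumes setup_idt() has installed check_exception_table for #GP and that libcflat.h provides the u32/u64 types):

static unsigned wrmsr_safe(u32 index, u64 val)
{
    u32 lo = val, hi = val >> 32;

    asm volatile(ASM_TRY("1f")   /* clear %gs:4 and record a fixup to label 1 */
                 "wrmsr\n\t"
                 "1:"
                 : : "a"(lo), "d"(hi), "c"(index) : "memory");

    return exception_vector();   /* 0 on success, GP_VECTOR if WRMSR faulted */
}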

View file

@@ -0,0 +1,14 @@
#ifndef SILLY_APIC_H
#define SILLY_APIC_H
#define APIC_BASE 0x1000
#define APIC_SIZE 0x100
#define APIC_REG_NCPU 0x00
#define APIC_REG_ID 0x04
#define APIC_REG_SIPI_ADDR 0x08
#define APIC_REG_SEND_SIPI 0x0c
#define APIC_REG_IPI_VECTOR 0x10
#define APIC_REG_SEND_IPI 0x14
#endif

View file

@@ -0,0 +1,45 @@
#include "fwcfg.h"
#include "smp.h"
static struct spinlock lock;
uint64_t fwcfg_get_u(uint16_t index, int bytes)
{
uint64_t r = 0;
uint8_t b;
int i;
spin_lock(&lock);
asm volatile ("out %0, %1" : : "a"(index), "d"((uint16_t)BIOS_CFG_IOPORT));
for (i = 0; i < bytes; ++i) {
asm volatile ("in %1, %0" : "=a"(b) : "d"((uint16_t)(BIOS_CFG_IOPORT + 1)));
r |= (uint64_t)b << (i * 8);
}
spin_unlock(&lock);
return r;
}
uint8_t fwcfg_get_u8(unsigned index)
{
return fwcfg_get_u(index, 1);
}
uint16_t fwcfg_get_u16(unsigned index)
{
return fwcfg_get_u(index, 2);
}
uint32_t fwcfg_get_u32(unsigned index)
{
return fwcfg_get_u(index, 4);
}
uint64_t fwcfg_get_u64(unsigned index)
{
return fwcfg_get_u(index, 8);
}
unsigned fwcfg_get_nb_cpus(void)
{
return fwcfg_get_u16(FW_CFG_NB_CPUS);
}
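A small usage sketch (editor's illustration, not part of the imported file), assuming printf() and PRIx64 from libcflat.h:

static void fwcfg_example(void)
{
    printf("fw_cfg: %u cpu(s), RAM size 0x%" PRIx64 "\n",
           fwcfg_get_nb_cpus(), fwcfg_get_u64(FW_CFG_RAM_SIZE));
}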

View file

@@ -0,0 +1,44 @@
#ifndef FWCFG_H
#define FWCFG_H
#include <stdint.h>
#define FW_CFG_SIGNATURE 0x00
#define FW_CFG_ID 0x01
#define FW_CFG_UUID 0x02
#define FW_CFG_RAM_SIZE 0x03
#define FW_CFG_NOGRAPHIC 0x04
#define FW_CFG_NB_CPUS 0x05
#define FW_CFG_MACHINE_ID 0x06
#define FW_CFG_KERNEL_ADDR 0x07
#define FW_CFG_KERNEL_SIZE 0x08
#define FW_CFG_KERNEL_CMDLINE 0x09
#define FW_CFG_INITRD_ADDR 0x0a
#define FW_CFG_INITRD_SIZE 0x0b
#define FW_CFG_BOOT_DEVICE 0x0c
#define FW_CFG_NUMA 0x0d
#define FW_CFG_BOOT_MENU 0x0e
#define FW_CFG_MAX_CPUS 0x0f
#define FW_CFG_MAX_ENTRY 0x10
#define FW_CFG_WRITE_CHANNEL 0x4000
#define FW_CFG_ARCH_LOCAL 0x8000
#define FW_CFG_ENTRY_MASK ~(FW_CFG_WRITE_CHANNEL | FW_CFG_ARCH_LOCAL)
#define FW_CFG_INVALID 0xffff
#define BIOS_CFG_IOPORT 0x510
#define FW_CFG_ACPI_TABLES (FW_CFG_ARCH_LOCAL + 0)
#define FW_CFG_SMBIOS_ENTRIES (FW_CFG_ARCH_LOCAL + 1)
#define FW_CFG_IRQ0_OVERRIDE (FW_CFG_ARCH_LOCAL + 2)
uint8_t fwcfg_get_u8(unsigned index);
uint16_t fwcfg_get_u16(unsigned index);
uint32_t fwcfg_get_u32(unsigned index);
uint64_t fwcfg_get_u64(unsigned index);
unsigned fwcfg_get_nb_cpus(void);
#endif

View file

@@ -0,0 +1,372 @@
/*
* Intel IOMMU APIs
*
* Copyright (C) 2016 Red Hat, Inc.
*
* Authors:
* Peter Xu <peterx@redhat.com>,
*
* This work is licensed under the terms of the GNU LGPL, version 2 or
* later.
*/
#include "intel-iommu.h"
#include "libcflat.h"
#include "pci.h"
#include "atomic.h"
/*
* VT-d in QEMU currently supports only a 39-bit address width, which
* means 3-level translation.
*/
#define VTD_PAGE_LEVEL 3
#define VTD_CE_AW_39BIT 0x1
typedef uint64_t vtd_pte_t;
struct vtd_root_entry {
/* Quad 1 */
uint64_t present:1;
uint64_t __reserved:11;
uint64_t context_table_p:52;
/* Quad 2 */
uint64_t __reserved_2;
} __attribute__ ((packed));
typedef struct vtd_root_entry vtd_re_t;
struct vtd_context_entry {
/* Quad 1 */
uint64_t present:1;
uint64_t disable_fault_report:1;
uint64_t trans_type:2;
uint64_t __reserved:8;
uint64_t slptptr:52;
/* Quad 2 */
uint64_t addr_width:3;
uint64_t __ignore:4;
uint64_t __reserved_2:1;
uint64_t domain_id:16;
uint64_t __reserved_3:40;
} __attribute__ ((packed));
typedef struct vtd_context_entry vtd_ce_t;
struct vtd_irte {
uint32_t present:1;
uint32_t fault_disable:1; /* Fault Processing Disable */
uint32_t dest_mode:1; /* Destination Mode */
uint32_t redir_hint:1; /* Redirection Hint */
uint32_t trigger_mode:1; /* Trigger Mode */
uint32_t delivery_mode:3; /* Delivery Mode */
uint32_t __avail:4; /* Available spaces for software */
uint32_t __reserved_0:3; /* Reserved 0 */
uint32_t irte_mode:1; /* IRTE Mode */
uint32_t vector:8; /* Interrupt Vector */
uint32_t __reserved_1:8; /* Reserved 1 */
uint32_t dest_id; /* Destination ID */
uint16_t source_id:16; /* Source-ID */
uint64_t sid_q:2; /* Source-ID Qualifier */
uint64_t sid_vtype:2; /* Source-ID Validation Type */
uint64_t __reserved_2:44; /* Reserved 2 */
} __attribute__ ((packed));
typedef struct vtd_irte vtd_irte_t;
#define VTD_RTA_MASK (PAGE_MASK)
#define VTD_IRTA_MASK (PAGE_MASK)
void *vtd_reg_base;
static uint64_t vtd_root_table(void)
{
/* No extended root-table support yet */
return vtd_readq(DMAR_RTADDR_REG) & VTD_RTA_MASK;
}
static uint64_t vtd_ir_table(void)
{
return vtd_readq(DMAR_IRTA_REG) & VTD_IRTA_MASK;
}
static void vtd_gcmd_or(uint32_t cmd)
{
uint32_t status;
/* Only one bit may be set at a time */
assert(is_power_of_2(cmd));
status = vtd_readl(DMAR_GSTS_REG);
vtd_writel(DMAR_GCMD_REG, status | cmd);
if (cmd & VTD_GCMD_ONE_SHOT_BITS) {
/* One-shot bits take effect immediately */
return;
}
/* Make sure IOMMU handled our command request */
while (!(vtd_readl(DMAR_GSTS_REG) & cmd))
cpu_relax();
}
static void vtd_dump_init_info(void)
{
uint32_t version;
version = vtd_readl(DMAR_VER_REG);
/* Major version >= 1 */
assert(((version >> 4) & 0xf) >= 1);
printf("VT-d version: 0x%x\n", version);
printf(" cap: 0x%016lx\n", vtd_readq(DMAR_CAP_REG));
printf(" ecap: 0x%016lx\n", vtd_readq(DMAR_ECAP_REG));
}
static void vtd_setup_root_table(void)
{
void *root = alloc_page();
memset(root, 0, PAGE_SIZE);
vtd_writeq(DMAR_RTADDR_REG, virt_to_phys(root));
vtd_gcmd_or(VTD_GCMD_ROOT);
printf("DMAR table address: 0x%016lx\n", vtd_root_table());
}
static void vtd_setup_ir_table(void)
{
void *root = alloc_page();
memset(root, 0, PAGE_SIZE);
/* 0xf stands for table size (2^(0xf+1) == 65536) */
vtd_writeq(DMAR_IRTA_REG, virt_to_phys(root) | 0xf);
vtd_gcmd_or(VTD_GCMD_IR_TABLE);
printf("IR table address: 0x%016lx\n", vtd_ir_table());
}
static void vtd_install_pte(vtd_pte_t *root, iova_t iova,
phys_addr_t pa, int level_target)
{
int level;
unsigned int offset;
void *page;
for (level = VTD_PAGE_LEVEL; level > level_target; level--) {
offset = PGDIR_OFFSET(iova, level);
if (!(root[offset] & VTD_PTE_RW)) {
page = alloc_page();
memset(page, 0, PAGE_SIZE);
root[offset] = virt_to_phys(page) | VTD_PTE_RW;
}
root = (uint64_t *)(phys_to_virt(root[offset] &
VTD_PTE_ADDR));
}
offset = PGDIR_OFFSET(iova, level);
root[offset] = pa | VTD_PTE_RW;
if (level != 1) {
/* This is a huge page */
root[offset] |= VTD_PTE_HUGE;
}
}
/**
* vtd_map_range: setup IO address mapping for specific memory range
*
* @sid: source ID of the device to setup
* @iova: start IO virtual address
* @pa: start physical address
* @size: size of the mapping area
*/
void vtd_map_range(uint16_t sid, iova_t iova, phys_addr_t pa, size_t size)
{
uint8_t bus_n, devfn;
void *slptptr;
vtd_ce_t *ce;
vtd_re_t *re = phys_to_virt(vtd_root_table());
assert(IS_ALIGNED(iova, SZ_4K));
assert(IS_ALIGNED(pa, SZ_4K));
assert(IS_ALIGNED(size, SZ_4K));
bus_n = PCI_BDF_GET_BUS(sid);
devfn = PCI_BDF_GET_DEVFN(sid);
/* Point to the correct root entry */
re += bus_n;
if (!re->present) {
ce = alloc_page();
memset(ce, 0, PAGE_SIZE);
memset(re, 0, sizeof(*re));
re->context_table_p = virt_to_phys(ce) >> VTD_PAGE_SHIFT;
re->present = 1;
printf("allocated vt-d root entry for PCI bus %d\n",
bus_n);
} else
ce = phys_to_virt(re->context_table_p << VTD_PAGE_SHIFT);
/* Point to the correct context entry */
ce += devfn;
if (!ce->present) {
slptptr = alloc_page();
memset(slptptr, 0, PAGE_SIZE);
memset(ce, 0, sizeof(*ce));
/* To keep it simple, the domain ID is the same as the SID */
ce->domain_id = sid;
/* We only test the 39-bit width case (3-level paging) */
ce->addr_width = VTD_CE_AW_39BIT;
ce->slptptr = virt_to_phys(slptptr) >> VTD_PAGE_SHIFT;
ce->trans_type = VTD_CONTEXT_TT_MULTI_LEVEL;
ce->present = 1;
/* No error reporting yet */
ce->disable_fault_report = 1;
printf("allocated vt-d context entry for devfn 0x%x\n",
devfn);
} else
slptptr = phys_to_virt(ce->slptptr << VTD_PAGE_SHIFT);
while (size) {
/* TODO: currently we only map 4K pages (level = 1) */
printf("map 4K page IOVA 0x%lx to 0x%lx (sid=0x%04x)\n",
iova, pa, sid);
vtd_install_pte(slptptr, iova, pa, 1);
size -= VTD_PAGE_SIZE;
iova += VTD_PAGE_SIZE;
pa += VTD_PAGE_SIZE;
}
}
static uint16_t vtd_intr_index_alloc(void)
{
static volatile int index_ctr = 0;
int ctr;
assert(index_ctr < 65535);
ctr = atomic_inc_fetch(&index_ctr);
printf("INTR: alloc IRTE index %d\n", ctr);
return ctr;
}
static void vtd_setup_irte(struct pci_dev *dev, vtd_irte_t *irte,
int vector, int dest_id, trigger_mode_t trigger)
{
assert(sizeof(vtd_irte_t) == 16);
memset(irte, 0, sizeof(*irte));
irte->fault_disable = 1;
irte->dest_mode = 0; /* physical */
irte->trigger_mode = trigger;
irte->delivery_mode = 0; /* fixed */
irte->irte_mode = 0; /* remapped */
irte->vector = vector;
irte->dest_id = dest_id;
irte->source_id = dev->bdf;
irte->sid_q = 0;
irte->sid_vtype = 1; /* full-sid verify */
irte->present = 1;
}
struct vtd_msi_addr {
uint32_t __dont_care:2;
uint32_t handle_15:1; /* handle[15] */
uint32_t shv:1;
uint32_t interrupt_format:1;
uint32_t handle_0_14:15; /* handle[0:14] */
uint32_t head:12; /* 0xfee */
uint32_t addr_hi; /* not used except with x2apic */
} __attribute__ ((packed));
typedef struct vtd_msi_addr vtd_msi_addr_t;
struct vtd_msi_data {
uint16_t __reserved;
uint16_t subhandle;
} __attribute__ ((packed));
typedef struct vtd_msi_data vtd_msi_data_t;
struct vtd_ioapic_entry {
uint64_t vector:8;
uint64_t __zeros:3;
uint64_t index_15:1;
uint64_t delivery_status:1;
uint64_t polarity:1;
uint64_t remote_irr:1;
uint64_t trigger_mode:1;
uint64_t mask:1;
uint64_t __zeros_2:31;
uint64_t interrupt_format:1;
uint64_t index_0_14:15;
} __attribute__ ((packed));
typedef struct vtd_ioapic_entry vtd_ioapic_entry_t;
/**
* vtd_setup_msi - setup MSI message for a device
*
* @dev: PCI device to setup MSI
* @vector: interrupt vector
* @dest_id: destination processor
*/
bool vtd_setup_msi(struct pci_dev *dev, int vector, int dest_id)
{
vtd_msi_data_t msi_data = {};
vtd_msi_addr_t msi_addr = {};
vtd_irte_t *irte = phys_to_virt(vtd_ir_table());
uint16_t index = vtd_intr_index_alloc();
assert(sizeof(vtd_msi_addr_t) == 8);
assert(sizeof(vtd_msi_data_t) == 4);
/* Use edge irq as default */
vtd_setup_irte(dev, irte + index, vector,
dest_id, TRIGGER_EDGE);
msi_addr.handle_15 = index >> 15 & 1;
msi_addr.shv = 0;
msi_addr.interrupt_format = 1;
msi_addr.handle_0_14 = index & 0x7fff;
msi_addr.head = 0xfee;
msi_data.subhandle = 0;
printf("%s: msi_addr=0x%" PRIx64 ", msi_data=0x%x\n", __func__,
*(uint64_t *)&msi_addr, *(uint32_t *)&msi_data);
return pci_setup_msi(dev, *(uint64_t *)&msi_addr,
*(uint32_t *)&msi_data);
}
void vtd_setup_ioapic_irq(struct pci_dev *dev, int vector,
int dest_id, trigger_mode_t trigger)
{
vtd_ioapic_entry_t entry = {};
vtd_irte_t *irte = phys_to_virt(vtd_ir_table());
ioapic_redir_entry_t *entry_2 = (ioapic_redir_entry_t *)&entry;
uint16_t index = vtd_intr_index_alloc();
uint8_t line;
assert(dev);
assert(sizeof(vtd_ioapic_entry_t) == 8);
vtd_setup_irte(dev, irte + index, vector,
dest_id, trigger);
entry.vector = vector;
entry.trigger_mode = trigger;
entry.index_15 = (index >> 15) & 1;
entry.interrupt_format = 1;
entry.index_0_14 = index & 0x7fff;
line = pci_intx_line(dev);
ioapic_write_redir(line, *entry_2);
}
void vtd_init(void)
{
setup_vm();
smp_init();
vtd_reg_base = ioremap(Q35_HOST_BRIDGE_IOMMU_ADDR, PAGE_SIZE);
vtd_dump_init_info();
vtd_gcmd_or(VTD_GCMD_QI); /* Enable QI */
vtd_setup_root_table();
vtd_setup_ir_table();
vtd_gcmd_or(VTD_GCMD_DMAR); /* Enable DMAR */
vtd_gcmd_or(VTD_GCMD_IR); /* Enable IR */
}
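A hedged sketch (not part of the imported file) of how a test might wire these pieces together for an already-probed PCI device. The vector number, destination APIC ID and DMA buffer are made-up placeholders, and the device itself still needs bus mastering/MSI enabled through the generic PCI helpers:

static void vtd_usage_sketch(struct pci_dev *dev, void *dma_buf)
{
    vtd_init();                       /* QI, root table, IR table, DMAR, IR */

    /* Map one page of the buffer at IOVA 0 for this device's source-id. */
    vtd_map_range(dev->bdf, 0, virt_to_phys(dma_buf), PAGE_SIZE);

    /* Route the device's MSI through the interrupt-remapping table. */
    vtd_setup_msi(dev, 0x42 /* vector */, 0 /* dest APIC ID */);
}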

View file

@@ -0,0 +1,149 @@
/*
* Intel IOMMU header
*
* Copyright (C) 2016 Red Hat, Inc.
*
* Authors:
* Peter Xu <peterx@redhat.com>,
*
* This work is licensed under the terms of the GNU LGPL, version 2 or
* later.
*
* (From include/linux/intel-iommu.h)
*/
#ifndef __INTEL_IOMMU_H__
#define __INTEL_IOMMU_H__
#include "libcflat.h"
#include "vm.h"
#include "isr.h"
#include "smp.h"
#include "desc.h"
#include "pci.h"
#include "asm/io.h"
#include "apic.h"
#define Q35_HOST_BRIDGE_IOMMU_ADDR 0xfed90000ULL
#define VTD_PAGE_SHIFT PAGE_SHIFT
#define VTD_PAGE_SIZE PAGE_SIZE
/*
* Intel IOMMU register specification
*/
#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
#define DMAR_CAP_REG_HI 0xc /* High 32-bit of DMAR_CAP_REG */
#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
#define DMAR_ECAP_REG_HI 0X14
#define DMAR_GCMD_REG 0x18 /* Global command */
#define DMAR_GSTS_REG 0x1c /* Global status */
#define DMAR_RTADDR_REG 0x20 /* Root entry table */
#define DMAR_RTADDR_REG_HI 0X24
#define DMAR_CCMD_REG 0x28 /* Context command */
#define DMAR_CCMD_REG_HI 0x2c
#define DMAR_FSTS_REG 0x34 /* Fault status */
#define DMAR_FECTL_REG 0x38 /* Fault control */
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data */
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr */
#define DMAR_FEUADDR_REG 0x44 /* Upper address */
#define DMAR_AFLOG_REG 0x58 /* Advanced fault control */
#define DMAR_AFLOG_REG_HI 0X5c
#define DMAR_PMEN_REG 0x64 /* Enable protected memory region */
#define DMAR_PLMBASE_REG 0x68 /* PMRR low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
#define DMAR_PHMBASE_REG 0x70 /* PMRR high base addr */
#define DMAR_PHMBASE_REG_HI 0X74
#define DMAR_PHMLIMIT_REG 0x78 /* PMRR high limit */
#define DMAR_PHMLIMIT_REG_HI 0x7c
#define DMAR_IQH_REG 0x80 /* Invalidation queue head */
#define DMAR_IQH_REG_HI 0X84
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail */
#define DMAR_IQT_REG_HI 0X8c
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr */
#define DMAR_IQA_REG_HI 0x94
#define DMAR_ICS_REG 0x9c /* Invalidation complete status */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr */
#define DMAR_IRTA_REG_HI 0xbc
#define DMAR_IECTL_REG 0xa0 /* Invalidation event control */
#define DMAR_IEDATA_REG 0xa4 /* Invalidation event data */
#define DMAR_IEADDR_REG 0xa8 /* Invalidation event address */
#define DMAR_IEUADDR_REG 0xac /* Invalidation event address */
#define DMAR_PQH_REG 0xc0 /* Page request queue head */
#define DMAR_PQH_REG_HI 0xc4
#define DMAR_PQT_REG 0xc8 /* Page request queue tail*/
#define DMAR_PQT_REG_HI 0xcc
#define DMAR_PQA_REG 0xd0 /* Page request queue address */
#define DMAR_PQA_REG_HI 0xd4
#define DMAR_PRS_REG 0xdc /* Page request status */
#define DMAR_PECTL_REG 0xe0 /* Page request event control */
#define DMAR_PEDATA_REG 0xe4 /* Page request event data */
#define DMAR_PEADDR_REG 0xe8 /* Page request event address */
#define DMAR_PEUADDR_REG 0xec /* Page event upper address */
#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability */
#define DMAR_MTRRCAP_REG_HI 0x104
#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type */
#define DMAR_MTRRDEF_REG_HI 0x10c
#define VTD_GCMD_IR_TABLE 0x1000000
#define VTD_GCMD_IR 0x2000000
#define VTD_GCMD_QI 0x4000000
#define VTD_GCMD_WBF 0x8000000 /* Write Buffer Flush */
#define VTD_GCMD_SFL 0x20000000 /* Set Fault Log */
#define VTD_GCMD_ROOT 0x40000000
#define VTD_GCMD_DMAR 0x80000000
#define VTD_GCMD_ONE_SHOT_BITS (VTD_GCMD_IR_TABLE | VTD_GCMD_WBF | \
VTD_GCMD_SFL | VTD_GCMD_ROOT)
/* Supported Adjusted Guest Address Widths */
#define VTD_CAP_SAGAW_SHIFT 8
/* 39-bit AGAW, 3-level page-table */
#define VTD_CAP_SAGAW_39bit (0x2ULL << VTD_CAP_SAGAW_SHIFT)
/* 48-bit AGAW, 4-level page-table */
#define VTD_CAP_SAGAW_48bit (0x4ULL << VTD_CAP_SAGAW_SHIFT)
#define VTD_CAP_SAGAW VTD_CAP_SAGAW_39bit
/* Both 1G/2M huge pages */
#define VTD_CAP_SLLPS ((1ULL << 34) | (1ULL << 35))
#define VTD_CONTEXT_TT_MULTI_LEVEL 0
#define VTD_CONTEXT_TT_DEV_IOTLB 1
#define VTD_CONTEXT_TT_PASS_THROUGH 2
#define VTD_PTE_R (1 << 0)
#define VTD_PTE_W (1 << 1)
#define VTD_PTE_RW (VTD_PTE_R | VTD_PTE_W)
#define VTD_PTE_ADDR GENMASK_ULL(63, 12)
#define VTD_PTE_HUGE (1 << 7)
extern void *vtd_reg_base;
#define vtd_reg(reg) ({ assert(vtd_reg_base); \
(volatile void *)(vtd_reg_base + reg); })
static inline void vtd_writel(unsigned int reg, uint32_t value)
{
__raw_writel(value, vtd_reg(reg));
}
static inline void vtd_writeq(unsigned int reg, uint64_t value)
{
__raw_writeq(value, vtd_reg(reg));
}
static inline uint32_t vtd_readl(unsigned int reg)
{
return __raw_readl(vtd_reg(reg));
}
static inline uint64_t vtd_readq(unsigned int reg)
{
return __raw_readq(vtd_reg(reg));
}
void vtd_init(void);
void vtd_map_range(uint16_t sid, phys_addr_t iova, phys_addr_t pa, size_t size);
bool vtd_setup_msi(struct pci_dev *dev, int vector, int dest_id);
void vtd_setup_ioapic_irq(struct pci_dev *dev, int vector,
int dest_id, trigger_mode_t trigger);
#endif

View file

@@ -0,0 +1,99 @@
#include "libcflat.h"
#include "vm.h"
#include "smp.h"
#include "asm/io.h"
#include "asm/page.h"
#ifndef USE_SERIAL
#define USE_SERIAL
#endif
static struct spinlock lock;
static int serial_iobase = 0x3f8;
static int serial_inited = 0;
static void serial_outb(char ch)
{
u8 lsr;
do {
lsr = inb(serial_iobase + 0x05);
} while (!(lsr & 0x20));
outb(ch, serial_iobase + 0x00);
}
static void serial_init(void)
{
u8 lcr;
/* set DLAB */
lcr = inb(serial_iobase + 0x03);
lcr |= 0x80;
outb(lcr, serial_iobase + 0x03);
/* set baud rate to 115200 */
outb(0x01, serial_iobase + 0x00);
outb(0x00, serial_iobase + 0x01);
/* clear DLAB */
lcr = inb(serial_iobase + 0x03);
lcr &= ~0x80;
outb(lcr, serial_iobase + 0x03);
}
static void print_serial(const char *buf)
{
unsigned long len = strlen(buf);
#ifdef USE_SERIAL
unsigned long i;
if (!serial_inited) {
serial_init();
serial_inited = 1;
}
for (i = 0; i < len; i++) {
serial_outb(buf[i]);
}
#else
asm volatile ("rep/outsb" : "+S"(buf), "+c"(len) : "d"(0xf1));
#endif
}
void puts(const char *s)
{
spin_lock(&lock);
print_serial(s);
spin_unlock(&lock);
}
void exit(int code)
{
#ifdef USE_SERIAL
static const char shutdown_str[8] = "Shutdown";
int i;
/* test device exit (with status) */
outl(code, 0xf4);
/* if that failed, try the Bochs poweroff port */
for (i = 0; i < 8; i++) {
outb(shutdown_str[i], 0x8900);
}
#else
asm volatile("out %0, %1" : : "a"(code), "d"((short)0xf4));
#endif
}
void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
{
phys_addr_t base = phys_addr & PAGE_MASK;
phys_addr_t offset = phys_addr - base;
/*
* The kernel sets up ioremap() PTEs with caching disabled, but we do
* not do that yet. Uncached I/O mappings would make sense, and may
* help us find bugs once we properly map that way.
*/
return vmap(phys_addr, size) + offset;
}

View file

@@ -0,0 +1,124 @@
#include "libcflat.h"
#include "isr.h"
#include "vm.h"
#include "desc.h"
extern char isr_entry_point[];
asm (
"isr_entry_point: \n"
#ifdef __x86_64__
"push %r15 \n\t"
"push %r14 \n\t"
"push %r13 \n\t"
"push %r12 \n\t"
"push %r11 \n\t"
"push %r10 \n\t"
"push %r9 \n\t"
"push %r8 \n\t"
#endif
"push %"R "di \n\t"
"push %"R "si \n\t"
"push %"R "bp \n\t"
"push %"R "sp \n\t"
"push %"R "bx \n\t"
"push %"R "dx \n\t"
"push %"R "cx \n\t"
"push %"R "ax \n\t"
#ifdef __x86_64__
"mov %rsp, %rdi \n\t"
"callq *8*16(%rsp) \n\t"
#else
"push %esp \n\t"
"calll *4+4*8(%esp) \n\t"
"add $4, %esp \n\t"
#endif
"pop %"R "ax \n\t"
"pop %"R "cx \n\t"
"pop %"R "dx \n\t"
"pop %"R "bx \n\t"
"pop %"R "bp \n\t"
"pop %"R "bp \n\t"
"pop %"R "si \n\t"
"pop %"R "di \n\t"
#ifdef __x86_64__
"pop %r8 \n\t"
"pop %r9 \n\t"
"pop %r10 \n\t"
"pop %r11 \n\t"
"pop %r12 \n\t"
"pop %r13 \n\t"
"pop %r14 \n\t"
"pop %r15 \n\t"
#endif
".globl isr_iret_ip\n\t"
#ifdef __x86_64__
"add $8, %rsp \n\t"
"isr_iret_ip: \n\t"
"iretq \n\t"
#else
"add $4, %esp \n\t"
"isr_iret_ip: \n\t"
"iretl \n\t"
#endif
);
void handle_irq(unsigned vec, void (*func)(isr_regs_t *regs))
{
u8 *thunk = vmalloc(50);
set_idt_entry(vec, thunk, 0);
#ifdef __x86_64__
/* sub $8, %rsp */
*thunk++ = 0x48; *thunk++ = 0x83; *thunk++ = 0xec; *thunk++ = 0x08;
/* mov $func_low, %(rsp) */
*thunk++ = 0xc7; *thunk++ = 0x04; *thunk++ = 0x24;
*(u32 *)thunk = (ulong)func; thunk += 4;
/* mov $func_high, %(rsp+4) */
*thunk++ = 0xc7; *thunk++ = 0x44; *thunk++ = 0x24; *thunk++ = 0x04;
*(u32 *)thunk = (ulong)func >> 32; thunk += 4;
/* jmp isr_entry_point */
*thunk ++ = 0xe9;
*(u32 *)thunk = (ulong)isr_entry_point - (ulong)(thunk + 4);
#else
/* push $func */
*thunk++ = 0x68;
*(u32 *)thunk = (ulong)func; thunk += 4;
/* jmp isr_entry_point */
*thunk++ = 0xe9;
*(u32 *)thunk = (ulong)isr_entry_point - (ulong)(thunk + 4);
#endif
}
void handle_external_interrupt(int vector)
{
idt_entry_t *idt = &boot_idt[vector];
unsigned long entry =
idt->offset0 | ((unsigned long)idt->offset1 << 16);
#ifdef __x86_64__
unsigned long tmp;
entry |= ((unsigned long)idt->offset2 << 32);
#endif
asm volatile(
#ifdef __x86_64__
"mov %%rsp, %[sp]\n\t"
"and $0xfffffffffffffff0, %%rsp\n\t"
"push $%c[ss]\n\t"
"push %[sp]\n\t"
#endif
"pushf\n\t"
"orl $0x200, (%%"R "sp)\n\t"
"push $%c[cs]\n\t"
"call *%[entry]\n\t"
:
#ifdef __x86_64__
[sp]"=&r"(tmp)
#endif
:
[entry]"r"(entry),
[ss]"i"(KERNEL_DS),
[cs]"i"(KERNEL_CS)
);
}
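A minimal self-IPI sketch (editor's illustration, not part of the imported file) tying handle_irq() to the APIC helpers; it assumes apic.h is included in addition to the headers above and that the local APIC has already been software-enabled:

static volatile int ipi_count;

static void ipi_isr(isr_regs_t *regs)
{
    (void)regs;
    ++ipi_count;
    eoi();
}

static void self_ipi_example(void)
{
    handle_irq(0x20, ipi_isr);        /* build a thunk and point vector 0x20 at it */
    asm volatile("sti");
    apic_icr_write(APIC_DEST_SELF | APIC_DM_FIXED | 0x20, 0);
    while (!ipi_count)
        ;                             /* wait for the interrupt to be delivered */
}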

View file

@@ -0,0 +1,14 @@
#ifndef __ISR_TEST__
#define __ISR_TEST__
typedef struct {
ulong regs[sizeof(ulong)*2];
ulong func;
ulong rip;
ulong cs;
ulong rflags;
} isr_regs_t;
void handle_irq(unsigned vec, void (*func)(isr_regs_t *regs));
void handle_external_interrupt(int vector);
#endif

View file

@@ -0,0 +1,412 @@
#ifndef _ASM_X86_MSR_INDEX_H
#define _ASM_X86_MSR_INDEX_H
/* CPU model specific register (MSR) numbers */
/* x86-64 specific MSRs */
#define MSR_EFER 0xc0000080 /* extended feature register */
#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */
#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */
#define MSR_TSC_AUX 0xc0000103 /* Auxiliary TSC */
/* EFER bits: */
#define _EFER_SCE 0 /* SYSCALL/SYSRET */
#define _EFER_LME 8 /* Long mode enable */
#define _EFER_LMA 10 /* Long mode active (read-only) */
#define _EFER_NX 11 /* No execute enable */
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
#define EFER_LMA (1<<_EFER_LMA)
#define EFER_NX (1<<_EFER_NX)
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_LMSLE (1<<_EFER_LMSLE)
#define EFER_FFXSR (1<<_EFER_FFXSR)
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
#define MSR_IA32_SYSENTER_EIP 0x00000176
#define MSR_IA32_MCG_CAP 0x00000179
#define MSR_IA32_MCG_STATUS 0x0000017a
#define MSR_IA32_MCG_CTL 0x0000017b
#define MSR_IA32_PEBS_ENABLE 0x000003f1
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
#define MSR_MTRRfix64K_00000 0x00000250
#define MSR_MTRRfix16K_80000 0x00000258
#define MSR_MTRRfix16K_A0000 0x00000259
#define MSR_MTRRfix4K_C0000 0x00000268
#define MSR_MTRRfix4K_C8000 0x00000269
#define MSR_MTRRfix4K_D0000 0x0000026a
#define MSR_MTRRfix4K_D8000 0x0000026b
#define MSR_MTRRfix4K_E0000 0x0000026c
#define MSR_MTRRfix4K_E8000 0x0000026d
#define MSR_MTRRfix4K_F0000 0x0000026e
#define MSR_MTRRfix4K_F8000 0x0000026f
#define MSR_MTRRdefType 0x000002ff
#define MSR_IA32_CR_PAT 0x00000277
#define MSR_IA32_DEBUGCTLMSR 0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
#define MSR_IA32_LASTINTFROMIP 0x000001dd
#define MSR_IA32_LASTINTTOIP 0x000001de
/* DEBUGCTLMSR bits (others vary by model): */
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
#define DEBUGCTLMSR_TR (1UL << 6)
#define DEBUGCTLMSR_BTS (1UL << 7)
#define DEBUGCTLMSR_BTINT (1UL << 8)
#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
#define MSR_IA32_MC0_CTL 0x00000400
#define MSR_IA32_MC0_STATUS 0x00000401
#define MSR_IA32_MC0_ADDR 0x00000402
#define MSR_IA32_MC0_MISC 0x00000403
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
/* These are consecutive and not in the normal 4-register MCE bank block */
#define MSR_IA32_MC0_CTL2 0x00000280
#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
#define CMCI_EN (1ULL << 30)
#define CMCI_THRESHOLD_MASK 0xffffULL
#define MSR_P6_PERFCTR0 0x000000c1
#define MSR_P6_PERFCTR1 0x000000c2
#define MSR_P6_EVNTSEL0 0x00000186
#define MSR_P6_EVNTSEL1 0x00000187
/* AMD64 MSRs. Not complete. See the architecture manual for a more
complete list. */
#define MSR_AMD64_PATCH_LEVEL 0x0000008b
#define MSR_AMD64_NB_CFG 0xc001001f
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
#define MSR_AMD64_IBSOPCTL 0xc0011033
#define MSR_AMD64_IBSOPRIP 0xc0011034
#define MSR_AMD64_IBSOPDATA 0xc0011035
#define MSR_AMD64_IBSOPDATA2 0xc0011036
#define MSR_AMD64_IBSOPDATA3 0xc0011037
#define MSR_AMD64_IBSDCLINAD 0xc0011038
#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
#define MSR_AMD64_IBSCTL 0xc001103a
/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
#define FAM10H_MMIO_CONF_ENABLE (1<<0)
#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K8_TOP_MEM2 0xc001001d
#define MSR_K8_SYSCFG 0xc0010010
#define MSR_K8_INT_PENDING_MSG 0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
#define MSR_K8_TSEG_ADDR 0xc0010112
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
/* K7 MSRs */
#define MSR_K7_EVNTSEL0 0xc0010000
#define MSR_K7_PERFCTR0 0xc0010004
#define MSR_K7_EVNTSEL1 0xc0010001
#define MSR_K7_PERFCTR1 0xc0010005
#define MSR_K7_EVNTSEL2 0xc0010002
#define MSR_K7_PERFCTR2 0xc0010006
#define MSR_K7_EVNTSEL3 0xc0010003
#define MSR_K7_PERFCTR3 0xc0010007
#define MSR_K7_CLK_CTL 0xc001001b
#define MSR_K7_HWCR 0xc0010015
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
/* K6 MSRs */
#define MSR_K6_EFER 0xc0000080
#define MSR_K6_STAR 0xc0000081
#define MSR_K6_WHCR 0xc0000082
#define MSR_K6_UWCCR 0xc0000085
#define MSR_K6_EPMR 0xc0000086
#define MSR_K6_PSOR 0xc0000087
#define MSR_K6_PFIR 0xc0000088
/* Centaur-Hauls/IDT defined MSRs. */
#define MSR_IDT_FCR1 0x00000107
#define MSR_IDT_FCR2 0x00000108
#define MSR_IDT_FCR3 0x00000109
#define MSR_IDT_FCR4 0x0000010a
#define MSR_IDT_MCR0 0x00000110
#define MSR_IDT_MCR1 0x00000111
#define MSR_IDT_MCR2 0x00000112
#define MSR_IDT_MCR3 0x00000113
#define MSR_IDT_MCR4 0x00000114
#define MSR_IDT_MCR5 0x00000115
#define MSR_IDT_MCR6 0x00000116
#define MSR_IDT_MCR7 0x00000117
#define MSR_IDT_MCR_CTRL 0x00000120
/* VIA Cyrix defined MSRs*/
#define MSR_VIA_FCR 0x00001107
#define MSR_VIA_LONGHAUL 0x0000110a
#define MSR_VIA_RNG 0x0000110b
#define MSR_VIA_BCR2 0x00001147
/* Transmeta defined MSRs */
#define MSR_TMTA_LONGRUN_CTRL 0x80868010
#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
#define MSR_TMTA_LRTI_READOUT 0x80868018
#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
/* Intel defined MSRs. */
#define MSR_IA32_P5_MC_ADDR 0x00000000
#define MSR_IA32_P5_MC_TYPE 0x00000001
#define MSR_IA32_TSC 0x00000010
#define MSR_IA32_PLATFORM_ID 0x00000017
#define MSR_IA32_EBL_CR_POWERON 0x0000002a
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define FEATURE_CONTROL_LOCKED (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
#define MSR_IA32_UCODE_WRITE 0x00000079
#define MSR_IA32_UCODE_REV 0x0000008b
#define MSR_IA32_PERF_STATUS 0x00000198
#define MSR_IA32_PERF_CTL 0x00000199
#define MSR_IA32_MPERF 0x000000e7
#define MSR_IA32_APERF 0x000000e8
#define MSR_IA32_THERM_CONTROL 0x0000019a
#define MSR_IA32_THERM_INTERRUPT 0x0000019b
#define THERM_INT_LOW_ENABLE (1 << 0)
#define THERM_INT_HIGH_ENABLE (1 << 1)
#define MSR_IA32_THERM_STATUS 0x0000019c
#define THERM_STATUS_PROCHOT (1 << 0)
#define MSR_THERM2_CTL 0x0000019d
#define MSR_THERM2_CTL_TM_SELECT (1ULL << 16)
#define MSR_IA32_MISC_ENABLE 0x000001a0
#define MSR_IA32_TEMPERATURE_TARGET 0x000001a2
/* MISC_ENABLE bits: architectural */
#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0)
#define MSR_IA32_MISC_ENABLE_TCC (1ULL << 1)
#define MSR_IA32_MISC_ENABLE_EMON (1ULL << 7)
#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL (1ULL << 11)
#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL (1ULL << 12)
#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP (1ULL << 16)
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18)
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << 22)
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << 23)
#define MSR_IA32_MISC_ENABLE_XD_DISABLE (1ULL << 34)
/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
#define MSR_IA32_MISC_ENABLE_X87_COMPAT (1ULL << 2)
#define MSR_IA32_MISC_ENABLE_TM1 (1ULL << 3)
#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE (1ULL << 4)
#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE (1ULL << 6)
#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK (1ULL << 8)
#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE (1ULL << 9)
#define MSR_IA32_MISC_ENABLE_FERR (1ULL << 10)
#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX (1ULL << 10)
#define MSR_IA32_MISC_ENABLE_TM2 (1ULL << 13)
#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE (1ULL << 19)
#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK (1ULL << 20)
#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT (1ULL << 24)
#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE (1ULL << 37)
#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE (1ULL << 38)
#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE (1ULL << 39)
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
#define MSR_IA32_MCG_EBX 0x00000181
#define MSR_IA32_MCG_ECX 0x00000182
#define MSR_IA32_MCG_EDX 0x00000183
#define MSR_IA32_MCG_ESI 0x00000184
#define MSR_IA32_MCG_EDI 0x00000185
#define MSR_IA32_MCG_EBP 0x00000186
#define MSR_IA32_MCG_ESP 0x00000187
#define MSR_IA32_MCG_EFLAGS 0x00000188
#define MSR_IA32_MCG_EIP 0x00000189
#define MSR_IA32_MCG_RESERVED 0x0000018a
/* Pentium IV performance counter MSRs */
#define MSR_P4_BPU_PERFCTR0 0x00000300
#define MSR_P4_BPU_PERFCTR1 0x00000301
#define MSR_P4_BPU_PERFCTR2 0x00000302
#define MSR_P4_BPU_PERFCTR3 0x00000303
#define MSR_P4_MS_PERFCTR0 0x00000304
#define MSR_P4_MS_PERFCTR1 0x00000305
#define MSR_P4_MS_PERFCTR2 0x00000306
#define MSR_P4_MS_PERFCTR3 0x00000307
#define MSR_P4_FLAME_PERFCTR0 0x00000308
#define MSR_P4_FLAME_PERFCTR1 0x00000309
#define MSR_P4_FLAME_PERFCTR2 0x0000030a
#define MSR_P4_FLAME_PERFCTR3 0x0000030b
#define MSR_P4_IQ_PERFCTR0 0x0000030c
#define MSR_P4_IQ_PERFCTR1 0x0000030d
#define MSR_P4_IQ_PERFCTR2 0x0000030e
#define MSR_P4_IQ_PERFCTR3 0x0000030f
#define MSR_P4_IQ_PERFCTR4 0x00000310
#define MSR_P4_IQ_PERFCTR5 0x00000311
#define MSR_P4_BPU_CCCR0 0x00000360
#define MSR_P4_BPU_CCCR1 0x00000361
#define MSR_P4_BPU_CCCR2 0x00000362
#define MSR_P4_BPU_CCCR3 0x00000363
#define MSR_P4_MS_CCCR0 0x00000364
#define MSR_P4_MS_CCCR1 0x00000365
#define MSR_P4_MS_CCCR2 0x00000366
#define MSR_P4_MS_CCCR3 0x00000367
#define MSR_P4_FLAME_CCCR0 0x00000368
#define MSR_P4_FLAME_CCCR1 0x00000369
#define MSR_P4_FLAME_CCCR2 0x0000036a
#define MSR_P4_FLAME_CCCR3 0x0000036b
#define MSR_P4_IQ_CCCR0 0x0000036c
#define MSR_P4_IQ_CCCR1 0x0000036d
#define MSR_P4_IQ_CCCR2 0x0000036e
#define MSR_P4_IQ_CCCR3 0x0000036f
#define MSR_P4_IQ_CCCR4 0x00000370
#define MSR_P4_IQ_CCCR5 0x00000371
#define MSR_P4_ALF_ESCR0 0x000003ca
#define MSR_P4_ALF_ESCR1 0x000003cb
#define MSR_P4_BPU_ESCR0 0x000003b2
#define MSR_P4_BPU_ESCR1 0x000003b3
#define MSR_P4_BSU_ESCR0 0x000003a0
#define MSR_P4_BSU_ESCR1 0x000003a1
#define MSR_P4_CRU_ESCR0 0x000003b8
#define MSR_P4_CRU_ESCR1 0x000003b9
#define MSR_P4_CRU_ESCR2 0x000003cc
#define MSR_P4_CRU_ESCR3 0x000003cd
#define MSR_P4_CRU_ESCR4 0x000003e0
#define MSR_P4_CRU_ESCR5 0x000003e1
#define MSR_P4_DAC_ESCR0 0x000003a8
#define MSR_P4_DAC_ESCR1 0x000003a9
#define MSR_P4_FIRM_ESCR0 0x000003a4
#define MSR_P4_FIRM_ESCR1 0x000003a5
#define MSR_P4_FLAME_ESCR0 0x000003a6
#define MSR_P4_FLAME_ESCR1 0x000003a7
#define MSR_P4_FSB_ESCR0 0x000003a2
#define MSR_P4_FSB_ESCR1 0x000003a3
#define MSR_P4_IQ_ESCR0 0x000003ba
#define MSR_P4_IQ_ESCR1 0x000003bb
#define MSR_P4_IS_ESCR0 0x000003b4
#define MSR_P4_IS_ESCR1 0x000003b5
#define MSR_P4_ITLB_ESCR0 0x000003b6
#define MSR_P4_ITLB_ESCR1 0x000003b7
#define MSR_P4_IX_ESCR0 0x000003c8
#define MSR_P4_IX_ESCR1 0x000003c9
#define MSR_P4_MOB_ESCR0 0x000003aa
#define MSR_P4_MOB_ESCR1 0x000003ab
#define MSR_P4_MS_ESCR0 0x000003c0
#define MSR_P4_MS_ESCR1 0x000003c1
#define MSR_P4_PMH_ESCR0 0x000003ac
#define MSR_P4_PMH_ESCR1 0x000003ad
#define MSR_P4_RAT_ESCR0 0x000003bc
#define MSR_P4_RAT_ESCR1 0x000003bd
#define MSR_P4_SAAT_ESCR0 0x000003ae
#define MSR_P4_SAAT_ESCR1 0x000003af
#define MSR_P4_SSU_ESCR0 0x000003be
#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */
#define MSR_P4_TBPU_ESCR0 0x000003c2
#define MSR_P4_TBPU_ESCR1 0x000003c3
#define MSR_P4_TC_ESCR0 0x000003c4
#define MSR_P4_TC_ESCR1 0x000003c5
#define MSR_P4_U2L_ESCR0 0x000003b0
#define MSR_P4_U2L_ESCR1 0x000003b1
#define MSR_P4_PEBS_MATRIX_VERT 0x000003f2
/* Intel Core-based CPU performance counters */
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
/* Geode defined MSRs */
#define MSR_GEODE_BUSCONT_CONF0 0x00001900
/* Intel VT MSRs */
#define MSR_IA32_VMX_BASIC 0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
#define MSR_IA32_VMX_MISC 0x00000485
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
#define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
#define MSR_IA32_VMX_TRUE_PIN 0x0000048d
#define MSR_IA32_VMX_TRUE_PROC 0x0000048e
#define MSR_IA32_VMX_TRUE_EXIT 0x0000048f
#define MSR_IA32_VMX_TRUE_ENTRY 0x00000490
#define MSR_IA32_TSCDEADLINE 0x000006e0
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
#define MSR_VM_HSAVE_PA 0xc0010117
#endif /* _ASM_X86_MSR_INDEX_H */

View file

@ -0,0 +1,433 @@
#ifndef LIBCFLAT_PROCESSOR_H
#define LIBCFLAT_PROCESSOR_H
#include "libcflat.h"
#include "msr.h"
#include <stdint.h>
/* R: register-name prefix ("r"/"e"), W: operand-size suffix ("q"/"l"),
 * S: word size in bytes -- for inline asm shared by 64- and 32-bit builds. */
#ifdef __x86_64__
# define R "r"
# define W "q"
# define S "8"
#else
# define R "e"
# define W "l"
# define S "4"
#endif
#define X86_CR0_PE 0x00000001
#define X86_CR0_MP 0x00000002
#define X86_CR0_TS 0x00000008
#define X86_CR0_WP 0x00010000
#define X86_CR0_AM 0x00040000
#define X86_CR0_PG 0x80000000
#define X86_CR4_TSD 0x00000004
#define X86_CR4_DE 0x00000008
#define X86_CR4_PSE 0x00000010
#define X86_CR4_PAE 0x00000020
#define X86_CR4_VMXE 0x00002000
#define X86_CR4_PCIDE 0x00020000
#define X86_CR4_SMAP 0x00200000
#define X86_CR4_PKE 0x00400000
#define X86_EFLAGS_CF 0x00000001
#define X86_EFLAGS_PF 0x00000004
#define X86_EFLAGS_AF 0x00000010
#define X86_EFLAGS_ZF 0x00000040
#define X86_EFLAGS_SF 0x00000080
#define X86_EFLAGS_OF 0x00000800
#define X86_EFLAGS_AC 0x00040000
#define X86_IA32_EFER 0xc0000080
#define X86_EFER_LMA (1UL << 8)
struct far_pointer32 {
u32 offset;
u16 selector;
} __attribute__((packed));
struct descriptor_table_ptr {
u16 limit;
ulong base;
} __attribute__((packed));
static inline void barrier(void)
{
asm volatile ("" : : : "memory");
}
static inline void clac(void)
{
asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory");
}
static inline void stac(void)
{
asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory");
}
static inline u16 read_cs(void)
{
unsigned val;
asm volatile ("mov %%cs, %0" : "=mr"(val));
return val;
}
static inline u16 read_ds(void)
{
unsigned val;
asm volatile ("mov %%ds, %0" : "=mr"(val));
return val;
}
static inline u16 read_es(void)
{
unsigned val;
asm volatile ("mov %%es, %0" : "=mr"(val));
return val;
}
static inline u16 read_ss(void)
{
unsigned val;
asm volatile ("mov %%ss, %0" : "=mr"(val));
return val;
}
static inline u16 read_fs(void)
{
unsigned val;
asm volatile ("mov %%fs, %0" : "=mr"(val));
return val;
}
static inline u16 read_gs(void)
{
unsigned val;
asm volatile ("mov %%gs, %0" : "=mr"(val));
return val;
}
static inline unsigned long read_rflags(void)
{
unsigned long f;
asm volatile ("pushf; pop %0\n\t" : "=rm"(f));
return f;
}
static inline void write_ds(unsigned val)
{
asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory");
}
static inline void write_es(unsigned val)
{
asm volatile ("mov %0, %%es" : : "rm"(val) : "memory");
}
static inline void write_ss(unsigned val)
{
asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory");
}
static inline void write_fs(unsigned val)
{
asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory");
}
static inline void write_gs(unsigned val)
{
asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory");
}
static inline void write_rflags(unsigned long f)
{
asm volatile ("push %0; popf\n\t" : : "rm"(f));
}
static inline u64 rdmsr(u32 index)
{
u32 a, d;
asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory");
return a | ((u64)d << 32);
}
static inline void wrmsr(u32 index, u64 val)
{
u32 a = val, d = val >> 32;
asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
}
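/*
 * Usage sketch (illustrative only; the MSR and bit are arbitrary picks from
 * msr.h above, not something a test must do):
 * u64 v = rdmsr(MSR_IA32_MISC_ENABLE);
 * wrmsr(MSR_IA32_MISC_ENABLE, v | MSR_IA32_MISC_ENABLE_FAST_STRING);
 */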
static inline uint64_t rdpmc(uint32_t index)
{
uint32_t a, d;
asm volatile ("rdpmc" : "=a"(a), "=d"(d) : "c"(index));
return a | ((uint64_t)d << 32);
}
static inline void write_cr0(ulong val)
{
asm volatile ("mov %0, %%cr0" : : "r"(val) : "memory");
}
static inline ulong read_cr0(void)
{
ulong val;
asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory");
return val;
}
static inline void write_cr2(ulong val)
{
asm volatile ("mov %0, %%cr2" : : "r"(val) : "memory");
}
static inline ulong read_cr2(void)
{
ulong val;
asm volatile ("mov %%cr2, %0" : "=r"(val) : : "memory");
return val;
}
static inline void write_cr3(ulong val)
{
asm volatile ("mov %0, %%cr3" : : "r"(val) : "memory");
}
static inline ulong read_cr3(void)
{
ulong val;
asm volatile ("mov %%cr3, %0" : "=r"(val) : : "memory");
return val;
}
static inline void write_cr4(ulong val)
{
asm volatile ("mov %0, %%cr4" : : "r"(val) : "memory");
}
static inline ulong read_cr4(void)
{
ulong val;
asm volatile ("mov %%cr4, %0" : "=r"(val) : : "memory");
return val;
}
static inline void write_cr8(ulong val)
{
asm volatile ("mov %0, %%cr8" : : "r"(val) : "memory");
}
static inline ulong read_cr8(void)
{
ulong val;
asm volatile ("mov %%cr8, %0" : "=r"(val) : : "memory");
return val;
}
static inline void lgdt(const struct descriptor_table_ptr *ptr)
{
asm volatile ("lgdt %0" : : "m"(*ptr));
}
static inline void sgdt(struct descriptor_table_ptr *ptr)
{
asm volatile ("sgdt %0" : "=m"(*ptr));
}
static inline void lidt(const struct descriptor_table_ptr *ptr)
{
asm volatile ("lidt %0" : : "m"(*ptr));
}
static inline void sidt(struct descriptor_table_ptr *ptr)
{
asm volatile ("sidt %0" : "=m"(*ptr));
}
static inline void lldt(unsigned val)
{
asm volatile ("lldt %0" : : "rm"(val));
}
static inline u16 sldt(void)
{
u16 val;
asm volatile ("sldt %0" : "=rm"(val));
return val;
}
static inline void ltr(u16 val)
{
asm volatile ("ltr %0" : : "rm"(val));
}
static inline u16 str(void)
{
u16 val;
asm volatile ("str %0" : "=rm"(val));
return val;
}
static inline void write_dr6(ulong val)
{
asm volatile ("mov %0, %%dr6" : : "r"(val) : "memory");
}
static inline ulong read_dr6(void)
{
ulong val;
asm volatile ("mov %%dr6, %0" : "=r"(val));
return val;
}
static inline void write_dr7(ulong val)
{
asm volatile ("mov %0, %%dr7" : : "r"(val) : "memory");
}
static inline ulong read_dr7(void)
{
ulong val;
asm volatile ("mov %%dr7, %0" : "=r"(val));
return val;
}
struct cpuid { u32 a, b, c, d; };
static inline struct cpuid raw_cpuid(u32 function, u32 index)
{
struct cpuid r;
asm volatile ("cpuid"
: "=a"(r.a), "=b"(r.b), "=c"(r.c), "=d"(r.d)
: "0"(function), "2"(index));
return r;
}
static inline struct cpuid cpuid_indexed(u32 function, u32 index)
{
u32 level = raw_cpuid(function & 0xf0000000, 0).a;
if (level < function)
return (struct cpuid) { 0, 0, 0, 0 };
return raw_cpuid(function, index);
}
static inline struct cpuid cpuid(u32 function)
{
return cpuid_indexed(function, 0);
}
static inline u8 cpuid_maxphyaddr(void)
{
if (raw_cpuid(0x80000000, 0).a < 0x80000008)
return 36;
return raw_cpuid(0x80000008, 0).a & 0xff;
}
static inline void pause(void)
{
asm volatile ("pause");
}
static inline void cli(void)
{
asm volatile ("cli");
}
static inline void sti(void)
{
asm volatile ("sti");
}
static inline unsigned long long rdtsc()
{
long long r;
#ifdef __x86_64__
unsigned a, d;
asm volatile ("rdtsc" : "=a"(a), "=d"(d));
r = a | ((long long)d << 32);
#else
asm volatile ("rdtsc" : "=A"(r));
#endif
return r;
}
static inline unsigned long long rdtscp(u32 *aux)
{
long long r;
#ifdef __x86_64__
unsigned a, d;
asm volatile ("rdtscp" : "=a"(a), "=d"(d), "=c"(*aux));
r = a | ((long long)d << 32);
#else
asm volatile ("rdtscp" : "=A"(r), "=c"(*aux));
#endif
return r;
}
static inline void wrtsc(u64 tsc)
{
unsigned a = tsc, d = tsc >> 32;
asm volatile("wrmsr" : : "a"(a), "d"(d), "c"(0x10));
}
static inline void irq_disable(void)
{
asm volatile("cli");
}
/* Note that irq_enable() does not ensure an interrupt shadow due
* to the vagaries of compiler optimizations. If you need the
* shadow, use a single asm with "sti" and the instruction after it.
*/
static inline void irq_enable(void)
{
asm volatile("sti");
}
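/*
 * Sketch: to rely on the shadow, put the protected instruction in the same
 * asm statement as the sti, e.g. asm volatile("sti; hlt") -- that is what
 * safe_halt() below does, so no interrupt can arrive before the hlt starts.
 */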
static inline void invlpg(volatile void *va)
{
asm volatile("invlpg (%0)" ::"r" (va) : "memory");
}
static inline void safe_halt(void)
{
asm volatile("sti; hlt");
}
static inline u32 read_pkru(void)
{
unsigned int eax, edx;
unsigned int ecx = 0;
unsigned int pkru;
asm volatile(".byte 0x0f,0x01,0xee\n\t"
: "=a" (eax), "=d" (edx)
: "c" (ecx));
pkru = eax;
return pkru;
}
static inline void write_pkru(u32 pkru)
{
unsigned int eax = pkru;
unsigned int ecx = 0;
unsigned int edx = 0;
asm volatile(".byte 0x0f,0x01,0xef\n\t"
: : "a" (eax), "c" (ecx), "d" (edx));
}
#endif

View file

@ -0,0 +1,25 @@
.globl setjmp
setjmp:
mov (%esp), %ecx // get return EIP
mov 4(%esp), %eax // get jmp_buf
mov %ecx, (%eax)
mov %esp, 4(%eax)
mov %ebp, 8(%eax)
mov %ebx, 12(%eax)
mov %esi, 16(%eax)
mov %edi, 20(%eax)
xor %eax, %eax
ret
.globl longjmp
longjmp:
mov 8(%esp), %eax // get return value
mov 4(%esp), %ecx // get jmp_buf
mov 20(%ecx), %edi
mov 16(%ecx), %esi
mov 12(%ecx), %ebx
mov 8(%ecx), %ebp
mov 4(%ecx), %esp
mov (%ecx), %ecx // get saved EIP
mov %ecx, (%esp) // and store it on the stack
ret

View file

@ -0,0 +1,27 @@
.globl setjmp
setjmp:
mov (%rsp), %rsi // get return RIP
mov %rsi, (%rdi) // store it in the jmp_buf (first argument, %rdi)
mov %rsp, 0x8(%rdi)
mov %rbp, 0x10(%rdi)
mov %rbx, 0x18(%rdi)
mov %r12, 0x20(%rdi)
mov %r13, 0x28(%rdi)
mov %r14, 0x30(%rdi)
mov %r15, 0x38(%rdi)
xor %eax, %eax
ret
.globl longjmp
longjmp:
mov %esi, %eax // get return value
mov 0x38(%rdi), %r15
mov 0x30(%rdi), %r14
mov 0x28(%rdi), %r13
mov 0x20(%rdi), %r12
mov 0x18(%rdi), %rbx
mov 0x10(%rdi), %rbp
mov 0x8(%rdi), %rsp
mov (%rdi), %rsi // get saved RIP
mov %rsi, (%rsp) // and store it on the stack
ret

View file

@ -0,0 +1,47 @@
/*
* Initialize machine setup information
*
* Copyright (C) 2017, Red Hat Inc, Andrew Jones <drjones@redhat.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.
*/
#include "libcflat.h"
#define MBI_MODS_COUNT 20
#define MBI_MODS_ADDR 24
#define MB_MOD_START 0
#define MB_MOD_END 4
#define ENV_SIZE 16384
extern void setup_env(char *env, int size);
char *initrd;
u32 initrd_size;
static char env[ENV_SIZE];
void setup_get_initrd(u8 *bootinfo)
{
u32 *mods_addr, *mod_start, *mod_end;
/* expect exactly one multiboot module: the initrd */
if (*((u32 *)&bootinfo[MBI_MODS_COUNT]) != 1)
return;
mods_addr = (u32 *)&bootinfo[MBI_MODS_ADDR];
mod_start = (u32 *)(ulong)(*mods_addr + MB_MOD_START);
mod_end = (u32 *)(ulong)(*mods_addr + MB_MOD_END);
initrd = (char *)(ulong)*mod_start;
initrd_size = *mod_end - *mod_start;
}
void setup_environ(void)
{
if (initrd) {
/* environ is currently the only file in the initrd */
u32 size = MIN(initrd_size, ENV_SIZE);
memcpy(env, initrd, size);
setup_env(env, size);
}
}

View file

@ -0,0 +1,125 @@
#include <libcflat.h>
#include "smp.h"
#include "apic.h"
#include "fwcfg.h"
#include "desc.h"
#define IPI_VECTOR 0x20
typedef void (*ipi_function_type)(void *data);
static struct spinlock ipi_lock;
static volatile ipi_function_type ipi_function;
static void *volatile ipi_data;
static volatile int ipi_done;
static volatile bool ipi_wait;
static int _cpu_count;
static __attribute__((used)) void ipi()
{
void (*function)(void *data) = ipi_function;
void *data = ipi_data;
bool wait = ipi_wait;
if (!wait) {
/* async call: release the sender before running the function */
ipi_done = 1;
apic_write(APIC_EOI, 0);
}
function(data);
if (wait) {
/* sync call: only signal completion after the function has run */
ipi_done = 1;
apic_write(APIC_EOI, 0);
}
}
asm (
"ipi_entry: \n"
" call ipi \n"
#ifndef __x86_64__
" iret"
#else
" iretq"
#endif
);
void spin_lock(struct spinlock *lock)
{
int v = 1;
/* atomically swap 1 into lock->v; spin until the old value was 0 */
do {
asm volatile ("xchg %1, %0" : "+m"(lock->v), "+r"(v));
} while (v);
/* compiler barrier: keep the critical section from moving above the lock */
asm volatile ("" : : : "memory");
}
void spin_unlock(struct spinlock *lock)
{
/* compiler barrier: keep the critical section from moving below the unlock */
asm volatile ("" : : : "memory");
lock->v = 0;
}
int cpu_count(void)
{
return _cpu_count;
}
int smp_id(void)
{
unsigned id;
asm ("mov %%gs:0, %0" : "=r"(id));
return id;
}
static void setup_smp_id(void *data)
{
asm ("mov %0, %%gs:0" : : "r"(apic_id()) : "memory");
}
static void __on_cpu(int cpu, void (*function)(void *data), void *data,
int wait)
{
spin_lock(&ipi_lock);
if (cpu == smp_id())
function(data);
else {
ipi_done = 0;
ipi_function = function;
ipi_data = data;
ipi_wait = wait;
apic_icr_write(APIC_INT_ASSERT | APIC_DEST_PHYSICAL | APIC_DM_FIXED
| IPI_VECTOR,
cpu);
while (!ipi_done)
;
}
spin_unlock(&ipi_lock);
}
void on_cpu(int cpu, void (*function)(void *data), void *data)
{
__on_cpu(cpu, function, data, 1);
}
void on_cpu_async(int cpu, void (*function)(void *data), void *data)
{
__on_cpu(cpu, function, data, 0);
}
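/*
 * Usage sketch (probe() is a hypothetical callback): run it on CPU 1 and
 * wait for it to finish, or fire and forget:
 * on_cpu(1, probe, NULL);
 * on_cpu_async(1, probe, NULL);
 */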
void smp_init(void)
{
int i;
void ipi_entry(void);
_cpu_count = fwcfg_get_nb_cpus();
setup_idt();
set_idt_entry(IPI_VECTOR, ipi_entry, 0);
setup_smp_id(0);
for (i = 1; i < cpu_count(); ++i)
on_cpu(i, setup_smp_id, 0);
}

View file

@ -0,0 +1,12 @@
#ifndef __SMP_H
#define __SMP_H
#include <asm/spinlock.h>
void smp_init(void);
int cpu_count(void);
int smp_id(void);
void on_cpu(int cpu, void (*function)(void *data), void *data);
void on_cpu_async(int cpu, void (*function)(void *data), void *data);
#endif

View file

@ -0,0 +1,31 @@
#include <libcflat.h>
#include <stack.h>
int backtrace_frame(const void *frame, const void **return_addrs, int max_depth)
{
static int walking;
int depth = 0;
const unsigned long *bp = (unsigned long *) frame;
if (walking) {
printf("RECURSIVE STACK WALK!!!\n");
return 0;
}
walking = 1;
for (depth = 0; bp && depth < max_depth; depth++) {
return_addrs[depth] = (void *) bp[1];
if (return_addrs[depth] == 0)
break;
bp = (unsigned long *) bp[0];
}
walking = 0;
return depth;
}
int backtrace(const void **return_addrs, int max_depth)
{
return backtrace_frame(__builtin_frame_address(0), return_addrs,
max_depth);
}
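/*
 * Usage sketch: capture up to 16 return addresses of the current call chain:
 * const void *addrs[16];
 * int depth = backtrace(addrs, 16);
 */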

View file

@ -0,0 +1,224 @@
#include "fwcfg.h"
#include "vm.h"
#include "libcflat.h"
static void *free = 0;
static void *vfree_top = 0;
static void free_memory(void *mem, unsigned long size)
{
while (size >= PAGE_SIZE) {
*(void **)mem = free;
free = mem;
mem += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
void *alloc_page()
{
void *p;
if (!free)
return 0;
p = free;
free = *(void **)free;
return p;
}
void free_page(void *page)
{
*(void **)page = free;
free = page;
}
extern char edata;
static unsigned long end_of_memory;
unsigned long *install_pte(unsigned long *cr3,
int pte_level,
void *virt,
unsigned long pte,
unsigned long *pt_page)
{
int level;
unsigned long *pt = cr3;
unsigned offset;
/* walk from the top level down to pte_level, creating missing tables */
for (level = PAGE_LEVEL; level > pte_level; --level) {
offset = PGDIR_OFFSET((unsigned long)virt, level);
if (!(pt[offset] & PT_PRESENT_MASK)) {
/* use the caller-supplied page for the first new table, if provided */
unsigned long *new_pt = pt_page;
if (!new_pt)
new_pt = alloc_page();
else
pt_page = 0;
memset(new_pt, 0, PAGE_SIZE);
pt[offset] = virt_to_phys(new_pt) | PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
}
pt = phys_to_virt(pt[offset] & PT_ADDR_MASK);
}
/* level == pte_level here: write the requested entry */
offset = PGDIR_OFFSET((unsigned long)virt, level);
pt[offset] = pte;
return &pt[offset];
}
unsigned long *get_pte(unsigned long *cr3, void *virt)
{
int level;
unsigned long *pt = cr3, pte;
unsigned offset;
for (level = PAGE_LEVEL; level > 1; --level) {
offset = ((unsigned long)virt >> (((level-1) * PGDIR_WIDTH) + 12)) & PGDIR_MASK;
pte = pt[offset];
if (!(pte & PT_PRESENT_MASK))
return NULL;
if (level == 2 && (pte & PT_PAGE_SIZE_MASK))
return &pt[offset];
pt = phys_to_virt(pte & PT_ADDR_MASK);
}
offset = ((unsigned long)virt >> (((level-1) * PGDIR_WIDTH) + 12)) & PGDIR_MASK;
return &pt[offset];
}
unsigned long *install_large_page(unsigned long *cr3,
unsigned long phys,
void *virt)
{
return install_pte(cr3, 2, virt,
phys | PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK | PT_PAGE_SIZE_MASK, 0);
}
unsigned long *install_page(unsigned long *cr3,
unsigned long phys,
void *virt)
{
return install_pte(cr3, 1, virt, phys | PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK, 0);
}
static void setup_mmu_range(unsigned long *cr3, unsigned long start,
unsigned long len)
{
u64 max = (u64)len + (u64)start;
u64 phys = start;
while (phys + LARGE_PAGE_SIZE <= max) {
install_large_page(cr3, phys, (void *)(ulong)phys);
phys += LARGE_PAGE_SIZE;
}
while (phys + PAGE_SIZE <= max) {
install_page(cr3, phys, (void *)(ulong)phys);
phys += PAGE_SIZE;
}
}
static void setup_mmu(unsigned long len)
{
unsigned long *cr3 = alloc_page();
memset(cr3, 0, PAGE_SIZE);
#ifdef __x86_64__
if (len < (1ul << 32))
len = (1ul << 32); /* map mmio 1:1 */
setup_mmu_range(cr3, 0, len);
#else
if (len > (1ul << 31))
len = (1ul << 31);
/* 0 - 2G memory, 2G-3G valloc area, 3G-4G mmio */
setup_mmu_range(cr3, 0, len);
setup_mmu_range(cr3, 3ul << 30, (1ul << 30));
vfree_top = (void*)(3ul << 30);
#endif
write_cr3(virt_to_phys(cr3));
#ifndef __x86_64__
write_cr4(X86_CR4_PSE);
#endif
write_cr0(X86_CR0_PG |X86_CR0_PE | X86_CR0_WP);
printf("paging enabled\n");
printf("cr0 = %lx\n", read_cr0());
printf("cr3 = %lx\n", read_cr3());
printf("cr4 = %lx\n", read_cr4());
}
void setup_vm()
{
assert(!end_of_memory);
end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
free_memory(&edata, end_of_memory - (unsigned long)&edata);
setup_mmu(end_of_memory);
}
void *vmalloc(unsigned long size)
{
void *mem, *p;
unsigned pages;
/* reserve room for the size header and round up to whole pages */
size += sizeof(unsigned long);
size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
vfree_top -= size;
mem = p = vfree_top;
pages = size / PAGE_SIZE;
/* back the virtual range with freshly allocated physical pages */
while (pages--) {
install_page(phys_to_virt(read_cr3()), virt_to_phys(alloc_page()), p);
p += PAGE_SIZE;
}
/* stash the rounded size just before the returned pointer; vfree() reads it */
*(unsigned long *)mem = size;
mem += sizeof(unsigned long);
return mem;
}
uint64_t virt_to_phys_cr3(void *mem)
{
return (*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK) + ((ulong)mem & (PAGE_SIZE - 1));
}
void vfree(void *mem)
{
unsigned long size = ((unsigned long *)mem)[-1];
while (size) {
free_page(phys_to_virt(*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK));
mem += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
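/*
 * Usage sketch: carve out a page-backed virtual buffer and release it again:
 * void *buf = vmalloc(3 * PAGE_SIZE);
 * ...
 * vfree(buf);
 */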
void *vmap(unsigned long long phys, unsigned long size)
{
void *mem, *p;
unsigned pages;
size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
vfree_top -= size;
phys &= ~(unsigned long long)(PAGE_SIZE - 1);
mem = p = vfree_top;
pages = size / PAGE_SIZE;
while (pages--) {
install_page(phys_to_virt(read_cr3()), phys, p);
phys += PAGE_SIZE;
p += PAGE_SIZE;
}
return mem;
}
void *alloc_vpages(ulong nr)
{
vfree_top -= PAGE_SIZE * nr;
return vfree_top;
}
void *alloc_vpage(void)
{
return alloc_vpages(1);
}

View file

@ -0,0 +1,31 @@
#ifndef VM_H
#define VM_H
#include "processor.h"
#include "asm/page.h"
#include "asm/io.h"
void setup_vm();
void *vmalloc(unsigned long size);
void vfree(void *mem);
void *vmap(unsigned long long phys, unsigned long size);
void *alloc_vpage(void);
void *alloc_vpages(ulong nr);
uint64_t virt_to_phys_cr3(void *mem);
unsigned long *get_pte(unsigned long *cr3, void *virt);
unsigned long *install_pte(unsigned long *cr3,
int pte_level,
void *virt,
unsigned long pte,
unsigned long *pt_page);
void *alloc_page();
void free_page(void *page);
unsigned long *install_large_page(unsigned long *cr3,unsigned long phys,
void *virt);
unsigned long *install_page(unsigned long *cr3, unsigned long phys, void *virt);
#endif

38
tests/kvm-unit-tests/run.js Executable file
View file

@ -0,0 +1,38 @@
#!/usr/bin/env node
"use strict";
var V86 = require("../../build/libv86.js").V86;
var fs = require("fs");
function readfile(path)
{
return new Uint8Array(fs.readFileSync(path)).buffer;
}
// Minimal file-buffer object for the multiboot option: exposes buffer and
// byteLength, plus a load() that immediately signals completion via onload.
function Loader(path)
{
this.buffer = readfile(path);
this.byteLength = this.buffer.byteLength;
}
Loader.prototype.load = function()
{
this.onload && this.onload({});
};
var bios = readfile(__dirname + "/../../bios/seabios.bin");
var vga_bios = readfile(__dirname + "/../../bios/vgabios.bin");
var emulator = new V86({
bios: { buffer: bios },
vga_bios: { buffer: vga_bios },
multiboot: new Loader(process.argv[2]),
autostart: true,
memory_size: 256 * 1024 * 1024,
});
emulator.add_listener("serial0-output-char", function(chr)
{
process.stdout.write(chr);
});
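// Example invocation (the image path is illustrative; pass whatever multiboot
// .flat binary you built):
//   node tests/kvm-unit-tests/run.js path/to/test.flat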

View file

@ -0,0 +1 @@
include $(TEST_DIR)/Makefile.$(ARCH)

Some files were not shown because too many files have changed in this diff