author    fishsoupisgood <github@madingley.org>  2019-04-29 01:17:54 +0100
committer fishsoupisgood <github@madingley.org>  2019-05-27 03:43:43 +0100
commit    3f2546b2ef55b661fd8dd69682b38992225e86f6 (patch)
tree      65ca85f13617aee1dce474596800950f266a456c /scripts
Initial import of qemu-2.4.1 (HEAD, master)
Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/acpi_extract.py | 367
-rwxr-xr-x  scripts/acpi_extract_preprocess.py | 51
-rwxr-xr-x  scripts/analyse-9p-simpletrace.py | 213
-rwxr-xr-x  scripts/analyze-migration.py | 597
-rwxr-xr-x  scripts/check-qerror.sh | 22
-rwxr-xr-x  scripts/checkpatch.pl | 2980
-rwxr-xr-x  scripts/cleanup-trace-events.pl | 51
-rw-r--r--  scripts/coverity-model.c | 343
-rwxr-xr-x  scripts/create_config | 116
-rwxr-xr-x  scripts/disas-objdump.pl | 99
-rw-r--r--  scripts/dump-guest-memory.py | 339
-rwxr-xr-x  scripts/extract-vsssdk-headers | 35
-rw-r--r--  scripts/feature_to_c.sh | 78
-rwxr-xr-x  scripts/get_maintainer.pl | 2121
-rwxr-xr-x  scripts/gtester-cat | 26
-rw-r--r--  scripts/hxtool | 105
-rwxr-xr-x  scripts/kvm/kvm_flightrecorder | 126
-rwxr-xr-x  scripts/kvm/kvm_stat | 646
-rw-r--r--  scripts/kvm/kvm_stat.texi | 55
-rwxr-xr-x  scripts/kvm/vmxcap | 260
-rwxr-xr-x  scripts/make-release | 25
-rw-r--r--  scripts/make_device_config.sh | 30
-rw-r--r--  scripts/ordereddict.py | 127
-rw-r--r--  scripts/qapi-commands.py | 376
-rw-r--r--  scripts/qapi-event.py | 299
-rw-r--r--  scripts/qapi-types.py | 402
-rw-r--r--  scripts/qapi-visit.py | 484
-rw-r--r--  scripts/qapi.py | 1074
-rw-r--r--  scripts/qemu-binfmt-conf.sh | 72
-rw-r--r--  scripts/qemu-gdb.py | 164
-rwxr-xr-x  scripts/qemu-guest-agent/fsfreeze-hook | 33
-rwxr-xr-x  scripts/qemu-guest-agent/fsfreeze-hook.d/mysql-flush.sh.sample | 56
-rwxr-xr-x  scripts/qmp/qemu-ga-client | 301
-rwxr-xr-x  scripts/qmp/qmp | 126
-rwxr-xr-x  scripts/qmp/qmp-shell | 390
-rw-r--r--  scripts/qmp/qmp.py | 236
-rwxr-xr-x  scripts/qmp/qom-fuse | 138
-rwxr-xr-x  scripts/qmp/qom-get | 67
-rwxr-xr-x  scripts/qmp/qom-list | 64
-rwxr-xr-x  scripts/qmp/qom-set | 64
-rwxr-xr-x  scripts/qmp/qom-tree | 75
-rw-r--r--  scripts/qtest.py | 71
-rwxr-xr-x  scripts/refresh-pxe-roms.sh | 31
-rw-r--r--  scripts/shaderinclude.pl | 16
-rw-r--r--  scripts/signrom.py | 40
-rwxr-xr-x  scripts/simpletrace.py | 195
-rwxr-xr-x  scripts/switch-timer-api | 178
-rwxr-xr-x  scripts/texi2pod.pl | 486
-rwxr-xr-x  scripts/tracetool.py | 139
-rw-r--r--  scripts/tracetool/__init__.py | 366
-rw-r--r--  scripts/tracetool/backend/__init__.py | 123
-rw-r--r--  scripts/tracetool/backend/dtrace.py | 46
-rw-r--r--  scripts/tracetool/backend/ftrace.py | 48
-rw-r--r--  scripts/tracetool/backend/simple.py | 99
-rw-r--r--  scripts/tracetool/backend/stderr.py | 47
-rw-r--r--  scripts/tracetool/backend/ust.py | 35
-rw-r--r--  scripts/tracetool/format/__init__.py | 85
-rw-r--r--  scripts/tracetool/format/c.py | 28
-rw-r--r--  scripts/tracetool/format/d.py | 54
-rw-r--r--  scripts/tracetool/format/events_c.py | 36
-rw-r--r--  scripts/tracetool/format/events_h.py | 52
-rw-r--r--  scripts/tracetool/format/h.py | 44
-rw-r--r--  scripts/tracetool/format/simpletrace_stap.py | 71
-rw-r--r--  scripts/tracetool/format/stap.py | 61
-rw-r--r--  scripts/tracetool/format/tcg_h.py | 57
-rw-r--r--  scripts/tracetool/format/tcg_helper_c.py | 50
-rw-r--r--  scripts/tracetool/format/tcg_helper_h.py | 50
-rw-r--r--  scripts/tracetool/format/tcg_helper_wrapper_h.py | 70
-rw-r--r--  scripts/tracetool/format/ust_events_c.py | 33
-rw-r--r--  scripts/tracetool/format/ust_events_h.py | 100
-rw-r--r--  scripts/tracetool/transform.py | 166
-rw-r--r--  scripts/update-acpi.sh | 4
-rwxr-xr-x  scripts/update-linux-headers.sh | 133
-rwxr-xr-x  scripts/vmstate-static-checker.py | 424
74 files changed, 16671 insertions(+), 0 deletions(-)
diff --git a/scripts/acpi_extract.py b/scripts/acpi_extract.py
new file mode 100755
index 00000000..10c1ffb3
--- /dev/null
+++ b/scripts/acpi_extract.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+# Copyright (C) 2011 Red Hat, Inc., Michael S. Tsirkin <mst@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+# Process mixed ASL/AML listing (.lst file) produced by iasl -l
+# Locate and execute ACPI_EXTRACT directives, output offset info
+#
+# Documentation of ACPI_EXTRACT_* directive tags:
+#
+# These directive tags output offset information from AML for BIOS runtime
+# table generation.
+# Each directive is of the form:
+# ACPI_EXTRACT_<TYPE> <array_name> <Operator> (...)
+# and causes the extractor to create an array
+# named <array_name> with offset, in the generated AML,
+# of an object of a given type in the following <Operator>.
+#
+# A directive must fit on a single code line.
+#
+# Object type in AML is verified, a mismatch causes a build failure.
+#
+# Directives and operators currently supported are:
+# ACPI_EXTRACT_NAME_DWORD_CONST - extract a Dword Const object from Name()
+# ACPI_EXTRACT_NAME_WORD_CONST - extract a Word Const object from Name()
+# ACPI_EXTRACT_NAME_BYTE_CONST - extract a Byte Const object from Name()
+# ACPI_EXTRACT_METHOD_STRING - extract a NameString from Method()
+# ACPI_EXTRACT_NAME_STRING - extract a NameString from Name()
+# ACPI_EXTRACT_PROCESSOR_START - start of Processor() block
+# ACPI_EXTRACT_PROCESSOR_STRING - extract a NameString from Processor()
+# ACPI_EXTRACT_PROCESSOR_END - offset at last byte of Processor() + 1
+# ACPI_EXTRACT_PKG_START - start of Package block
+#
+# ACPI_EXTRACT_ALL_CODE - create an array storing the generated AML bytecode
+#
+# ACPI_EXTRACT is not allowed anywhere else in code, except in comments.
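#
# Illustrative example (hypothetical array name, offset and ASL source, not
# taken from any real table): given
#
#     ACPI_EXTRACT_NAME_BYTE_CONST ssdt_proc_id
#     Name(ID, 0xAA)
#
# this script locates the 0xAA byte constant in the compiled AML and emits a
# C array holding its offset, in the format produced at the end of this file:
#
#     static unsigned char ssdt_proc_id[] = {
#     0x3c
#     };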
+
+import re;
+import sys;
+import fileinput;
+
+aml = []
+asl = []
+output = {}
+debug = ""
+
+class asl_line:
+ line = None
+ lineno = None
+ aml_offset = None
+
+def die(diag):
+ sys.stderr.write("Error: %s; %s\n" % (diag, debug))
+ sys.exit(1)
+
+#Store an ASL command, matching AML offset, and input line (for debugging)
+def add_asl(lineno, line):
+ l = asl_line()
+ l.line = line
+ l.lineno = lineno
+ l.aml_offset = len(aml)
+ asl.append(l)
+
+#Store an AML byte sequence
+#Verify that offset output by iasl matches # of bytes so far
+def add_aml(offset, line):
+ o = int(offset, 16);
+ # Sanity check: offset must match size of code so far
+ if (o != len(aml)):
+ die("Offset 0x%x != 0x%x" % (o, len(aml)))
+ # Strip any trailing dots and ASCII dump after "
+ line = re.sub(r'\s*\.*\s*".*$',"", line)
+    # Strip trailing whitespace
+ line = re.sub(r'\s+$',"", line)
+ # Strip leading whitespace
+ line = re.sub(r'^\s+',"", line)
+ # Split on whitespace
+ code = re.split(r'\s+', line)
+ for c in code:
+ # Require a legal hex number, two digits
+ if (not(re.search(r'^[0-9A-Fa-f][0-9A-Fa-f]$', c))):
+ die("Unexpected octet %s" % c);
+ aml.append(int(c, 16));
+
+# Process aml bytecode array, decoding AML
+def aml_pkglen_bytes(offset):
+    # PkgLength can be multibyte. The top two bits of the lead byte give the # of extra bytes.
+ pkglenbytes = aml[offset] >> 6;
+ return pkglenbytes + 1
+
+def aml_pkglen(offset):
+ pkgstart = offset
+ pkglenbytes = aml_pkglen_bytes(offset)
+ pkglen = aml[offset] & 0x3F
+ # If multibyte, first nibble only uses bits 0-3
+ if ((pkglenbytes > 1) and (pkglen & 0x30)):
+ die("PkgLen bytes 0x%x but first nibble 0x%x expected 0x0X" %
+ (pkglen, pkglen))
+ offset += 1
+ pkglenbytes -= 1
+ for i in range(pkglenbytes):
+ pkglen |= aml[offset + i] << (i * 8 + 4)
+ if (len(aml) < pkgstart + pkglen):
+ die("PckgLen 0x%x at offset 0x%x exceeds AML size 0x%x" %
+ (pkglen, offset, len(aml)))
+ return pkglen
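#
# Worked example for the two helpers above (hypothetical bytes): for the
# two-byte encoding 0x48 0x23, aml_pkglen_bytes() returns (0x48 >> 6) + 1 = 2,
# and aml_pkglen() combines the low nibble of the lead byte with the second
# byte shifted left by four: 0x08 | (0x23 << 4) = 0x238 bytes in the package.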
+
+# Given method offset, find its NameString offset
+def aml_method_string(offset):
+ #0x14 MethodOp PkgLength NameString MethodFlags TermList
+ if (aml[offset] != 0x14):
+ die( "Method offset 0x%x: expected 0x14 actual 0x%x" %
+ (offset, aml[offset]));
+ offset += 1;
+ pkglenbytes = aml_pkglen_bytes(offset)
+ offset += pkglenbytes;
+ return offset;
+
+# Given name offset, find its NameString offset
+def aml_name_string(offset):
+ #0x08 NameOp NameString DataRef
+ if (aml[offset] != 0x08):
+ die( "Name offset 0x%x: expected 0x08 actual 0x%x" %
+ (offset, aml[offset]));
+ offset += 1
+ # Block Name Modifier. Skip it.
+ if (aml[offset] == 0x5c or aml[offset] == 0x5e):
+ offset += 1
+ return offset;
+
+# Given data offset, find variable length byte buffer offset
+def aml_data_buffer(offset, length):
+ #0x11 PkgLength BufferSize ByteList
+ if (length > 63):
+ die( "Name offset 0x%x: expected a one byte PkgLength (length<=63)" %
+ (offset));
+ expect = [0x11, length+3, 0x0A, length]
+ if (aml[offset:offset+4] != expect):
+ die( "Name offset 0x%x: expected %s actual %s" %
+ (offset, expect, aml[offset:offset+4]))
+ return offset + len(expect)
+
+# Given data offset, find dword const offset
+def aml_data_dword_const(offset):
+ #0x08 NameOp NameString DataRef
+ if (aml[offset] != 0x0C):
+ die( "Name offset 0x%x: expected 0x0C actual 0x%x" %
+ (offset, aml[offset]));
+ return offset + 1;
+
+# Given data offset, find word const offset
+def aml_data_word_const(offset):
+ #0x08 NameOp NameString DataRef
+ if (aml[offset] != 0x0B):
+ die( "Name offset 0x%x: expected 0x0B actual 0x%x" %
+ (offset, aml[offset]));
+ return offset + 1;
+
+# Given data offset, find byte const offset
+def aml_data_byte_const(offset):
+ #0x08 NameOp NameString DataRef
+ if (aml[offset] != 0x0A):
+ die( "Name offset 0x%x: expected 0x0A actual 0x%x" %
+ (offset, aml[offset]));
+ return offset + 1;
+
+# Find name'd buffer
+def aml_name_buffer(offset, length):
+ return aml_data_buffer(aml_name_string(offset) + 4, length)
+
+# Given name offset, find dword const offset
+def aml_name_dword_const(offset):
+ return aml_data_dword_const(aml_name_string(offset) + 4)
+
+# Given name offset, find word const offset
+def aml_name_word_const(offset):
+ return aml_data_word_const(aml_name_string(offset) + 4)
+
+# Given name offset, find byte const offset
+def aml_name_byte_const(offset):
+ return aml_data_byte_const(aml_name_string(offset) + 4)
+
+def aml_device_start(offset):
+ #0x5B 0x82 DeviceOp PkgLength NameString
+ if ((aml[offset] != 0x5B) or (aml[offset + 1] != 0x82)):
+ die( "Name offset 0x%x: expected 0x5B 0x82 actual 0x%x 0x%x" %
+ (offset, aml[offset], aml[offset + 1]));
+ return offset
+
+def aml_device_string(offset):
+ #0x5B 0x82 DeviceOp PkgLength NameString
+ start = aml_device_start(offset)
+ offset += 2
+ pkglenbytes = aml_pkglen_bytes(offset)
+ offset += pkglenbytes
+ return offset
+
+def aml_device_end(offset):
+ start = aml_device_start(offset)
+ offset += 2
+ pkglenbytes = aml_pkglen_bytes(offset)
+ pkglen = aml_pkglen(offset)
+ return offset + pkglen
+
+def aml_processor_start(offset):
+ #0x5B 0x83 ProcessorOp PkgLength NameString ProcID
+ if ((aml[offset] != 0x5B) or (aml[offset + 1] != 0x83)):
+ die( "Name offset 0x%x: expected 0x5B 0x83 actual 0x%x 0x%x" %
+ (offset, aml[offset], aml[offset + 1]));
+ return offset
+
+def aml_processor_string(offset):
+ #0x5B 0x83 ProcessorOp PkgLength NameString ProcID
+ start = aml_processor_start(offset)
+ offset += 2
+ pkglenbytes = aml_pkglen_bytes(offset)
+ offset += pkglenbytes
+ return offset
+
+def aml_processor_end(offset):
+ start = aml_processor_start(offset)
+ offset += 2
+ pkglenbytes = aml_pkglen_bytes(offset)
+ pkglen = aml_pkglen(offset)
+ return offset + pkglen
+
+def aml_package_start(offset):
+ offset = aml_name_string(offset) + 4
+ # 0x12 PkgLength NumElements PackageElementList
+ if (aml[offset] != 0x12):
+ die( "Name offset 0x%x: expected 0x12 actual 0x%x" %
+ (offset, aml[offset]));
+ offset += 1
+ return offset + aml_pkglen_bytes(offset) + 1
+
+lineno = 0
+for line in fileinput.input():
+ # Strip trailing newline
+ line = line.rstrip();
+ # line number and debug string to output in case of errors
+ lineno = lineno + 1
+ debug = "input line %d: %s" % (lineno, line)
+ #ASL listing: space, then line#, then ...., then code
+ pasl = re.compile('^\s+([0-9]+)(:\s\s|\.\.\.\.)\s*')
+ m = pasl.search(line)
+ if (m):
+ add_asl(lineno, pasl.sub("", line));
+ # AML listing: offset in hex, then ...., then code
+ paml = re.compile('^([0-9A-Fa-f]+)(:\s\s|\.\.\.\.)\s*')
+ m = paml.search(line)
+ if (m):
+ add_aml(m.group(1), paml.sub("", line))
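#
# Illustrative .lst fragment (made up) showing the two line shapes matched above:
#
#     45:      Name (ID, 0xAA)          <- ASL line, captured by pasl and stored via add_asl()
# 004A:  08 49 44 5F 5F 0A AA           <- AML line, octets checked and appended by add_aml()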
+
+# Now go over code
+# Track AML offset of a previous non-empty ASL command
+prev_aml_offset = -1
+for i in range(len(asl)):
+ debug = "input line %d: %s" % (asl[i].lineno, asl[i].line)
+
+ l = asl[i].line
+
+ # skip if not an extract directive
+ a = len(re.findall(r'ACPI_EXTRACT', l))
+ if (not a):
+ # If not empty, store AML offset. Will be used for sanity checks
+ # IASL seems to put {}. at random places in the listing.
+ # Ignore any non-words for the purpose of this test.
+ m = re.search(r'\w+', l)
+ if (m):
+ prev_aml_offset = asl[i].aml_offset
+ continue
+
+ if (a > 1):
+ die("Expected at most one ACPI_EXTRACT per line, actual %d" % a)
+
+ mext = re.search(r'''
+ ^\s* # leading whitespace
+ /\*\s* # start C comment
+ (ACPI_EXTRACT_\w+) # directive: group(1)
+                       \s+ # whitespace separates directive from array name
+ (\w+) # array name: group(2)
+ \s*\*/ # end of C comment
+ \s*$ # trailing whitespace
+ ''', l, re.VERBOSE)
+ if (not mext):
+ die("Stray ACPI_EXTRACT in input")
+
+ # previous command must have produced some AML,
+ # otherwise we are in a middle of a block
+ if (prev_aml_offset == asl[i].aml_offset):
+ die("ACPI_EXTRACT directive in the middle of a block")
+
+ directive = mext.group(1)
+ array = mext.group(2)
+ offset = asl[i].aml_offset
+
+ if (directive == "ACPI_EXTRACT_ALL_CODE"):
+ if array in output:
+ die("%s directive used more than once" % directive)
+ output[array] = aml
+ continue
+ if (directive == "ACPI_EXTRACT_NAME_BUFFER8"):
+ offset = aml_name_buffer(offset, 8)
+ elif (directive == "ACPI_EXTRACT_NAME_BUFFER16"):
+ offset = aml_name_buffer(offset, 16)
+ elif (directive == "ACPI_EXTRACT_NAME_DWORD_CONST"):
+ offset = aml_name_dword_const(offset)
+ elif (directive == "ACPI_EXTRACT_NAME_WORD_CONST"):
+ offset = aml_name_word_const(offset)
+ elif (directive == "ACPI_EXTRACT_NAME_BYTE_CONST"):
+ offset = aml_name_byte_const(offset)
+ elif (directive == "ACPI_EXTRACT_NAME_STRING"):
+ offset = aml_name_string(offset)
+ elif (directive == "ACPI_EXTRACT_METHOD_STRING"):
+ offset = aml_method_string(offset)
+ elif (directive == "ACPI_EXTRACT_DEVICE_START"):
+ offset = aml_device_start(offset)
+ elif (directive == "ACPI_EXTRACT_DEVICE_STRING"):
+ offset = aml_device_string(offset)
+ elif (directive == "ACPI_EXTRACT_DEVICE_END"):
+ offset = aml_device_end(offset)
+ elif (directive == "ACPI_EXTRACT_PROCESSOR_START"):
+ offset = aml_processor_start(offset)
+ elif (directive == "ACPI_EXTRACT_PROCESSOR_STRING"):
+ offset = aml_processor_string(offset)
+ elif (directive == "ACPI_EXTRACT_PROCESSOR_END"):
+ offset = aml_processor_end(offset)
+ elif (directive == "ACPI_EXTRACT_PKG_START"):
+ offset = aml_package_start(offset)
+ else:
+ die("Unsupported directive %s" % directive)
+
+ if array not in output:
+ output[array] = []
+ output[array].append(offset)
+
+debug = "at end of file"
+
+def get_value_type(maxvalue):
+ #Use type large enough to fit the table
+ if (maxvalue >= 0x10000):
+ return "int"
+ elif (maxvalue >= 0x100):
+ return "short"
+ else:
+ return "char"
+
+# Pretty print output
+for array in output.keys():
+ otype = get_value_type(max(output[array]))
+ odata = []
+ for value in output[array]:
+ odata.append("0x%x" % value)
+ sys.stdout.write("static unsigned %s %s[] = {\n" % (otype, array))
+ sys.stdout.write(",\n".join(odata))
+ sys.stdout.write('\n};\n');
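A rough sketch of how this extractor and acpi_extract_preprocess.py (next file) are chained around iasl at build time; the file names, iasl flags and the helper function below are illustrative assumptions, not the exact QEMU build rules:

    import subprocess

    def build_acpi_offsets(src="ssdt.dsl.i", prefix="ssdt"):
        # Hide each ACPI_EXTRACT_* tag in a C comment on its own line so iasl ignores it.
        subprocess.check_call("python acpi_extract_preprocess.py %s > %s.pre" % (src, src), shell=True)
        # Compile with a mixed ASL/AML listing (-l) that acpi_extract.py knows how to parse.
        subprocess.check_call("iasl -l -p %s %s.pre" % (prefix, src), shell=True)
        # Turn the listing into C arrays of AML offsets for the firmware to patch at runtime.
        subprocess.check_call("python acpi_extract.py %s.lst > %s.off" % (prefix, prefix), shell=True)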
diff --git a/scripts/acpi_extract_preprocess.py b/scripts/acpi_extract_preprocess.py
new file mode 100755
index 00000000..69d10d62
--- /dev/null
+++ b/scripts/acpi_extract_preprocess.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+# Copyright (C) 2011 Red Hat, Inc., Michael S. Tsirkin <mst@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+# Read a preprocessed ASL listing and put each ACPI_EXTRACT
+# directive in a comment, to make iasl skip it.
+# We also put each directive on a new line, the machinery
+# in tools/acpi_extract.py requires this.
+
+import re;
+import sys;
+import fileinput;
+
+def die(diag):
+ sys.stderr.write("Error: %s\n" % (diag))
+ sys.exit(1)
+
+# Note: the () around the pattern make split return the matched string as part of the list
+psplit = re.compile(r''' (
+ \b # At word boundary
+ ACPI_EXTRACT_\w+ # directive
+ \s+ # some whitespace
+ \w+ # array name
+ )''', re.VERBOSE);
+
+lineno = 0
+for line in fileinput.input():
+ # line number and debug string to output in case of errors
+ lineno = lineno + 1
+ debug = "input line %d: %s" % (lineno, line.rstrip())
+
+ s = psplit.split(line);
+ # The way split works, each odd item is the matching ACPI_EXTRACT directive.
+ # Put each in a comment, and on a line by itself.
+ for i in range(len(s)):
+ if (i % 2):
+ sys.stdout.write("\n/* %s */\n" % s[i])
+ else:
+ sys.stdout.write(s[i])
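A toy run of the same transformation, assuming a made-up input line, to show what the split above produces:

    import re

    psplit = re.compile(r'(\bACPI_EXTRACT_\w+\s+\w+)')
    parts = psplit.split("ACPI_EXTRACT_NAME_BYTE_CONST ssdt_proc_id Name(ID, 0xAA)\n")
    # parts[1] is the directive; the loop above prints it as a C comment on its
    # own line and passes the remainder through unchanged, so iasl sees:
    #
    #     /* ACPI_EXTRACT_NAME_BYTE_CONST ssdt_proc_id */
    #     Name(ID, 0xAA)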
diff --git a/scripts/analyse-9p-simpletrace.py b/scripts/analyse-9p-simpletrace.py
new file mode 100755
index 00000000..3c3dee43
--- /dev/null
+++ b/scripts/analyse-9p-simpletrace.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+# Pretty print 9p simpletrace log
+# Usage: ./analyse-9p-simpletrace <trace-events> <trace-pid>
+#
+# Author: Harsh Prateek Bora
+import os
+import simpletrace
+
+symbol_9p = {
+ 6 : 'TLERROR',
+ 7 : 'RLERROR',
+ 8 : 'TSTATFS',
+ 9 : 'RSTATFS',
+ 12 : 'TLOPEN',
+ 13 : 'RLOPEN',
+ 14 : 'TLCREATE',
+ 15 : 'RLCREATE',
+ 16 : 'TSYMLINK',
+ 17 : 'RSYMLINK',
+ 18 : 'TMKNOD',
+ 19 : 'RMKNOD',
+ 20 : 'TRENAME',
+ 21 : 'RRENAME',
+ 22 : 'TREADLINK',
+ 23 : 'RREADLINK',
+ 24 : 'TGETATTR',
+ 25 : 'RGETATTR',
+ 26 : 'TSETATTR',
+ 27 : 'RSETATTR',
+ 30 : 'TXATTRWALK',
+ 31 : 'RXATTRWALK',
+ 32 : 'TXATTRCREATE',
+ 33 : 'RXATTRCREATE',
+ 40 : 'TREADDIR',
+ 41 : 'RREADDIR',
+ 50 : 'TFSYNC',
+ 51 : 'RFSYNC',
+ 52 : 'TLOCK',
+ 53 : 'RLOCK',
+ 54 : 'TGETLOCK',
+ 55 : 'RGETLOCK',
+ 70 : 'TLINK',
+ 71 : 'RLINK',
+ 72 : 'TMKDIR',
+ 73 : 'RMKDIR',
+ 74 : 'TRENAMEAT',
+ 75 : 'RRENAMEAT',
+ 76 : 'TUNLINKAT',
+ 77 : 'RUNLINKAT',
+ 100 : 'TVERSION',
+ 101 : 'RVERSION',
+ 102 : 'TAUTH',
+ 103 : 'RAUTH',
+ 104 : 'TATTACH',
+ 105 : 'RATTACH',
+ 106 : 'TERROR',
+ 107 : 'RERROR',
+ 108 : 'TFLUSH',
+ 109 : 'RFLUSH',
+ 110 : 'TWALK',
+ 111 : 'RWALK',
+ 112 : 'TOPEN',
+ 113 : 'ROPEN',
+ 114 : 'TCREATE',
+ 115 : 'RCREATE',
+ 116 : 'TREAD',
+ 117 : 'RREAD',
+ 118 : 'TWRITE',
+ 119 : 'RWRITE',
+ 120 : 'TCLUNK',
+ 121 : 'RCLUNK',
+ 122 : 'TREMOVE',
+ 123 : 'RREMOVE',
+ 124 : 'TSTAT',
+ 125 : 'RSTAT',
+ 126 : 'TWSTAT',
+ 127 : 'RWSTAT'
+}
+
+class VirtFSRequestTracker(simpletrace.Analyzer):
+ def begin(self):
+ print "Pretty printing 9p simpletrace log ..."
+
+ def v9fs_rerror(self, tag, id, err):
+ print "RERROR (tag =", tag, ", id =", symbol_9p[id], ", err = \"", os.strerror(err), "\")"
+
+ def v9fs_version(self, tag, id, msize, version):
+ print "TVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"
+
+ def v9fs_version_return(self, tag, id, msize, version):
+ print "RVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"
+
+ def v9fs_attach(self, tag, id, fid, afid, uname, aname):
+ print "TATTACH (tag =", tag, ", fid =", fid, ", afid =", afid, ", uname =", uname, ", aname =", aname, ")"
+
+ def v9fs_attach_return(self, tag, id, type, version, path):
+ print "RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"
+
+ def v9fs_stat(self, tag, id, fid):
+ print "TSTAT (tag =", tag, ", fid =", fid, ")"
+
+ def v9fs_stat_return(self, tag, id, mode, atime, mtime, length):
+ print "RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")"
+
+ def v9fs_getattr(self, tag, id, fid, request_mask):
+ print "TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")"
+
+ def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid):
+ print "RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")"
+
+ def v9fs_walk(self, tag, id, fid, newfid, nwnames):
+ print "TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")"
+
+ def v9fs_walk_return(self, tag, id, nwnames, qids):
+ print "RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")"
+
+ def v9fs_open(self, tag, id, fid, mode):
+ print "TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")"
+
+ def v9fs_open_return(self, tag, id, type, version, path, iounit):
+ print "ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
+
+ def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid):
+ print "TLCREATE (tag =", tag, ", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")"
+
+ def v9fs_lcreate_return(self, tag, id, type, version, path, iounit):
+ print "RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
+
+ def v9fs_fsync(self, tag, id, fid, datasync):
+ print "TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")"
+
+ def v9fs_clunk(self, tag, id, fid):
+ print "TCLUNK (tag =", tag, ", fid =", fid, ")"
+
+ def v9fs_read(self, tag, id, fid, off, max_count):
+ print "TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")"
+
+ def v9fs_read_return(self, tag, id, count, err):
+ print "RREAD (tag =", tag, ", count =", count, ", err =", err, ")"
+
+ def v9fs_readdir(self, tag, id, fid, offset, max_count):
+ print "TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")"
+
+ def v9fs_readdir_return(self, tag, id, count, retval):
+ print "RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")"
+
+ def v9fs_write(self, tag, id, fid, off, count, cnt):
+ print "TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")"
+
+ def v9fs_write_return(self, tag, id, total, err):
+ print "RWRITE (tag =", tag, ", total =", total, ", err =", err, ")"
+
+ def v9fs_create(self, tag, id, fid, name, perm, mode):
+ print "TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")"
+
+ def v9fs_create_return(self, tag, id, type, version, path, iounit):
+ print "RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
+
+ def v9fs_symlink(self, tag, id, fid, name, symname, gid):
+ print "TSYMLINK (tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")"
+
+ def v9fs_symlink_return(self, tag, id, type, version, path):
+ print "RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"
+
+ def v9fs_flush(self, tag, id, flush_tag):
+ print "TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")"
+
+ def v9fs_link(self, tag, id, dfid, oldfid, name):
+ print "TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")"
+
+ def v9fs_remove(self, tag, id, fid):
+ print "TREMOVE (tag =", tag, ", fid =", fid, ")"
+
+ def v9fs_wstat(self, tag, id, fid, mode, atime, mtime):
+ print "TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, "mtime =", mtime, ")"
+
+ def v9fs_mknod(self, tag, id, fid, mode, major, minor):
+ print "TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")"
+
+ def v9fs_lock(self, tag, id, fid, type, start, length):
+ print "TLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")"
+
+ def v9fs_lock_return(self, tag, id, status):
+ print "RLOCK (tag =", tag, ", status =", status, ")"
+
+ def v9fs_getlock(self, tag, id, fid, type, start, length):
+ print "TGETLOCK (tag =", tag, ", fid =", fid, "type =", type, ", start =", start, ", length =", length, ")"
+
+ def v9fs_getlock_return(self, tag, id, type, start, length, proc_id):
+ print "RGETLOCK (tag =", tag, "type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")"
+
+ def v9fs_mkdir(self, tag, id, fid, name, mode, gid):
+ print "TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")"
+
+ def v9fs_mkdir_return(self, tag, id, type, version, path, err):
+ print "RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")"
+
+ def v9fs_xattrwalk(self, tag, id, fid, newfid, name):
+ print "TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")"
+
+ def v9fs_xattrwalk_return(self, tag, id, size):
+ print "RXATTRWALK (tag =", tag, ", xattrsize =", size, ")"
+
+ def v9fs_xattrcreate(self, tag, id, fid, name, size, flags):
+ print "TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")"
+
+ def v9fs_readlink(self, tag, id, fid):
+ print "TREADLINK (tag =", tag, ", fid =", fid, ")"
+
+ def v9fs_readlink_return(self, tag, id, target):
+ print "RREADLINK (tag =", tag, ", target =", target, ")"
+
+simpletrace.run(VirtFSRequestTracker())
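A hedged usage sketch; the trace file name and the printed values are illustrative. simpletrace.run() dispatches each trace record to the Analyzer method named after the trace event, so every v9fs_* method above prints one line per matching record:

    # ./analyse-9p-simpletrace.py trace-events trace-12345
    #
    # Pretty printing 9p simpletrace log ...
    # TVERSION (tag = 65535 , msize = 8192 , version = 9P2000.L )
    # RVERSION (tag = 65535 , msize = 8192 , version = 9P2000.L )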
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
new file mode 100755
index 00000000..f6894bec
--- /dev/null
+++ b/scripts/analyze-migration.py
@@ -0,0 +1,597 @@
+#!/usr/bin/env python
+#
+# Migration Stream Analyzer
+#
+# Copyright (c) 2015 Alexander Graf <agraf@suse.de>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+import numpy as np
+import json
+import os
+import argparse
+import collections
+import pprint
+
+def mkdir_p(path):
+ try:
+ os.makedirs(path)
+ except OSError:
+ pass
+
+class MigrationFile(object):
+ def __init__(self, filename):
+ self.filename = filename
+ self.file = open(self.filename, "rb")
+
+ def read64(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i8')[0])
+
+ def read32(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i4')[0])
+
+ def read16(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i2')[0])
+
+ def read8(self):
+ return np.asscalar(np.fromfile(self.file, count=1, dtype='>i1')[0])
+
+ def readstr(self, len = None):
+ if len is None:
+ len = self.read8()
+ if len == 0:
+ return ""
+ return np.fromfile(self.file, count=1, dtype=('S%d' % len))[0]
+
+ def readvar(self, size = None):
+ if size is None:
+ size = self.read8()
+ if size == 0:
+ return ""
+ value = self.file.read(size)
+ if len(value) != size:
+ raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
+ return value
+
+ def tell(self):
+ return self.file.tell()
+
+ # The VMSD description is at the end of the file, after EOF. Look for
+ # the last NULL byte, then for the beginning brace of JSON.
+ def read_migration_debug_json(self):
+ QEMU_VM_VMDESCRIPTION = 0x06
+
+ # Remember the offset in the file when we started
+ entrypos = self.file.tell()
+
+ # Read the last 10MB
+ self.file.seek(0, os.SEEK_END)
+ endpos = self.file.tell()
+ self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
+ datapos = self.file.tell()
+ data = self.file.read()
+ # The full file read closed the file as well, reopen it
+ self.file = open(self.filename, "rb")
+
+ # Find the last NULL byte, then the first brace after that. This should
+ # be the beginning of our JSON data.
+ nulpos = data.rfind("\0")
+ jsonpos = data.find("{", nulpos)
+
+ # Check backwards from there and see whether we guessed right
+ self.file.seek(datapos + jsonpos - 5, 0)
+ if self.read8() != QEMU_VM_VMDESCRIPTION:
+ raise Exception("No Debug Migration device found")
+
+ jsonlen = self.read32()
+
+ # Seek back to where we were at the beginning
+ self.file.seek(entrypos, 0)
+
+ return data[jsonpos:jsonpos + jsonlen]
+
+ def close(self):
+ self.file.close()
+
+class RamSection(object):
+ RAM_SAVE_FLAG_COMPRESS = 0x02
+ RAM_SAVE_FLAG_MEM_SIZE = 0x04
+ RAM_SAVE_FLAG_PAGE = 0x08
+ RAM_SAVE_FLAG_EOS = 0x10
+ RAM_SAVE_FLAG_CONTINUE = 0x20
+ RAM_SAVE_FLAG_XBZRLE = 0x40
+ RAM_SAVE_FLAG_HOOK = 0x80
+
+ def __init__(self, file, version_id, ramargs, section_key):
+ if version_id != 4:
+ raise Exception("Unknown RAM version %d" % version_id)
+
+ self.file = file
+ self.section_key = section_key
+ self.TARGET_PAGE_SIZE = ramargs['page_size']
+ self.dump_memory = ramargs['dump_memory']
+ self.write_memory = ramargs['write_memory']
+ self.sizeinfo = collections.OrderedDict()
+ self.data = collections.OrderedDict()
+ self.data['section sizes'] = self.sizeinfo
+ self.name = ''
+ if self.write_memory:
+ self.files = { }
+ if self.dump_memory:
+ self.memory = collections.OrderedDict()
+ self.data['memory'] = self.memory
+
+ def __repr__(self):
+ return self.data.__repr__()
+
+ def __str__(self):
+ return self.data.__str__()
+
+ def getDict(self):
+ return self.data
+
+ def read(self):
+ # Read all RAM sections
+ while True:
+ addr = self.file.read64()
+ flags = addr & (self.TARGET_PAGE_SIZE - 1)
+ addr &= ~(self.TARGET_PAGE_SIZE - 1)
+
+ if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
+ while True:
+ namelen = self.file.read8()
+ # We assume that no RAM chunk is big enough to ever
+ # hit the first byte of the address, so when we see
+ # a zero here we know it has to be an address, not the
+ # length of the next block.
+ if namelen == 0:
+ self.file.file.seek(-1, 1)
+ break
+ self.name = self.file.readstr(len = namelen)
+ len = self.file.read64()
+ self.sizeinfo[self.name] = '0x%016x' % len
+ if self.write_memory:
+ print self.name
+ mkdir_p('./' + os.path.dirname(self.name))
+ f = open('./' + self.name, "wb")
+ f.truncate(0)
+ f.truncate(len)
+ self.files[self.name] = f
+ flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE
+
+ if flags & self.RAM_SAVE_FLAG_COMPRESS:
+ if flags & self.RAM_SAVE_FLAG_CONTINUE:
+ flags &= ~self.RAM_SAVE_FLAG_CONTINUE
+ else:
+ self.name = self.file.readstr()
+ fill_char = self.file.read8()
+ # The page in question is filled with fill_char now
+ if self.write_memory and fill_char != 0:
+ self.files[self.name].seek(addr, os.SEEK_SET)
+ self.files[self.name].write(chr(fill_char) * self.TARGET_PAGE_SIZE)
+ if self.dump_memory:
+ self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
+ flags &= ~self.RAM_SAVE_FLAG_COMPRESS
+ elif flags & self.RAM_SAVE_FLAG_PAGE:
+ if flags & self.RAM_SAVE_FLAG_CONTINUE:
+ flags &= ~self.RAM_SAVE_FLAG_CONTINUE
+ else:
+ self.name = self.file.readstr()
+
+ if self.write_memory or self.dump_memory:
+ data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
+ else: # Just skip RAM data
+ self.file.file.seek(self.TARGET_PAGE_SIZE, 1)
+
+ if self.write_memory:
+ self.files[self.name].seek(addr, os.SEEK_SET)
+ self.files[self.name].write(data)
+ if self.dump_memory:
+ hexdata = " ".join("{0:02x}".format(ord(c)) for c in data)
+ self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata
+
+ flags &= ~self.RAM_SAVE_FLAG_PAGE
+ elif flags & self.RAM_SAVE_FLAG_XBZRLE:
+ raise Exception("XBZRLE RAM compression is not supported yet")
+ elif flags & self.RAM_SAVE_FLAG_HOOK:
+ raise Exception("RAM hooks don't make sense with files")
+
+ # End of RAM section
+ if flags & self.RAM_SAVE_FLAG_EOS:
+ break
+
+ if flags != 0:
+ raise Exception("Unknown RAM flags: %x" % flags)
+
+ def __del__(self):
+ if self.write_memory:
+ for key in self.files:
+ self.files[key].close()
+
+
+class HTABSection(object):
+ HASH_PTE_SIZE_64 = 16
+
+ def __init__(self, file, version_id, device, section_key):
+ if version_id != 1:
+ raise Exception("Unknown HTAB version %d" % version_id)
+
+ self.file = file
+ self.section_key = section_key
+
+ def read(self):
+
+ header = self.file.read32()
+
+ if (header > 0):
+ # First section, just the hash shift
+ return
+
+ # Read until end marker
+ while True:
+ index = self.file.read32()
+ n_valid = self.file.read16()
+ n_invalid = self.file.read16()
+
+ if index == 0 and n_valid == 0 and n_invalid == 0:
+ break
+
+ self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)
+
+ def getDict(self):
+ return ""
+
+class VMSDFieldGeneric(object):
+ def __init__(self, desc, file):
+ self.file = file
+ self.desc = desc
+ self.data = ""
+
+ def __repr__(self):
+ return str(self.__str__())
+
+ def __str__(self):
+ return " ".join("{0:02x}".format(ord(c)) for c in self.data)
+
+ def getDict(self):
+ return self.__str__()
+
+ def read(self):
+ size = int(self.desc['size'])
+ self.data = self.file.readvar(size)
+ return self.data
+
+class VMSDFieldInt(VMSDFieldGeneric):
+ def __init__(self, desc, file):
+ super(VMSDFieldInt, self).__init__(desc, file)
+ self.size = int(desc['size'])
+ self.format = '0x%%0%dx' % (self.size * 2)
+ self.sdtype = '>i%d' % self.size
+ self.udtype = '>u%d' % self.size
+
+ def __repr__(self):
+ if self.data < 0:
+ return ('%s (%d)' % ((self.format % self.udata), self.data))
+ else:
+ return self.format % self.data
+
+ def __str__(self):
+ return self.__repr__()
+
+ def getDict(self):
+ return self.__str__()
+
+ def read(self):
+ super(VMSDFieldInt, self).read()
+ self.sdata = np.fromstring(self.data, count=1, dtype=(self.sdtype))[0]
+ self.udata = np.fromstring(self.data, count=1, dtype=(self.udtype))[0]
+ self.data = self.sdata
+ return self.data
+
+class VMSDFieldUInt(VMSDFieldInt):
+ def __init__(self, desc, file):
+ super(VMSDFieldUInt, self).__init__(desc, file)
+
+ def read(self):
+ super(VMSDFieldUInt, self).read()
+ self.data = self.udata
+ return self.data
+
+class VMSDFieldIntLE(VMSDFieldInt):
+ def __init__(self, desc, file):
+ super(VMSDFieldIntLE, self).__init__(desc, file)
+ self.dtype = '<i%d' % self.size
+
+class VMSDFieldBool(VMSDFieldGeneric):
+ def __init__(self, desc, file):
+ super(VMSDFieldBool, self).__init__(desc, file)
+
+ def __repr__(self):
+ return self.data.__repr__()
+
+ def __str__(self):
+ return self.data.__str__()
+
+ def getDict(self):
+ return self.data
+
+ def read(self):
+ super(VMSDFieldBool, self).read()
+ if self.data[0] == 0:
+ self.data = False
+ else:
+ self.data = True
+ return self.data
+
+class VMSDFieldStruct(VMSDFieldGeneric):
+ QEMU_VM_SUBSECTION = 0x05
+
+ def __init__(self, desc, file):
+ super(VMSDFieldStruct, self).__init__(desc, file)
+ self.data = collections.OrderedDict()
+
+ # When we see compressed array elements, unfold them here
+ new_fields = []
+ for field in self.desc['struct']['fields']:
+ if not 'array_len' in field:
+ new_fields.append(field)
+ continue
+ array_len = field.pop('array_len')
+ field['index'] = 0
+ new_fields.append(field)
+ for i in xrange(1, array_len):
+ c = field.copy()
+ c['index'] = i
+ new_fields.append(c)
+
+ self.desc['struct']['fields'] = new_fields
+
+ def __repr__(self):
+ return self.data.__repr__()
+
+ def __str__(self):
+ return self.data.__str__()
+
+ def read(self):
+ for field in self.desc['struct']['fields']:
+ try:
+ reader = vmsd_field_readers[field['type']]
+ except:
+ reader = VMSDFieldGeneric
+
+ field['data'] = reader(field, self.file)
+ field['data'].read()
+
+ if 'index' in field:
+ if field['name'] not in self.data:
+ self.data[field['name']] = []
+ a = self.data[field['name']]
+ if len(a) != int(field['index']):
+ raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
+ a.append(field['data'])
+ else:
+ self.data[field['name']] = field['data']
+
+ if 'subsections' in self.desc['struct']:
+ for subsection in self.desc['struct']['subsections']:
+ if self.file.read8() != self.QEMU_VM_SUBSECTION:
+ raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
+ name = self.file.readstr()
+ version_id = self.file.read32()
+ self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
+ self.data[name].read()
+
+ def getDictItem(self, value):
+ # Strings would fall into the array category, treat
+ # them specially
+ if value.__class__ is ''.__class__:
+ return value
+
+ try:
+ return self.getDictOrderedDict(value)
+ except:
+ try:
+ return self.getDictArray(value)
+ except:
+ try:
+ return value.getDict()
+ except:
+ return value
+
+ def getDictArray(self, array):
+ r = []
+ for value in array:
+ r.append(self.getDictItem(value))
+ return r
+
+ def getDictOrderedDict(self, dict):
+ r = collections.OrderedDict()
+ for (key, value) in dict.items():
+ r[key] = self.getDictItem(value)
+ return r
+
+ def getDict(self):
+ return self.getDictOrderedDict(self.data)
+
+vmsd_field_readers = {
+ "bool" : VMSDFieldBool,
+ "int8" : VMSDFieldInt,
+ "int16" : VMSDFieldInt,
+ "int32" : VMSDFieldInt,
+ "int32 equal" : VMSDFieldInt,
+ "int32 le" : VMSDFieldIntLE,
+ "int64" : VMSDFieldInt,
+ "uint8" : VMSDFieldUInt,
+ "uint16" : VMSDFieldUInt,
+ "uint32" : VMSDFieldUInt,
+ "uint32 equal" : VMSDFieldUInt,
+ "uint64" : VMSDFieldUInt,
+ "int64 equal" : VMSDFieldInt,
+ "uint8 equal" : VMSDFieldInt,
+ "uint16 equal" : VMSDFieldInt,
+ "float64" : VMSDFieldGeneric,
+ "timer" : VMSDFieldGeneric,
+ "buffer" : VMSDFieldGeneric,
+ "unused_buffer" : VMSDFieldGeneric,
+ "bitmap" : VMSDFieldGeneric,
+ "struct" : VMSDFieldStruct,
+ "unknown" : VMSDFieldGeneric,
+}
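#
# Illustrative dispatch (the field description is made up): a VMSD JSON entry
# such as {"name": "regs", "type": "uint32", "size": 4, "array_len": 16} is
# unfolded by VMSDFieldStruct.__init__() into 16 indexed fields, each of which
# is then read by vmsd_field_readers["uint32"], i.e. VMSDFieldUInt, 4 bytes apiece.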
+
+class VMSDSection(VMSDFieldStruct):
+ def __init__(self, file, version_id, device, section_key):
+ self.file = file
+ self.data = ""
+ self.vmsd_name = ""
+ self.section_key = section_key
+ desc = device
+ if 'vmsd_name' in device:
+ self.vmsd_name = device['vmsd_name']
+
+ # A section really is nothing but a FieldStruct :)
+ super(VMSDSection, self).__init__({ 'struct' : desc }, file)
+
+###############################################################################
+
+class MigrationDump(object):
+ QEMU_VM_FILE_MAGIC = 0x5145564d
+ QEMU_VM_FILE_VERSION = 0x00000003
+ QEMU_VM_EOF = 0x00
+ QEMU_VM_SECTION_START = 0x01
+ QEMU_VM_SECTION_PART = 0x02
+ QEMU_VM_SECTION_END = 0x03
+ QEMU_VM_SECTION_FULL = 0x04
+ QEMU_VM_SUBSECTION = 0x05
+ QEMU_VM_VMDESCRIPTION = 0x06
+ QEMU_VM_SECTION_FOOTER= 0x7e
+
+ def __init__(self, filename):
+ self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ],
+ ( 'spapr/htab', 0) : ( HTABSection, None ) }
+ self.filename = filename
+ self.vmsd_desc = None
+
+ def read(self, desc_only = False, dump_memory = False, write_memory = False):
+ # Read in the whole file
+ file = MigrationFile(self.filename)
+
+ # File magic
+ data = file.read32()
+ if data != self.QEMU_VM_FILE_MAGIC:
+ raise Exception("Invalid file magic %x" % data)
+
+ # Version (has to be v3)
+ data = file.read32()
+ if data != self.QEMU_VM_FILE_VERSION:
+ raise Exception("Invalid version number %d" % data)
+
+ self.load_vmsd_json(file)
+
+ # Read sections
+ self.sections = collections.OrderedDict()
+
+ if desc_only:
+ return
+
+ ramargs = {}
+ ramargs['page_size'] = self.vmsd_desc['page_size']
+ ramargs['dump_memory'] = dump_memory
+ ramargs['write_memory'] = write_memory
+ self.section_classes[('ram',0)][1] = ramargs
+
+ while True:
+ section_type = file.read8()
+ if section_type == self.QEMU_VM_EOF:
+ break
+ elif section_type == self.QEMU_VM_SECTION_START or section_type == self.QEMU_VM_SECTION_FULL:
+ section_id = file.read32()
+ name = file.readstr()
+ instance_id = file.read32()
+ version_id = file.read32()
+ section_key = (name, instance_id)
+ classdesc = self.section_classes[section_key]
+ section = classdesc[0](file, version_id, classdesc[1], section_key)
+ self.sections[section_id] = section
+ section.read()
+ elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
+ section_id = file.read32()
+ self.sections[section_id].read()
+ elif section_type == self.QEMU_VM_SECTION_FOOTER:
+ read_section_id = file.read32()
+ if read_section_id != section_id:
+ raise Exception("Mismatched section footer: %x vs %x" % (read_section_id, section_id))
+ else:
+ raise Exception("Unknown section type: %d" % section_type)
+ file.close()
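    #
    # Rough shape of the stream consumed by read() above (field widths as read
    # by MigrationFile, all big-endian):
    #     u32 QEMU_VM_FILE_MAGIC, u32 QEMU_VM_FILE_VERSION
    #     then one u8 section marker per section until QEMU_VM_EOF:
    #         SECTION_START/FULL: u32 section_id, string name, u32 instance_id,
    #                             u32 version_id, section payload
    #         SECTION_PART/END:   u32 section_id, section payload
    #         SECTION_FOOTER:     u32 section_id (checked against the last id read)
    #     the JSON device description (QEMU_VM_VMDESCRIPTION) sits after the EOF
    #     marker and is recovered separately by read_migration_debug_json().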
+
+ def load_vmsd_json(self, file):
+ vmsd_json = file.read_migration_debug_json()
+ self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
+ for device in self.vmsd_desc['devices']:
+ key = (device['name'], device['instance_id'])
+ value = ( VMSDSection, device )
+ self.section_classes[key] = value
+
+ def getDict(self):
+ r = collections.OrderedDict()
+ for (key, value) in self.sections.items():
+ key = "%s (%d)" % ( value.section_key[0], key )
+ r[key] = value.getDict()
+ return r
+
+###############################################################################
+
+class JSONEncoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, VMSDFieldGeneric):
+ return str(o)
+ return json.JSONEncoder.default(self, o)
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-f", "--file", help='migration dump to read from', required=True)
+parser.add_argument("-m", "--memory", help='dump RAM contents as well', action='store_true')
+parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")', default='state')
+parser.add_argument("-x", "--extract", help='extract contents into individual files', action='store_true')
+args = parser.parse_args()
+
+jsonenc = JSONEncoder(indent=4, separators=(',', ': '))
+
+if args.extract:
+ dump = MigrationDump(args.file)
+
+ dump.read(desc_only = True)
+ print "desc.json"
+ f = open("desc.json", "wb")
+ f.truncate()
+ f.write(jsonenc.encode(dump.vmsd_desc))
+ f.close()
+
+ dump.read(write_memory = True)
+ dict = dump.getDict()
+ print "state.json"
+ f = open("state.json", "wb")
+ f.truncate()
+ f.write(jsonenc.encode(dict))
+ f.close()
+elif args.dump == "state":
+ dump = MigrationDump(args.file)
+ dump.read(dump_memory = args.memory)
+ dict = dump.getDict()
+ print jsonenc.encode(dict)
+elif args.dump == "desc":
+ dump = MigrationDump(args.file)
+ dump.read(desc_only = True)
+ print jsonenc.encode(dump.vmsd_desc)
+else:
+ raise Exception("Please specify either -x, -d state or -d dump")
diff --git a/scripts/check-qerror.sh b/scripts/check-qerror.sh
new file mode 100755
index 00000000..af7fbd52
--- /dev/null
+++ b/scripts/check-qerror.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+# This script verifies that qerror definitions and table entries are
+# alphabetically ordered.
+
+check_order() {
+ errmsg=$1
+ shift
+
+ # sort -C verifies order but does not print a message. sort -c does print a
+ # message. These options are both in POSIX.
+ if ! "$@" | sort -C; then
+ echo "$errmsg"
+ "$@" | sort -c
+ exit 1
+ fi
+ return 0
+}
+
+check_order 'Definitions in qerror.h must be in alphabetical order:' \
+ grep '^#define QERR_' qerror.h
+check_order 'Entries in qerror.c:qerror_table must be in alphabetical order:' \
+ sed -n '/^static.*qerror_table\[\]/,/^};/s/QERR_/&/gp' qerror.c
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
new file mode 100755
index 00000000..7f0aae97
--- /dev/null
+++ b/scripts/checkpatch.pl
@@ -0,0 +1,2980 @@
+#!/usr/bin/perl -w
+# (c) 2001, Dave Jones. (the file handling bit)
+# (c) 2005, Joel Schopp <jschopp@austin.ibm.com> (the ugly bit)
+# (c) 2007,2008, Andy Whitcroft <apw@uk.ibm.com> (new conditions, test suite)
+# (c) 2008-2010 Andy Whitcroft <apw@canonical.com>
+# Licensed under the terms of the GNU GPL License version 2
+
+use strict;
+
+my $P = $0;
+$P =~ s@.*/@@g;
+
+my $V = '0.31';
+
+use Getopt::Long qw(:config no_auto_abbrev);
+
+my $quiet = 0;
+my $tree = 1;
+my $chk_signoff = 1;
+my $chk_patch = 1;
+my $tst_only;
+my $emacs = 0;
+my $terse = 0;
+my $file = 0;
+my $check = 0;
+my $summary = 1;
+my $mailback = 0;
+my $summary_file = 0;
+my $root;
+my %debug;
+my $help = 0;
+
+sub help {
+ my ($exitcode) = @_;
+
+ print << "EOM";
+Usage: $P [OPTION]... [FILE]...
+Version: $V
+
+Options:
+ -q, --quiet quiet
+ --no-tree run without a kernel tree
+ --no-signoff do not check for 'Signed-off-by' line
+ --patch treat FILE as patchfile (default)
+ --emacs emacs compile window format
+ --terse one line per report
+ -f, --file treat FILE as regular source file
+ --subjective, --strict enable more subjective tests
+ --root=PATH PATH to the kernel tree root
+ --no-summary suppress the per-file summary
+ --mailback only produce a report in case of warnings/errors
+ --summary-file include the filename in summary
+ --debug KEY=[0|1] turn on/off debugging of KEY, where KEY is one of
+ 'values', 'possible', 'type', and 'attr' (default
+ is all off)
+ --test-only=WORD report only warnings/errors containing WORD
+ literally
+ -h, --help, --version display this help and exit
+
+When FILE is - read standard input.
+EOM
+
+ exit($exitcode);
+}
+
+GetOptions(
+ 'q|quiet+' => \$quiet,
+ 'tree!' => \$tree,
+ 'signoff!' => \$chk_signoff,
+ 'patch!' => \$chk_patch,
+ 'emacs!' => \$emacs,
+ 'terse!' => \$terse,
+ 'f|file!' => \$file,
+ 'subjective!' => \$check,
+ 'strict!' => \$check,
+ 'root=s' => \$root,
+ 'summary!' => \$summary,
+ 'mailback!' => \$mailback,
+ 'summary-file!' => \$summary_file,
+
+ 'debug=s' => \%debug,
+ 'test-only=s' => \$tst_only,
+ 'h|help' => \$help,
+ 'version' => \$help
+) or help(1);
+
+help(0) if ($help);
+
+my $exit = 0;
+
+if ($#ARGV < 0) {
+ print "$P: no input files\n";
+ exit(1);
+}
+
+my $dbg_values = 0;
+my $dbg_possible = 0;
+my $dbg_type = 0;
+my $dbg_attr = 0;
+my $dbg_adv_dcs = 0;
+my $dbg_adv_checking = 0;
+my $dbg_adv_apw = 0;
+for my $key (keys %debug) {
+ ## no critic
+ eval "\${dbg_$key} = '$debug{$key}';";
+ die "$@" if ($@);
+}
+
+my $rpt_cleaners = 0;
+
+if ($terse) {
+ $emacs = 1;
+ $quiet++;
+}
+
+if ($tree) {
+ if (defined $root) {
+ if (!top_of_kernel_tree($root)) {
+ die "$P: $root: --root does not point at a valid tree\n";
+ }
+ } else {
+ if (top_of_kernel_tree('.')) {
+ $root = '.';
+ } elsif ($0 =~ m@(.*)/scripts/[^/]*$@ &&
+ top_of_kernel_tree($1)) {
+ $root = $1;
+ }
+ }
+
+ if (!defined $root) {
+ print "Must be run from the top-level dir. of a kernel tree\n";
+ exit(2);
+ }
+}
+
+my $emitted_corrupt = 0;
+
+our $Ident = qr{
+ [A-Za-z_][A-Za-z\d_]*
+ (?:\s*\#\#\s*[A-Za-z_][A-Za-z\d_]*)*
+ }x;
+our $Storage = qr{extern|static|asmlinkage};
+our $Sparse = qr{
+ __user|
+ __kernel|
+ __force|
+ __iomem|
+ __must_check|
+ __init_refok|
+ __kprobes|
+ __ref
+ }x;
+
+# Notes to $Attribute:
+# We need \b after 'init' otherwise 'initconst' will cause a false positive in a check
+our $Attribute = qr{
+ const|
+ __percpu|
+ __nocast|
+ __safe|
+ __bitwise__|
+ __packed__|
+ __packed2__|
+ __naked|
+ __maybe_unused|
+ __always_unused|
+ __noreturn|
+ __used|
+ __cold|
+ __noclone|
+ __deprecated|
+ __read_mostly|
+ __kprobes|
+ __(?:mem|cpu|dev|)(?:initdata|initconst|init\b)|
+ ____cacheline_aligned|
+ ____cacheline_aligned_in_smp|
+ ____cacheline_internodealigned_in_smp|
+ __weak
+ }x;
+our $Modifier;
+our $Inline = qr{inline|__always_inline|noinline};
+our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]};
+our $Lval = qr{$Ident(?:$Member)*};
+
+our $Constant = qr{(?:[0-9]+|0x[0-9a-fA-F]+)[UL]*};
+our $Assignment = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)};
+our $Compare = qr{<=|>=|==|!=|<|>};
+our $Operators = qr{
+ <=|>=|==|!=|
+ =>|->|<<|>>|<|>|!|~|
+ &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|%
+ }x;
+
+our $NonptrType;
+our $Type;
+our $Declare;
+
+our $UTF8 = qr {
+ [\x09\x0A\x0D\x20-\x7E] # ASCII
+ | [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte
+ | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs
+ | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte
+ | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates
+ | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3
+ | [\xF1-\xF3][\x80-\xBF]{3} # planes 4-15
+ | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16
+}x;
+
+# There are still some false positives, but this catches most
+# common cases.
+our $typeTypedefs = qr{(?x:
+ [A-Z][A-Z\d_]*[a-z][A-Za-z\d_]* # camelcase
+ | [A-Z][A-Z\d_]*AIOCB # all uppercase
+ | [A-Z][A-Z\d_]*CPU # all uppercase
+ | QEMUBH # all uppercase
+)};
+
+our $logFunctions = qr{(?x:
+ printk|
+ pr_(debug|dbg|vdbg|devel|info|warning|err|notice|alert|crit|emerg|cont)|
+ (dev|netdev|netif)_(printk|dbg|vdbg|info|warn|err|notice|alert|crit|emerg|WARN)|
+ WARN|
+ panic
+)};
+
+our @typeList = (
+ qr{void},
+ qr{(?:unsigned\s+)?char},
+ qr{(?:unsigned\s+)?short},
+ qr{(?:unsigned\s+)?int},
+ qr{(?:unsigned\s+)?long},
+ qr{(?:unsigned\s+)?long\s+int},
+ qr{(?:unsigned\s+)?long\s+long},
+ qr{(?:unsigned\s+)?long\s+long\s+int},
+ qr{unsigned},
+ qr{float},
+ qr{double},
+ qr{bool},
+ qr{struct\s+$Ident},
+ qr{union\s+$Ident},
+ qr{enum\s+$Ident},
+ qr{${Ident}_t},
+ qr{${Ident}_handler},
+ qr{${Ident}_handler_fn},
+);
+our @modifierList = (
+ qr{fastcall},
+);
+
+our $allowed_asm_includes = qr{(?x:
+ irq|
+ memory
+)};
+# memory.h: ARM has a custom one
+
+sub build_types {
+ my $mods = "(?x: \n" . join("|\n ", @modifierList) . "\n)";
+ my $all = "(?x: \n" . join("|\n ", @typeList) . "\n)";
+ $Modifier = qr{(?:$Attribute|$Sparse|$mods)};
+ $NonptrType = qr{
+ (?:$Modifier\s+|const\s+)*
+ (?:
+ (?:typeof|__typeof__)\s*\(\s*\**\s*$Ident\s*\)|
+ (?:$typeTypedefs\b)|
+ (?:${all}\b)
+ )
+ (?:\s+$Modifier|\s+const)*
+ }x;
+ $Type = qr{
+ $NonptrType
+ (?:[\s\*]+\s*const|[\s\*]+|(?:\s*\[\s*\])+)?
+ (?:\s+$Inline|\s+$Modifier)*
+ }x;
+ $Declare = qr{(?:$Storage\s+)?$Type};
+}
+build_types();
+
+$chk_signoff = 0 if ($file);
+
+my @dep_includes = ();
+my @dep_functions = ();
+my $removal = "Documentation/feature-removal-schedule.txt";
+if ($tree && -f "$root/$removal") {
+ open(my $REMOVE, '<', "$root/$removal") ||
+ die "$P: $removal: open failed - $!\n";
+ while (<$REMOVE>) {
+ if (/^Check:\s+(.*\S)/) {
+ for my $entry (split(/[, ]+/, $1)) {
+ if ($entry =~ m@include/(.*)@) {
+ push(@dep_includes, $1);
+
+ } elsif ($entry !~ m@/@) {
+ push(@dep_functions, $entry);
+ }
+ }
+ }
+ }
+ close($REMOVE);
+}
+
+my @rawlines = ();
+my @lines = ();
+my $vname;
+for my $filename (@ARGV) {
+ my $FILE;
+ if ($file) {
+ open($FILE, '-|', "diff -u /dev/null $filename") ||
+ die "$P: $filename: diff failed - $!\n";
+ } elsif ($filename eq '-') {
+ open($FILE, '<&STDIN');
+ } else {
+ open($FILE, '<', "$filename") ||
+ die "$P: $filename: open failed - $!\n";
+ }
+ if ($filename eq '-') {
+ $vname = 'Your patch';
+ } else {
+ $vname = $filename;
+ }
+ while (<$FILE>) {
+ chomp;
+ push(@rawlines, $_);
+ }
+ close($FILE);
+ if (!process($filename)) {
+ $exit = 1;
+ }
+ @rawlines = ();
+ @lines = ();
+}
+
+exit($exit);
+
+sub top_of_kernel_tree {
+ my ($root) = @_;
+
+ my @tree_check = (
+ "COPYING", "MAINTAINERS", "Makefile",
+ "README", "docs", "VERSION",
+ "vl.c"
+ );
+
+ foreach my $check (@tree_check) {
+ if (! -e $root . '/' . $check) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+sub expand_tabs {
+ my ($str) = @_;
+
+ my $res = '';
+ my $n = 0;
+ for my $c (split(//, $str)) {
+ if ($c eq "\t") {
+ $res .= ' ';
+ $n++;
+ for (; ($n % 8) != 0; $n++) {
+ $res .= ' ';
+ }
+ next;
+ }
+ $res .= $c;
+ $n++;
+ }
+
+ return $res;
+}
+sub copy_spacing {
+ (my $res = shift) =~ tr/\t/ /c;
+ return $res;
+}
+
+sub line_stats {
+ my ($line) = @_;
+
+ # Drop the diff line leader and expand tabs
+ $line =~ s/^.//;
+ $line = expand_tabs($line);
+
+ # Pick the indent from the front of the line.
+ my ($white) = ($line =~ /^(\s*)/);
+
+ return (length($line), length($white));
+}
+
+my $sanitise_quote = '';
+
+sub sanitise_line_reset {
+ my ($in_comment) = @_;
+
+ if ($in_comment) {
+ $sanitise_quote = '*/';
+ } else {
+ $sanitise_quote = '';
+ }
+}
+sub sanitise_line {
+ my ($line) = @_;
+
+ my $res = '';
+ my $l = '';
+
+ my $qlen = 0;
+ my $off = 0;
+ my $c;
+
+ # Always copy over the diff marker.
+ $res = substr($line, 0, 1);
+
+ for ($off = 1; $off < length($line); $off++) {
+ $c = substr($line, $off, 1);
+
+        # Comments we are whacking completely including the begin
+ # and end, all to $;.
+ if ($sanitise_quote eq '' && substr($line, $off, 2) eq '/*') {
+ $sanitise_quote = '*/';
+
+ substr($res, $off, 2, "$;$;");
+ $off++;
+ next;
+ }
+ if ($sanitise_quote eq '*/' && substr($line, $off, 2) eq '*/') {
+ $sanitise_quote = '';
+ substr($res, $off, 2, "$;$;");
+ $off++;
+ next;
+ }
+ if ($sanitise_quote eq '' && substr($line, $off, 2) eq '//') {
+ $sanitise_quote = '//';
+
+ substr($res, $off, 2, $sanitise_quote);
+ $off++;
+ next;
+ }
+
+ # A \ in a string means ignore the next character.
+ if (($sanitise_quote eq "'" || $sanitise_quote eq '"') &&
+ $c eq "\\") {
+ substr($res, $off, 2, 'XX');
+ $off++;
+ next;
+ }
+ # Regular quotes.
+ if ($c eq "'" || $c eq '"') {
+ if ($sanitise_quote eq '') {
+ $sanitise_quote = $c;
+
+ substr($res, $off, 1, $c);
+ next;
+ } elsif ($sanitise_quote eq $c) {
+ $sanitise_quote = '';
+ }
+ }
+
+ #print "c<$c> SQ<$sanitise_quote>\n";
+ if ($off != 0 && $sanitise_quote eq '*/' && $c ne "\t") {
+ substr($res, $off, 1, $;);
+ } elsif ($off != 0 && $sanitise_quote eq '//' && $c ne "\t") {
+ substr($res, $off, 1, $;);
+ } elsif ($off != 0 && $sanitise_quote && $c ne "\t") {
+ substr($res, $off, 1, 'X');
+ } else {
+ substr($res, $off, 1, $c);
+ }
+ }
+
+ if ($sanitise_quote eq '//') {
+ $sanitise_quote = '';
+ }
+
+ # The pathname on a #include may be surrounded by '<' and '>'.
+ if ($res =~ /^.\s*\#\s*include\s+\<(.*)\>/) {
+ my $clean = 'X' x length($1);
+ $res =~ s@\<.*\>@<$clean>@;
+
+ # The whole of a #error is a string.
+ } elsif ($res =~ /^.\s*\#\s*(?:error|warning)\s+(.*)\b/) {
+ my $clean = 'X' x length($1);
+ $res =~ s@(\#\s*(?:error|warning)\s+).*@$1$clean@;
+ }
+
+ return $res;
+}
+
+sub ctx_statement_block {
+ my ($linenr, $remain, $off) = @_;
+ my $line = $linenr - 1;
+ my $blk = '';
+ my $soff = $off;
+ my $coff = $off - 1;
+ my $coff_set = 0;
+
+ my $loff = 0;
+
+ my $type = '';
+ my $level = 0;
+ my @stack = ();
+ my $p;
+ my $c;
+ my $len = 0;
+
+ my $remainder;
+ while (1) {
+ @stack = (['', 0]) if ($#stack == -1);
+
+ #warn "CSB: blk<$blk> remain<$remain>\n";
+ # If we are about to drop off the end, pull in more
+ # context.
+ if ($off >= $len) {
+ for (; $remain > 0; $line++) {
+ last if (!defined $lines[$line]);
+ next if ($lines[$line] =~ /^-/);
+ $remain--;
+ $loff = $len;
+ $blk .= $lines[$line] . "\n";
+ $len = length($blk);
+ $line++;
+ last;
+ }
+ # Bail if there is no further context.
+ #warn "CSB: blk<$blk> off<$off> len<$len>\n";
+ if ($off >= $len) {
+ last;
+ }
+ }
+ $p = $c;
+ $c = substr($blk, $off, 1);
+ $remainder = substr($blk, $off);
+
+ #warn "CSB: c<$c> type<$type> level<$level> remainder<$remainder> coff_set<$coff_set>\n";
+
+ # Handle nested #if/#else.
+ if ($remainder =~ /^#\s*(?:ifndef|ifdef|if)\s/) {
+ push(@stack, [ $type, $level ]);
+ } elsif ($remainder =~ /^#\s*(?:else|elif)\b/) {
+ ($type, $level) = @{$stack[$#stack - 1]};
+ } elsif ($remainder =~ /^#\s*endif\b/) {
+ ($type, $level) = @{pop(@stack)};
+ }
+
+ # Statement ends at the ';' or a close '}' at the
+ # outermost level.
+ if ($level == 0 && $c eq ';') {
+ last;
+ }
+
+ # An else is really a conditional as long as its not else if
+ if ($level == 0 && $coff_set == 0 &&
+ (!defined($p) || $p =~ /(?:\s|\}|\+)/) &&
+ $remainder =~ /^(else)(?:\s|{)/ &&
+ $remainder !~ /^else\s+if\b/) {
+ $coff = $off + length($1) - 1;
+ $coff_set = 1;
+ #warn "CSB: mark coff<$coff> soff<$soff> 1<$1>\n";
+ #warn "[" . substr($blk, $soff, $coff - $soff + 1) . "]\n";
+ }
+
+ if (($type eq '' || $type eq '(') && $c eq '(') {
+ $level++;
+ $type = '(';
+ }
+ if ($type eq '(' && $c eq ')') {
+ $level--;
+ $type = ($level != 0)? '(' : '';
+
+ if ($level == 0 && $coff < $soff) {
+ $coff = $off;
+ $coff_set = 1;
+ #warn "CSB: mark coff<$coff>\n";
+ }
+ }
+ if (($type eq '' || $type eq '{') && $c eq '{') {
+ $level++;
+ $type = '{';
+ }
+ if ($type eq '{' && $c eq '}') {
+ $level--;
+ $type = ($level != 0)? '{' : '';
+
+ if ($level == 0) {
+ if (substr($blk, $off + 1, 1) eq ';') {
+ $off++;
+ }
+ last;
+ }
+ }
+ $off++;
+ }
+ # We are truly at the end, so shuffle to the next line.
+ if ($off == $len) {
+ $loff = $len + 1;
+ $line++;
+ $remain--;
+ }
+
+ my $statement = substr($blk, $soff, $off - $soff + 1);
+ my $condition = substr($blk, $soff, $coff - $soff + 1);
+
+ #warn "STATEMENT<$statement>\n";
+ #warn "CONDITION<$condition>\n";
+
+ #print "coff<$coff> soff<$off> loff<$loff>\n";
+
+ return ($statement, $condition,
+ $line, $remain + 1, $off - $loff + 1, $level);
+}
+
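+# statement_lines: count the lines of a statement, ignoring the diff line
+# prefixes and any blank lines at the start and end.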
+sub statement_lines {
+ my ($stmt) = @_;
+
+	# Strip the diff line prefixes and remove blank lines at the start and end.
+ $stmt =~ s/(^|\n)./$1/g;
+ $stmt =~ s/^\s*//;
+ $stmt =~ s/\s*$//;
+
+ my @stmt_lines = ($stmt =~ /\n/g);
+
+ return $#stmt_lines + 2;
+}
+
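+# statement_rawlines: count the raw lines a statement spans, prefixes and all.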
+sub statement_rawlines {
+ my ($stmt) = @_;
+
+ my @stmt_lines = ($stmt =~ /\n/g);
+
+ return $#stmt_lines + 2;
+}
+
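+# statement_block_size: estimate the size of a brace block as the larger of
+# its line count and the number of ';'-terminated statements it contains.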
+sub statement_block_size {
+ my ($stmt) = @_;
+
+ $stmt =~ s/(^|\n)./$1/g;
+ $stmt =~ s/^\s*{//;
+ $stmt =~ s/}\s*$//;
+ $stmt =~ s/^\s*//;
+ $stmt =~ s/\s*$//;
+
+ my @stmt_lines = ($stmt =~ /\n/g);
+ my @stmt_statements = ($stmt =~ /;/g);
+
+ my $stmt_lines = $#stmt_lines + 2;
+ my $stmt_statements = $#stmt_statements + 1;
+
+ if ($stmt_lines > $stmt_statements) {
+ return $stmt_lines;
+ } else {
+ return $stmt_statements;
+ }
+}
+
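+# ctx_statement_full: grab the statement at the given position together with
+# any else/do continuations; returns the nesting level, the final line number
+# and a list of [condition, block] chunks.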
+sub ctx_statement_full {
+ my ($linenr, $remain, $off) = @_;
+ my ($statement, $condition, $level);
+
+ my (@chunks);
+
+ # Grab the first conditional/block pair.
+ ($statement, $condition, $linenr, $remain, $off, $level) =
+ ctx_statement_block($linenr, $remain, $off);
+ #print "F: c<$condition> s<$statement> remain<$remain>\n";
+ push(@chunks, [ $condition, $statement ]);
+ if (!($remain > 0 && $condition =~ /^\s*(?:\n[+-])?\s*(?:if|else|do)\b/s)) {
+ return ($level, $linenr, @chunks);
+ }
+
+ # Pull in the following conditional/block pairs and see if they
+ # could continue the statement.
+ for (;;) {
+ ($statement, $condition, $linenr, $remain, $off, $level) =
+ ctx_statement_block($linenr, $remain, $off);
+ #print "C: c<$condition> s<$statement> remain<$remain>\n";
+ last if (!($remain > 0 && $condition =~ /^(?:\s*\n[+-])*\s*(?:else|do)\b/s));
+ #print "C: push\n";
+ push(@chunks, [ $condition, $statement ]);
+ }
+
+ return ($level, $linenr, @chunks);
+}
+
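+# ctx_block_get: generic block extractor.  Walks the raw patch lines from the
+# given position, tracking nesting of the supplied open/close characters and
+# nested #if/#else/#endif; with $outer set, only the outermost lines are kept.
+# Returns the final nesting level and the collected raw lines.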
+sub ctx_block_get {
+ my ($linenr, $remain, $outer, $open, $close, $off) = @_;
+ my $line;
+ my $start = $linenr - 1;
+ my $blk = '';
+ my @o;
+ my @c;
+ my @res = ();
+
+ my $level = 0;
+ my @stack = ($level);
+ for ($line = $start; $remain > 0; $line++) {
+ next if ($rawlines[$line] =~ /^-/);
+ $remain--;
+
+ $blk .= $rawlines[$line];
+
+ # Handle nested #if/#else.
+ if ($lines[$line] =~ /^.\s*#\s*(?:ifndef|ifdef|if)\s/) {
+ push(@stack, $level);
+ } elsif ($lines[$line] =~ /^.\s*#\s*(?:else|elif)\b/) {
+ $level = $stack[$#stack - 1];
+ } elsif ($lines[$line] =~ /^.\s*#\s*endif\b/) {
+ $level = pop(@stack);
+ }
+
+ foreach my $c (split(//, $lines[$line])) {
+ ##print "C<$c>L<$level><$open$close>O<$off>\n";
+ if ($off > 0) {
+ $off--;
+ next;
+ }
+
+ if ($c eq $close && $level > 0) {
+ $level--;
+ last if ($level == 0);
+ } elsif ($c eq $open) {
+ $level++;
+ }
+ }
+
+ if (!$outer || $level <= 1) {
+ push(@res, $rawlines[$line]);
+ }
+
+ last if ($level == 0);
+ }
+
+ return ($level, @res);
+}
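+
+# Convenience wrappers around ctx_block_get() for brace and parenthesis
+# contexts.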
+sub ctx_block_outer {
+ my ($linenr, $remain) = @_;
+
+ my ($level, @r) = ctx_block_get($linenr, $remain, 1, '{', '}', 0);
+ return @r;
+}
+sub ctx_block {
+ my ($linenr, $remain) = @_;
+
+ my ($level, @r) = ctx_block_get($linenr, $remain, 0, '{', '}', 0);
+ return @r;
+}
+sub ctx_statement {
+ my ($linenr, $remain, $off) = @_;
+
+ my ($level, @r) = ctx_block_get($linenr, $remain, 0, '(', ')', $off);
+ return @r;
+}
+sub ctx_block_level {
+ my ($linenr, $remain) = @_;
+
+ return ctx_block_get($linenr, $remain, 0, '{', '}', 0);
+}
+sub ctx_statement_level {
+ my ($linenr, $remain, $off) = @_;
+
+ return ctx_block_get($linenr, $remain, 0, '(', ')', $off);
+}
+
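+# ctx_locate_comment: return the text of any /* ... */ comment attached to
+# the given line range (either on the final line itself or in the preceding
+# context); returns '' when none is found.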
+sub ctx_locate_comment {
+ my ($first_line, $end_line) = @_;
+
+ # Catch a comment on the end of the line itself.
+ my ($current_comment) = ($rawlines[$end_line - 1] =~ m@.*(/\*.*\*/)\s*(?:\\\s*)?$@);
+ return $current_comment if (defined $current_comment);
+
+ # Look through the context and try and figure out if there is a
+ # comment.
+ my $in_comment = 0;
+ $current_comment = '';
+ for (my $linenr = $first_line; $linenr < $end_line; $linenr++) {
+ my $line = $rawlines[$linenr - 1];
+ #warn " $line\n";
+ if ($linenr == $first_line and $line =~ m@^.\s*\*@) {
+ $in_comment = 1;
+ }
+ if ($line =~ m@/\*@) {
+ $in_comment = 1;
+ }
+ if (!$in_comment && $current_comment ne '') {
+ $current_comment = '';
+ }
+ $current_comment .= $line . "\n" if ($in_comment);
+ if ($line =~ m@\*/@) {
+ $in_comment = 0;
+ }
+ }
+
+ chomp($current_comment);
+ return($current_comment);
+}
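+
+# ctx_has_comment: true if the given line range carries a comment.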
+sub ctx_has_comment {
+ my ($first_line, $end_line) = @_;
+ my $cmt = ctx_locate_comment($first_line, $end_line);
+
+ ##print "LINE: $rawlines[$end_line - 1 ]\n";
+ ##print "CMMT: $cmt\n";
+
+ return ($cmt ne '');
+}
+
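+# raw_line: return the raw line $cnt lines below line $linenr, skipping any
+# lines removed ('-') by the patch.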
+sub raw_line {
+ my ($linenr, $cnt) = @_;
+
+ my $offset = $linenr - 1;
+ $cnt++;
+
+ my $line;
+ while ($cnt) {
+ $line = $rawlines[$offset++];
+ next if (defined($line) && $line =~ /^-/);
+ $cnt--;
+ }
+
+ return $line;
+}
+
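+# cat_vet: make control characters in a line visible (cat -vet style); each
+# control character becomes ^X and the end of line is marked with '$'.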
+sub cat_vet {
+ my ($vet) = @_;
+ my ($res, $coded);
+
+ $res = '';
+ while ($vet =~ /([^[:cntrl:]]*)([[:cntrl:]]|$)/g) {
+ $res .= $1;
+ if ($2 ne '') {
+ $coded = sprintf("^%c", unpack('C', $2) + 64);
+ $res .= $coded;
+ }
+ }
+ $res =~ s/$/\$/;
+
+ return $res;
+}
+
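+# State shared between annotate_reset() and annotate_values().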
+my $av_preprocessor = 0;
+my $av_pending;
+my @av_paren_type;
+my $av_pend_colon;
+
+sub annotate_reset {
+ $av_preprocessor = 0;
+ $av_pending = '_';
+ @av_paren_type = ('E');
+ $av_pend_colon = 'O';
+}
+
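+# annotate_values: classify every character of a sanitised line according to
+# the kind of token it belongs to (type, value, cast, end of statement, ...)
+# and record unary/binary variants for ambiguous operators.  The resulting
+# strings drive the operator-spacing checks below.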
+sub annotate_values {
+ my ($stream, $type) = @_;
+
+ my $res;
+ my $var = '_' x length($stream);
+ my $cur = $stream;
+
+ print "$stream\n" if ($dbg_values > 1);
+
+ while (length($cur)) {
+ @av_paren_type = ('E') if ($#av_paren_type < 0);
+ print " <" . join('', @av_paren_type) .
+ "> <$type> <$av_pending>" if ($dbg_values > 1);
+ if ($cur =~ /^(\s+)/o) {
+ print "WS($1)\n" if ($dbg_values > 1);
+ if ($1 =~ /\n/ && $av_preprocessor) {
+ $type = pop(@av_paren_type);
+ $av_preprocessor = 0;
+ }
+
+ } elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') {
+ print "CAST($1)\n" if ($dbg_values > 1);
+ push(@av_paren_type, $type);
+ $type = 'C';
+
+ } elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\(|\s*$)/) {
+ print "DECLARE($1)\n" if ($dbg_values > 1);
+ $type = 'T';
+
+ } elsif ($cur =~ /^($Modifier)\s*/) {
+ print "MODIFIER($1)\n" if ($dbg_values > 1);
+ $type = 'T';
+
+ } elsif ($cur =~ /^(\#\s*define\s*$Ident)(\(?)/o) {
+ print "DEFINE($1,$2)\n" if ($dbg_values > 1);
+ $av_preprocessor = 1;
+ push(@av_paren_type, $type);
+ if ($2 ne '') {
+ $av_pending = 'N';
+ }
+ $type = 'E';
+
+ } elsif ($cur =~ /^(\#\s*(?:undef\s*$Ident|include\b))/o) {
+ print "UNDEF($1)\n" if ($dbg_values > 1);
+ $av_preprocessor = 1;
+ push(@av_paren_type, $type);
+
+ } elsif ($cur =~ /^(\#\s*(?:ifdef|ifndef|if))/o) {
+ print "PRE_START($1)\n" if ($dbg_values > 1);
+ $av_preprocessor = 1;
+
+ push(@av_paren_type, $type);
+ push(@av_paren_type, $type);
+ $type = 'E';
+
+ } elsif ($cur =~ /^(\#\s*(?:else|elif))/o) {
+ print "PRE_RESTART($1)\n" if ($dbg_values > 1);
+ $av_preprocessor = 1;
+
+ push(@av_paren_type, $av_paren_type[$#av_paren_type]);
+
+ $type = 'E';
+
+ } elsif ($cur =~ /^(\#\s*(?:endif))/o) {
+ print "PRE_END($1)\n" if ($dbg_values > 1);
+
+ $av_preprocessor = 1;
+
+ # Assume all arms of the conditional end as this
+ # one does, and continue as if the #endif was not here.
+ pop(@av_paren_type);
+ push(@av_paren_type, $type);
+ $type = 'E';
+
+ } elsif ($cur =~ /^(\\\n)/o) {
+ print "PRECONT($1)\n" if ($dbg_values > 1);
+
+ } elsif ($cur =~ /^(__attribute__)\s*\(?/o) {
+ print "ATTR($1)\n" if ($dbg_values > 1);
+ $av_pending = $type;
+ $type = 'N';
+
+ } elsif ($cur =~ /^(sizeof)\s*(\()?/o) {
+ print "SIZEOF($1)\n" if ($dbg_values > 1);
+ if (defined $2) {
+ $av_pending = 'V';
+ }
+ $type = 'N';
+
+ } elsif ($cur =~ /^(if|while|for)\b/o) {
+ print "COND($1)\n" if ($dbg_values > 1);
+ $av_pending = 'E';
+ $type = 'N';
+
+ } elsif ($cur =~/^(case)/o) {
+ print "CASE($1)\n" if ($dbg_values > 1);
+ $av_pend_colon = 'C';
+ $type = 'N';
+
+ } elsif ($cur =~/^(return|else|goto|typeof|__typeof__)\b/o) {
+ print "KEYWORD($1)\n" if ($dbg_values > 1);
+ $type = 'N';
+
+ } elsif ($cur =~ /^(\()/o) {
+ print "PAREN('$1')\n" if ($dbg_values > 1);
+ push(@av_paren_type, $av_pending);
+ $av_pending = '_';
+ $type = 'N';
+
+ } elsif ($cur =~ /^(\))/o) {
+ my $new_type = pop(@av_paren_type);
+ if ($new_type ne '_') {
+ $type = $new_type;
+ print "PAREN('$1') -> $type\n"
+ if ($dbg_values > 1);
+ } else {
+ print "PAREN('$1')\n" if ($dbg_values > 1);
+ }
+
+ } elsif ($cur =~ /^($Ident)\s*\(/o) {
+ print "FUNC($1)\n" if ($dbg_values > 1);
+ $type = 'V';
+ $av_pending = 'V';
+
+ } elsif ($cur =~ /^($Ident\s*):(?:\s*\d+\s*(,|=|;))?/) {
+ if (defined $2 && $type eq 'C' || $type eq 'T') {
+ $av_pend_colon = 'B';
+ } elsif ($type eq 'E') {
+ $av_pend_colon = 'L';
+ }
+ print "IDENT_COLON($1,$type>$av_pend_colon)\n" if ($dbg_values > 1);
+ $type = 'V';
+
+ } elsif ($cur =~ /^($Ident|$Constant)/o) {
+ print "IDENT($1)\n" if ($dbg_values > 1);
+ $type = 'V';
+
+ } elsif ($cur =~ /^($Assignment)/o) {
+ print "ASSIGN($1)\n" if ($dbg_values > 1);
+ $type = 'N';
+
+ } elsif ($cur =~/^(;|{|})/) {
+ print "END($1)\n" if ($dbg_values > 1);
+ $type = 'E';
+ $av_pend_colon = 'O';
+
+ } elsif ($cur =~/^(,)/) {
+ print "COMMA($1)\n" if ($dbg_values > 1);
+ $type = 'C';
+
+ } elsif ($cur =~ /^(\?)/o) {
+ print "QUESTION($1)\n" if ($dbg_values > 1);
+ $type = 'N';
+
+ } elsif ($cur =~ /^(:)/o) {
+ print "COLON($1,$av_pend_colon)\n" if ($dbg_values > 1);
+
+ substr($var, length($res), 1, $av_pend_colon);
+ if ($av_pend_colon eq 'C' || $av_pend_colon eq 'L') {
+ $type = 'E';
+ } else {
+ $type = 'N';
+ }
+ $av_pend_colon = 'O';
+
+ } elsif ($cur =~ /^(\[)/o) {
+ print "CLOSE($1)\n" if ($dbg_values > 1);
+ $type = 'N';
+
+ } elsif ($cur =~ /^(-(?![->])|\+(?!\+)|\*|\&\&|\&)/o) {
+ my $variant;
+
+ print "OPV($1)\n" if ($dbg_values > 1);
+ if ($type eq 'V') {
+ $variant = 'B';
+ } else {
+ $variant = 'U';
+ }
+
+ substr($var, length($res), 1, $variant);
+ $type = 'N';
+
+ } elsif ($cur =~ /^($Operators)/o) {
+ print "OP($1)\n" if ($dbg_values > 1);
+ if ($1 ne '++' && $1 ne '--') {
+ $type = 'N';
+ }
+
+ } elsif ($cur =~ /(^.)/o) {
+ print "C($1)\n" if ($dbg_values > 1);
+ }
+ if (defined $1) {
+ $cur = substr($cur, length($1));
+ $res .= $type x length($1);
+ }
+ }
+
+ return ($res, $var);
+}
+
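+# possible: record a word that looks like it could be a type or modifier,
+# adding it to @typeList or @modifierList (unless it is a known keyword) and
+# rebuilding the match patterns via build_types().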
+sub possible {
+ my ($possible, $line) = @_;
+ my $notPermitted = qr{(?:
+ ^(?:
+ $Modifier|
+ $Storage|
+ $Type|
+ DEFINE_\S+
+ )$|
+ ^(?:
+ goto|
+ return|
+ case|
+ else|
+ asm|__asm__|
+ do
+ )(?:\s|$)|
+ ^(?:typedef|struct|enum)\b
+ )}x;
+ warn "CHECK<$possible> ($line)\n" if ($dbg_possible > 2);
+ if ($possible !~ $notPermitted) {
+ # Check for modifiers.
+ $possible =~ s/\s*$Storage\s*//g;
+ $possible =~ s/\s*$Sparse\s*//g;
+ if ($possible =~ /^\s*$/) {
+
+ } elsif ($possible =~ /\s/) {
+ $possible =~ s/\s*$Type\s*//g;
+ for my $modifier (split(' ', $possible)) {
+ if ($modifier !~ $notPermitted) {
+ warn "MODIFIER: $modifier ($possible) ($line)\n" if ($dbg_possible);
+ push(@modifierList, $modifier);
+ }
+ }
+
+ } else {
+ warn "POSSIBLE: $possible ($line)\n" if ($dbg_possible);
+ push(@typeList, $possible);
+ }
+ build_types();
+ } else {
+ warn "NOTPOSS: $possible ($line)\n" if ($dbg_possible > 1);
+ }
+}
+
+my $prefix = '';
+
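+# Reporting helpers: report() queues a single message (honouring $tst_only
+# and $terse), while ERROR/WARN/CHK prepend their severity, mark the patch
+# as not clean and bump the matching counter.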
+sub report {
+ if (defined $tst_only && $_[0] !~ /\Q$tst_only\E/) {
+ return 0;
+ }
+ my $line = $prefix . $_[0];
+
+ $line = (split('\n', $line))[0] . "\n" if ($terse);
+
+ push(our @report, $line);
+
+ return 1;
+}
+sub report_dump {
+ our @report;
+}
+sub ERROR {
+ if (report("ERROR: $_[0]\n")) {
+ our $clean = 0;
+ our $cnt_error++;
+ }
+}
+sub WARN {
+ if (report("WARNING: $_[0]\n")) {
+ our $clean = 0;
+ our $cnt_warn++;
+ }
+}
+sub CHK {
+ if ($check && report("CHECK: $_[0]\n")) {
+ our $clean = 0;
+ our $cnt_chk++;
+ }
+}
+
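+# check_absolute_file: given an absolute path mentioned in the changelog,
+# see whether some suffix of it exists inside the tree and, if so, warn that
+# a relative path should be used instead.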
+sub check_absolute_file {
+ my ($absolute, $herecurr) = @_;
+ my $file = $absolute;
+
+ ##print "absolute<$absolute>\n";
+
+ # See if any suffix of this path is a path within the tree.
+ while ($file =~ s@^[^/]*/@@) {
+ if (-f "$root/$file") {
+ ##print "file<$file>\n";
+ last;
+ }
+ }
+ if (! -f _) {
+ return 0;
+ }
+
+ # It is, so see if the prefix is acceptable.
+ my $prefix = $absolute;
+ substr($prefix, -length($file)) = '';
+
+ ##print "prefix<$prefix>\n";
+ if ($prefix ne ".../") {
+ WARN("use relative pathname instead of absolute in changelog text\n" . $herecurr);
+ }
+}
+
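+# process: main driver.  Sanitise the patch lines, then walk every hunk and
+# apply the style checks, reporting through ERROR/WARN/CHK.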
+sub process {
+ my $filename = shift;
+
+ my $linenr=0;
+ my $prevline="";
+ my $prevrawline="";
+ my $stashline="";
+ my $stashrawline="";
+
+ my $length;
+ my $indent;
+ my $previndent=0;
+ my $stashindent=0;
+
+ our $clean = 1;
+ my $signoff = 0;
+ my $is_patch = 0;
+
+ our @report = ();
+ our $cnt_lines = 0;
+ our $cnt_error = 0;
+ our $cnt_warn = 0;
+ our $cnt_chk = 0;
+
+ # Trace the real file/line as we go.
+ my $realfile = '';
+ my $realline = 0;
+ my $realcnt = 0;
+ my $here = '';
+ my $in_comment = 0;
+ my $comment_edge = 0;
+ my $first_line = 0;
+ my $p1_prefix = '';
+
+ my $prev_values = 'E';
+
+ # suppression flags
+ my %suppress_ifbraces;
+ my %suppress_whiletrailers;
+ my %suppress_export;
+
+ # Pre-scan the patch sanitizing the lines.
+ # Pre-scan the patch looking for any __setup documentation.
+ #
+ my @setup_docs = ();
+ my $setup_docs = 0;
+
+ sanitise_line_reset();
+ my $line;
+ foreach my $rawline (@rawlines) {
+ $linenr++;
+ $line = $rawline;
+
+ if ($rawline=~/^\+\+\+\s+(\S+)/) {
+ $setup_docs = 0;
+ if ($1 =~ m@Documentation/kernel-parameters.txt$@) {
+ $setup_docs = 1;
+ }
+ #next;
+ }
+ if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
+ $realline=$1-1;
+ if (defined $2) {
+ $realcnt=$3+1;
+ } else {
+ $realcnt=1+1;
+ }
+ $in_comment = 0;
+
+			# Guesstimate if this is a continuing comment.  Run
+ # the context looking for a comment "edge". If this
+ # edge is a close comment then we must be in a comment
+ # at context start.
+ my $edge;
+ my $cnt = $realcnt;
+ for (my $ln = $linenr + 1; $cnt > 0; $ln++) {
+ next if (defined $rawlines[$ln - 1] &&
+ $rawlines[$ln - 1] =~ /^-/);
+ $cnt--;
+ #print "RAW<$rawlines[$ln - 1]>\n";
+ last if (!defined $rawlines[$ln - 1]);
+ if ($rawlines[$ln - 1] =~ m@(/\*|\*/)@ &&
+ $rawlines[$ln - 1] !~ m@"[^"]*(?:/\*|\*/)[^"]*"@) {
+ ($edge) = $1;
+ last;
+ }
+ }
+ if (defined $edge && $edge eq '*/') {
+ $in_comment = 1;
+ }
+
+			# Guesstimate if this is a continuing comment.  If this
+ # is the start of a diff block and this line starts
+ # ' *' then it is very likely a comment.
+ if (!defined $edge &&
+ $rawlines[$linenr] =~ m@^.\s*(?:\*\*+| \*)(?:\s|$)@)
+ {
+ $in_comment = 1;
+ }
+
+ ##print "COMMENT:$in_comment edge<$edge> $rawline\n";
+ sanitise_line_reset($in_comment);
+
+ } elsif ($realcnt && $rawline =~ /^(?:\+| |$)/) {
+ # Standardise the strings and chars within the input to
+ # simplify matching -- only bother with positive lines.
+ $line = sanitise_line($rawline);
+ }
+ push(@lines, $line);
+
+ if ($realcnt > 1) {
+ $realcnt-- if ($line =~ /^(?:\+| |$)/);
+ } else {
+ $realcnt = 0;
+ }
+
+ #print "==>$rawline\n";
+ #print "-->$line\n";
+
+ if ($setup_docs && $line =~ /^\+/) {
+ push(@setup_docs, $line);
+ }
+ }
+
+ $prefix = '';
+
+ $realcnt = 0;
+ $linenr = 0;
+ foreach my $line (@lines) {
+ $linenr++;
+
+ my $rawline = $rawlines[$linenr - 1];
+
+#extract the line range in the file after the patch is applied
+ if ($line=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
+ $is_patch = 1;
+ $first_line = $linenr + 1;
+ $realline=$1-1;
+ if (defined $2) {
+ $realcnt=$3+1;
+ } else {
+ $realcnt=1+1;
+ }
+ annotate_reset();
+ $prev_values = 'E';
+
+ %suppress_ifbraces = ();
+ %suppress_whiletrailers = ();
+ %suppress_export = ();
+ next;
+
+# track the line number as we move through the hunk, note that
+# new versions of GNU diff omit the leading space on completely
+# blank context lines so we need to count that too.
+ } elsif ($line =~ /^( |\+|$)/) {
+ $realline++;
+ $realcnt-- if ($realcnt != 0);
+
+ # Measure the line length and indent.
+ ($length, $indent) = line_stats($rawline);
+
+ # Track the previous line.
+ ($prevline, $stashline) = ($stashline, $line);
+ ($previndent, $stashindent) = ($stashindent, $indent);
+ ($prevrawline, $stashrawline) = ($stashrawline, $rawline);
+
+ #warn "line<$line>\n";
+
+ } elsif ($realcnt == 1) {
+ $realcnt--;
+ }
+
+ my $hunk_line = ($realcnt != 0);
+
+#make up the handle for any error we report on this line
+ $prefix = "$filename:$realline: " if ($emacs && $file);
+ $prefix = "$filename:$linenr: " if ($emacs && !$file);
+
+ $here = "#$linenr: " if (!$file);
+ $here = "#$realline: " if ($file);
+
+ # extract the filename as it passes
+ if ($line =~ /^diff --git.*?(\S+)$/) {
+ $realfile = $1;
+ $realfile =~ s@^([^/]*)/@@;
+
+ } elsif ($line =~ /^\+\+\+\s+(\S+)/) {
+ $realfile = $1;
+ $realfile =~ s@^([^/]*)/@@;
+
+ $p1_prefix = $1;
+ if (!$file && $tree && $p1_prefix ne '' &&
+ -e "$root/$p1_prefix") {
+ WARN("patch prefix '$p1_prefix' exists, appears to be a -p0 patch\n");
+ }
+
+ if ($realfile =~ m@^include/asm/@) {
+ ERROR("do not modify files in include/asm, change architecture specific files in include/asm-<architecture>\n" . "$here$rawline\n");
+ }
+ next;
+ }
+
+ $here .= "FILE: $realfile:$realline:" if ($realcnt != 0);
+
+ my $hereline = "$here\n$rawline\n";
+ my $herecurr = "$here\n$rawline\n";
+ my $hereprev = "$here\n$prevrawline\n$rawline\n";
+
+ $cnt_lines++ if ($realcnt != 0);
+
+# Check for incorrect file permissions
+ if ($line =~ /^new (file )?mode.*[7531]\d{0,2}$/) {
+ my $permhere = $here . "FILE: $realfile\n";
+ if ($realfile =~ /(Makefile|Kconfig|\.c|\.cpp|\.h|\.S|\.tmpl)$/) {
+ ERROR("do not set execute permissions for source files\n" . $permhere);
+ }
+ }
+
+#check the patch for a signoff:
+ if ($line =~ /^\s*signed-off-by:/i) {
+			# This is a signoff, even if malformed, so do not double-report.
+ $signoff++;
+ if (!($line =~ /^\s*Signed-off-by:/)) {
+ WARN("Signed-off-by: is the preferred form\n" .
+ $herecurr);
+ }
+ if ($line =~ /^\s*signed-off-by:\S/i) {
+ WARN("space required after Signed-off-by:\n" .
+ $herecurr);
+ }
+ }
+
+# Check for wrappage within a valid hunk of the file
+ if ($realcnt != 0 && $line !~ m{^(?:\+|-| |\\ No newline|$)}) {
+ ERROR("patch seems to be corrupt (line wrapped?)\n" .
+ $herecurr) if (!$emitted_corrupt++);
+ }
+
+# Check for absolute kernel paths.
+ if ($tree) {
+ while ($line =~ m{(?:^|\s)(/\S*)}g) {
+ my $file = $1;
+
+ if ($file =~ m{^(.*?)(?::\d+)+:?$} &&
+ check_absolute_file($1, $herecurr)) {
+ #
+ } else {
+ check_absolute_file($file, $herecurr);
+ }
+ }
+ }
+
+# UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php
+ if (($realfile =~ /^$/ || $line =~ /^\+/) &&
+ $rawline !~ m/^$UTF8*$/) {
+ my ($utf8_prefix) = ($rawline =~ /^($UTF8*)/);
+
+ my $blank = copy_spacing($rawline);
+ my $ptr = substr($blank, 0, length($utf8_prefix)) . "^";
+ my $hereptr = "$hereline$ptr\n";
+
+ ERROR("Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $hereptr);
+ }
+
+# ignore non-hunk lines and lines being removed
+ next if (!$hunk_line || $line =~ /^-/);
+
+#trailing whitespace
+ if ($line =~ /^\+.*\015/) {
+ my $herevet = "$here\n" . cat_vet($rawline) . "\n";
+ ERROR("DOS line endings\n" . $herevet);
+
+ } elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) {
+ my $herevet = "$here\n" . cat_vet($rawline) . "\n";
+ ERROR("trailing whitespace\n" . $herevet);
+ $rpt_cleaners = 1;
+ }
+
+# check for Kconfig help text having a real description
+# Only applies when adding the entry originally, after that we do not have
+# sufficient context to determine whether it is indeed long enough.
+ if ($realfile =~ /Kconfig/ &&
+ $line =~ /\+\s*(?:---)?help(?:---)?$/) {
+ my $length = 0;
+ my $cnt = $realcnt;
+ my $ln = $linenr + 1;
+ my $f;
+ my $is_end = 0;
+ while ($cnt > 0 && defined $lines[$ln - 1]) {
+ $f = $lines[$ln - 1];
+ $cnt-- if ($lines[$ln - 1] !~ /^-/);
+ $is_end = $lines[$ln - 1] =~ /^\+/;
+ $ln++;
+
+ next if ($f =~ /^-/);
+ $f =~ s/^.//;
+ $f =~ s/#.*//;
+ $f =~ s/^\s+//;
+ next if ($f =~ /^$/);
+ if ($f =~ /^\s*config\s/) {
+ $is_end = 1;
+ last;
+ }
+ $length++;
+ }
+ WARN("please write a paragraph that describes the config symbol fully\n" . $herecurr) if ($is_end && $length < 4);
+ #print "is_end<$is_end> length<$length>\n";
+ }
+
+# check we are in a valid source file; if not, ignore this hunk
+ next if ($realfile !~ /\.(h|c|cpp|s|S|pl|sh)$/);
+
+#80 column limit
+ if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
+ $rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
+ !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?"[X\t]*"\s*(?:,|\)\s*;)\s*$/ ||
+ $line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) &&
+ $length > 80)
+ {
+ WARN("line over 80 characters\n" . $herecurr);
+ }
+
+# check for spaces before a quoted newline
+ if ($rawline =~ /^.*\".*\s\\n/) {
+ WARN("unnecessary whitespace before a quoted newline\n" . $herecurr);
+ }
+
+# check for adding lines without a newline.
+ if ($line =~ /^\+/ && defined $lines[$linenr] && $lines[$linenr] =~ /^\\ No newline at end of file/) {
+ WARN("adding a line without newline at end of file\n" . $herecurr);
+ }
+
+# Blackfin: use hi/lo macros
+ if ($realfile =~ m@arch/blackfin/.*\.S$@) {
+ if ($line =~ /\.[lL][[:space:]]*=.*&[[:space:]]*0x[fF][fF][fF][fF]/) {
+ my $herevet = "$here\n" . cat_vet($line) . "\n";
+ ERROR("use the LO() macro, not (... & 0xFFFF)\n" . $herevet);
+ }
+ if ($line =~ /\.[hH][[:space:]]*=.*>>[[:space:]]*16/) {
+ my $herevet = "$here\n" . cat_vet($line) . "\n";
+ ERROR("use the HI() macro, not (... >> 16)\n" . $herevet);
+ }
+ }
+
+# check we are in a valid C or Perl source file; if not, ignore this hunk
+ next if ($realfile !~ /\.(h|c|cpp|pl)$/);
+
+# in QEMU, no tabs are allowed
+ if ($rawline =~ /^\+.*\t/) {
+ my $herevet = "$here\n" . cat_vet($rawline) . "\n";
+ ERROR("code indent should never use tabs\n" . $herevet);
+ $rpt_cleaners = 1;
+ }
+
+# check we are in a valid C source file; if not, ignore this hunk
+ next if ($realfile !~ /\.(h|c|cpp)$/);
+
+# check for RCS/CVS revision markers
+ if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) {
+ WARN("CVS style keyword markers, these will _not_ be updated\n". $herecurr);
+ }
+
+# Blackfin: don't use __builtin_bfin_[cs]sync
+ if ($line =~ /__builtin_bfin_csync/) {
+ my $herevet = "$here\n" . cat_vet($line) . "\n";
+ ERROR("use the CSYNC() macro in asm/blackfin.h\n" . $herevet);
+ }
+ if ($line =~ /__builtin_bfin_ssync/) {
+ my $herevet = "$here\n" . cat_vet($line) . "\n";
+ ERROR("use the SSYNC() macro in asm/blackfin.h\n" . $herevet);
+ }
+
+# Check for potential 'bare' types
+ my ($stat, $cond, $line_nr_next, $remain_next, $off_next,
+ $realline_next);
+ if ($realcnt && $line =~ /.\s*\S/) {
+ ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
+ ctx_statement_block($linenr, $realcnt, 0);
+ $stat =~ s/\n./\n /g;
+ $cond =~ s/\n./\n /g;
+
+ # Find the real next line.
+ $realline_next = $line_nr_next;
+ if (defined $realline_next &&
+ (!defined $lines[$realline_next - 1] ||
+ substr($lines[$realline_next - 1], $off_next) =~ /^\s*$/)) {
+ $realline_next++;
+ }
+
+ my $s = $stat;
+ $s =~ s/{.*$//s;
+
+ # Ignore goto labels.
+ if ($s =~ /$Ident:\*$/s) {
+
+ # Ignore functions being called
+ } elsif ($s =~ /^.\s*$Ident\s*\(/s) {
+
+ } elsif ($s =~ /^.\s*else\b/s) {
+
+ # declarations always start with types
+ } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?((?:\s*$Ident)+?)\b(?:\s+$Sparse)?\s*\**\s*(?:$Ident|\(\*[^\)]*\))(?:\s*$Modifier)?\s*(?:;|=|,|\()/s) {
+ my $type = $1;
+ $type =~ s/\s+/ /g;
+ possible($type, "A:" . $s);
+
+ # definitions in global scope can only start with types
+ } elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b\s*(?!:)/s) {
+ possible($1, "B:" . $s);
+ }
+
+ # any (foo ... *) is a pointer cast, and foo is a type
+ while ($s =~ /\(($Ident)(?:\s+$Sparse)*[\s\*]+\s*\)/sg) {
+ possible($1, "C:" . $s);
+ }
+
+ # Check for any sort of function declaration.
+ # int foo(something bar, other baz);
+ # void (*store_gdt)(x86_descr_ptr *);
+ if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/s) {
+ my ($name_len) = length($1);
+
+ my $ctx = $s;
+ substr($ctx, 0, $name_len + 1, '');
+ $ctx =~ s/\)[^\)]*$//;
+
+ for my $arg (split(/\s*,\s*/, $ctx)) {
+ if ($arg =~ /^(?:const\s+)?($Ident)(?:\s+$Sparse)*\s*\**\s*(:?\b$Ident)?$/s || $arg =~ /^($Ident)$/s) {
+
+ possible($1, "D:" . $s);
+ }
+ }
+ }
+
+ }
+
+#
+# Checks which may be anchored in the context.
+#
+
+# Check for switch () and associated case and default
+# statements should be at the same indent.
+ if ($line=~/\bswitch\s*\(.*\)/) {
+ my $err = '';
+ my $sep = '';
+ my @ctx = ctx_block_outer($linenr, $realcnt);
+ shift(@ctx);
+ for my $ctx (@ctx) {
+ my ($clen, $cindent) = line_stats($ctx);
+ if ($ctx =~ /^\+\s*(case\s+|default:)/ &&
+ $indent != $cindent) {
+ $err .= "$sep$ctx\n";
+ $sep = '';
+ } else {
+ $sep = "[...]\n";
+ }
+ }
+ if ($err ne '') {
+ ERROR("switch and case should be at the same indent\n$hereline$err");
+ }
+ }
+
+# the brace of an if/while/etc does not go on the next line, unless defining
+# a do-while loop, or if the brace on the next line is for something else
+ if ($line =~ /(.*)\b((?:if|while|for|switch)\s*\(|do\b|else\b)/ && $line !~ /^.\s*\#/) {
+ my $pre_ctx = "$1$2";
+
+ my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);
+ my $ctx_cnt = $realcnt - $#ctx - 1;
+ my $ctx = join("\n", @ctx);
+
+ my $ctx_ln = $linenr;
+ my $ctx_skip = $realcnt;
+
+ while ($ctx_skip > $ctx_cnt || ($ctx_skip == $ctx_cnt &&
+ defined $lines[$ctx_ln - 1] &&
+ $lines[$ctx_ln - 1] =~ /^-/)) {
+ ##print "SKIP<$ctx_skip> CNT<$ctx_cnt>\n";
+ $ctx_skip-- if (!defined $lines[$ctx_ln - 1] || $lines[$ctx_ln - 1] !~ /^-/);
+ $ctx_ln++;
+ }
+
+ #print "realcnt<$realcnt> ctx_cnt<$ctx_cnt>\n";
+ #print "pre<$pre_ctx>\nline<$line>\nctx<$ctx>\nnext<$lines[$ctx_ln - 1]>\n";
+
+ # The length of the "previous line" is checked against 80 because it
+ # includes the + at the beginning of the line (if the actual line has
+ # 79 or 80 characters, it is no longer possible to add a space and an
+ # opening brace there)
+ if ($#ctx == 0 && $ctx !~ /{\s*/ &&
+ defined($lines[$ctx_ln - 1]) && $lines[$ctx_ln - 1] =~ /^\+\s*{/ &&
+ defined($lines[$ctx_ln - 2]) && length($lines[$ctx_ln - 2]) < 80) {
+ ERROR("that open brace { should be on the previous line\n" .
+ "$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
+ }
+ if ($level == 0 && $pre_ctx !~ /}\s*while\s*\($/ &&
+ $ctx =~ /\)\s*\;\s*$/ &&
+ defined $lines[$ctx_ln - 1])
+ {
+ my ($nlength, $nindent) = line_stats($lines[$ctx_ln - 1]);
+ if ($nindent > $indent) {
+ WARN("trailing semicolon indicates no statements, indent implies otherwise\n" .
+ "$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
+ }
+ }
+ }
+
+# Check relative indent for conditionals and blocks.
+ if ($line =~ /\b(?:(?:if|while|for)\s*\(|do\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
+ my ($s, $c) = ($stat, $cond);
+
+ substr($s, 0, length($c), '');
+
+			# Make sure we remove the line prefixes as we have
+			# none on the first line, and are going to re-add them
+			# where necessary.
+ $s =~ s/\n./\n/gs;
+
+ # Find out how long the conditional actually is.
+ my @newlines = ($c =~ /\n/gs);
+ my $cond_lines = 1 + $#newlines;
+
+ # We want to check the first line inside the block
+ # starting at the end of the conditional, so remove:
+ # 1) any blank line termination
+ # 2) any opening brace { on end of the line
+ # 3) any do (...) {
+ my $continuation = 0;
+ my $check = 0;
+ $s =~ s/^.*\bdo\b//;
+ $s =~ s/^\s*{//;
+ if ($s =~ s/^\s*\\//) {
+ $continuation = 1;
+ }
+ if ($s =~ s/^\s*?\n//) {
+ $check = 1;
+ $cond_lines++;
+ }
+
+ # Also ignore a loop construct at the end of a
+ # preprocessor statement.
+ if (($prevline =~ /^.\s*#\s*define\s/ ||
+ $prevline =~ /\\\s*$/) && $continuation == 0) {
+ $check = 0;
+ }
+
+ my $cond_ptr = -1;
+ $continuation = 0;
+ while ($cond_ptr != $cond_lines) {
+ $cond_ptr = $cond_lines;
+
+ # If we see an #else/#elif then the code
+ # is not linear.
+ if ($s =~ /^\s*\#\s*(?:else|elif)/) {
+ $check = 0;
+ }
+
+ # Ignore:
+ # 1) blank lines, they should be at 0,
+ # 2) preprocessor lines, and
+ # 3) labels.
+ if ($continuation ||
+ $s =~ /^\s*?\n/ ||
+ $s =~ /^\s*#\s*?/ ||
+ $s =~ /^\s*$Ident\s*:/) {
+ $continuation = ($s =~ /^.*?\\\n/) ? 1 : 0;
+ if ($s =~ s/^.*?\n//) {
+ $cond_lines++;
+ }
+ }
+ }
+
+ my (undef, $sindent) = line_stats("+" . $s);
+ my $stat_real = raw_line($linenr, $cond_lines);
+
+			# Check if either of these lines is modified, else
+			# this is not this patch's fault.
+ if (!defined($stat_real) ||
+ $stat !~ /^\+/ && $stat_real !~ /^\+/) {
+ $check = 0;
+ }
+ if (defined($stat_real) && $cond_lines > 1) {
+ $stat_real = "[...]\n$stat_real";
+ }
+
+ #print "line<$line> prevline<$prevline> indent<$indent> sindent<$sindent> check<$check> continuation<$continuation> s<$s> cond_lines<$cond_lines> stat_real<$stat_real> stat<$stat>\n";
+
+ if ($check && (($sindent % 4) != 0 ||
+ ($sindent <= $indent && $s ne ''))) {
+ WARN("suspect code indent for conditional statements ($indent, $sindent)\n" . $herecurr . "$stat_real\n");
+ }
+ }
+
+ # Track the 'values' across context and added lines.
+ my $opline = $line; $opline =~ s/^./ /;
+ my ($curr_values, $curr_vars) =
+ annotate_values($opline . "\n", $prev_values);
+ $curr_values = $prev_values . $curr_values;
+ if ($dbg_values) {
+ my $outline = $opline; $outline =~ s/\t/ /g;
+ print "$linenr > .$outline\n";
+ print "$linenr > $curr_values\n";
+ print "$linenr > $curr_vars\n";
+ }
+ $prev_values = substr($curr_values, -1);
+
+#ignore lines not being added
+ if ($line=~/^[^\+]/) {next;}
+
+# TEST: allow direct testing of the type matcher.
+ if ($dbg_type) {
+ if ($line =~ /^.\s*$Declare\s*$/) {
+ ERROR("TEST: is type\n" . $herecurr);
+ } elsif ($dbg_type > 1 && $line =~ /^.+($Declare)/) {
+ ERROR("TEST: is not type ($1 is)\n". $herecurr);
+ }
+ next;
+ }
+# TEST: allow direct testing of the attribute matcher.
+ if ($dbg_attr) {
+ if ($line =~ /^.\s*$Modifier\s*$/) {
+ ERROR("TEST: is attr\n" . $herecurr);
+ } elsif ($dbg_attr > 1 && $line =~ /^.+($Modifier)/) {
+ ERROR("TEST: is not attr ($1 is)\n". $herecurr);
+ }
+ next;
+ }
+
+# check for initialisation to aggregates open brace on the next line
+ if ($line =~ /^.\s*{/ &&
+ $prevline =~ /(?:^|[^=])=\s*$/) {
+ ERROR("that open brace { should be on the previous line\n" . $hereprev);
+ }
+
+#
+# Checks which are anchored on the added line.
+#
+
+# check for malformed paths in #include statements (uses RAW line)
+ if ($rawline =~ m{^.\s*\#\s*include\s+[<"](.*)[">]}) {
+ my $path = $1;
+ if ($path =~ m{//}) {
+ ERROR("malformed #include filename\n" .
+ $herecurr);
+ }
+ }
+
+# no C99 // comments
+ if ($line =~ m{//}) {
+ ERROR("do not use C99 // comments\n" . $herecurr);
+ }
+ # Remove C99 comments.
+ $line =~ s@//.*@@;
+ $opline =~ s@//.*@@;
+
+# EXPORT_SYMBOL should immediately follow the thing it is exporting, consider
+# the whole statement.
+#print "APW <$lines[$realline_next - 1]>\n";
+ if (defined $realline_next &&
+ exists $lines[$realline_next - 1] &&
+ !defined $suppress_export{$realline_next} &&
+ ($lines[$realline_next - 1] =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
+ $lines[$realline_next - 1] =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
+ # Handle definitions which produce identifiers with
+ # a prefix:
+ # XXX(foo);
+ # EXPORT_SYMBOL(something_foo);
+ my $name = $1;
+ if ($stat =~ /^.([A-Z_]+)\s*\(\s*($Ident)/ &&
+ $name =~ /^${Ident}_$2/) {
+#print "FOO C name<$name>\n";
+ $suppress_export{$realline_next} = 1;
+
+ } elsif ($stat !~ /(?:
+ \n.}\s*$|
+ ^.DEFINE_$Ident\(\Q$name\E\)|
+ ^.DECLARE_$Ident\(\Q$name\E\)|
+ ^.LIST_HEAD\(\Q$name\E\)|
+ ^.(?:$Storage\s+)?$Type\s*\(\s*\*\s*\Q$name\E\s*\)\s*\(|
+ \b\Q$name\E(?:\s+$Attribute)*\s*(?:;|=|\[|\()
+ )/x) {
+#print "FOO A<$lines[$realline_next - 1]> stat<$stat> name<$name>\n";
+ $suppress_export{$realline_next} = 2;
+ } else {
+ $suppress_export{$realline_next} = 1;
+ }
+ }
+ if (!defined $suppress_export{$linenr} &&
+ $prevline =~ /^.\s*$/ &&
+ ($line =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
+ $line =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
+#print "FOO B <$lines[$linenr - 1]>\n";
+ $suppress_export{$linenr} = 2;
+ }
+ if (defined $suppress_export{$linenr} &&
+ $suppress_export{$linenr} == 2) {
+ WARN("EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr);
+ }
+
+# check for global initialisers.
+ if ($line =~ /^.$Type\s*$Ident\s*(?:\s+$Modifier)*\s*=\s*(0|NULL|false)\s*;/) {
+ ERROR("do not initialise globals to 0 or NULL\n" .
+ $herecurr);
+ }
+# check for static initialisers.
+ if ($line =~ /\bstatic\s.*=\s*(0|NULL|false)\s*;/) {
+ ERROR("do not initialise statics to 0 or NULL\n" .
+ $herecurr);
+ }
+
+# * goes on variable not on type
+ # (char*[ const])
+ if ($line =~ m{\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\)}) {
+ my ($from, $to) = ($1, $1);
+
+ # Should start with a space.
+ $to =~ s/^(\S)/ $1/;
+ # Should not end with a space.
+ $to =~ s/\s+$//;
+ # '*'s should not have spaces between.
+ while ($to =~ s/\*\s+\*/\*\*/) {
+ }
+
+ #print "from<$from> to<$to>\n";
+ if ($from ne $to) {
+ ERROR("\"(foo$from)\" should be \"(foo$to)\"\n" . $herecurr);
+ }
+ } elsif ($line =~ m{\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident)}) {
+ my ($from, $to, $ident) = ($1, $1, $2);
+
+ # Should start with a space.
+ $to =~ s/^(\S)/ $1/;
+ # Should not end with a space.
+ $to =~ s/\s+$//;
+ # '*'s should not have spaces between.
+ while ($to =~ s/\*\s+\*/\*\*/) {
+ }
+ # Modifiers should have spaces.
+ $to =~ s/(\b$Modifier$)/$1 /;
+
+ #print "from<$from> to<$to> ident<$ident>\n";
+ if ($from ne $to && $ident !~ /^$Modifier$/) {
+ ERROR("\"foo${from}bar\" should be \"foo${to}bar\"\n" . $herecurr);
+ }
+ }
+
+# # no BUG() or BUG_ON()
+# if ($line =~ /\b(BUG|BUG_ON)\b/) {
+# print "Try to use WARN_ON & Recovery code rather than BUG() or BUG_ON()\n";
+# print "$herecurr";
+# $clean = 0;
+# }
+
+ if ($line =~ /\bLINUX_VERSION_CODE\b/) {
+ WARN("LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
+ }
+
+# printk should use KERN_* levels.  Note that follow-on printks on the
+# same line do not need a level, so we use the current block context
+# to try to find and validate the current printk.  In summary, the current
+# printk includes all preceding printks which have no newline on the end.
+# We assume the first bad printk is the one to report.
+ if ($line =~ /\bprintk\((?!KERN_)\s*"/) {
+ my $ok = 0;
+ for (my $ln = $linenr - 1; $ln >= $first_line; $ln--) {
+ #print "CHECK<$lines[$ln - 1]\n";
+ # we have a preceding printk if it ends
+ # with "\n" ignore it, else it is to blame
+ if ($lines[$ln - 1] =~ m{\bprintk\(}) {
+ if ($rawlines[$ln - 1] !~ m{\\n"}) {
+ $ok = 1;
+ }
+ last;
+ }
+ }
+ if ($ok == 0) {
+ WARN("printk() should include KERN_ facility level\n" . $herecurr);
+ }
+ }
+
+# a function's opening brace can't be on the same line, except for #defines
+# of do-while, or if it is closed on the same line
+ if (($line=~/$Type\s*$Ident\(.*\).*\s{/) and
+ !($line=~/\#\s*define.*do\s{/) and !($line=~/}/)) {
+ ERROR("open brace '{' following function declarations go on the next line\n" . $herecurr);
+ }
+
+# open braces for enum, union and struct go on the same line.
+ if ($line =~ /^.\s*{/ &&
+ $prevline =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?\s*$/) {
+ ERROR("open brace '{' following $1 go on the same line\n" . $hereprev);
+ }
+
+# missing space after union, struct or enum definition
+ if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?(?:\s+$Ident)?[=\{]/) {
+ WARN("missing space after $1 definition\n" . $herecurr);
+ }
+
+# check for spacing round square brackets; allowed:
+# 1. with a type on the left -- int [] a;
+# 2. at the beginning of a line for slice initialisers -- [0...10] = 5,
+# 3. inside a curly brace -- = { [0...10] = 5 }
+ while ($line =~ /(.*?\s)\[/g) {
+ my ($where, $prefix) = ($-[1], $1);
+ if ($prefix !~ /$Type\s+$/ &&
+ ($where != 0 || $prefix !~ /^.\s+$/) &&
+ $prefix !~ /{\s+$/) {
+ ERROR("space prohibited before open square bracket '['\n" . $herecurr);
+ }
+ }
+
+# check for spaces between functions and their parentheses.
+ while ($line =~ /($Ident)\s+\(/g) {
+ my $name = $1;
+ my $ctx_before = substr($line, 0, $-[1]);
+ my $ctx = "$ctx_before$name";
+
+ # Ignore those directives where spaces _are_ permitted.
+ if ($name =~ /^(?:
+ if|for|while|switch|return|case|
+ volatile|__volatile__|
+ __attribute__|format|__extension__|
+ asm|__asm__)$/x)
+ {
+
+ # Ignore 'catch (...)' in C++
+ } elsif ($name =~ /^catch$/ && $realfile =~ /(\.cpp|\.h)$/) {
+
+ # cpp #define statements have non-optional spaces, ie
+ # if there is a space between the name and the open
+ # parenthesis it is simply not a parameter group.
+ } elsif ($ctx_before =~ /^.\s*\#\s*define\s*$/) {
+
+ # cpp #elif statement condition may start with a (
+ } elsif ($ctx =~ /^.\s*\#\s*elif\s*$/) {
+
+			# If this whole thing ends with a type it is most
+			# likely a typedef for a function.
+ } elsif ($ctx =~ /$Type$/) {
+
+ } else {
+ WARN("space prohibited between function name and open parenthesis '('\n" . $herecurr);
+ }
+ }
+# Check operator spacing.
+ if (!($line=~/\#\s*include/)) {
+ my $ops = qr{
+ <<=|>>=|<=|>=|==|!=|
+ \+=|-=|\*=|\/=|%=|\^=|\|=|&=|
+ =>|->|<<|>>|<|>|=|!|~|
+ &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|%|
+ \?|::|:
+ }x;
+ my @elements = split(/($ops|;)/, $opline);
+ my $off = 0;
+
+ my $blank = copy_spacing($opline);
+
+ for (my $n = 0; $n < $#elements; $n += 2) {
+ $off += length($elements[$n]);
+
+ # Pick up the preceding and succeeding characters.
+ my $ca = substr($opline, 0, $off);
+ my $cc = '';
+ if (length($opline) >= ($off + length($elements[$n + 1]))) {
+ $cc = substr($opline, $off + length($elements[$n + 1]));
+ }
+ my $cb = "$ca$;$cc";
+
+ my $a = '';
+ $a = 'V' if ($elements[$n] ne '');
+ $a = 'W' if ($elements[$n] =~ /\s$/);
+ $a = 'C' if ($elements[$n] =~ /$;$/);
+ $a = 'B' if ($elements[$n] =~ /(\[|\()$/);
+ $a = 'O' if ($elements[$n] eq '');
+ $a = 'E' if ($ca =~ /^\s*$/);
+
+ my $op = $elements[$n + 1];
+
+ my $c = '';
+ if (defined $elements[$n + 2]) {
+ $c = 'V' if ($elements[$n + 2] ne '');
+ $c = 'W' if ($elements[$n + 2] =~ /^\s/);
+ $c = 'C' if ($elements[$n + 2] =~ /^$;/);
+ $c = 'B' if ($elements[$n + 2] =~ /^(\)|\]|;)/);
+ $c = 'O' if ($elements[$n + 2] eq '');
+ $c = 'E' if ($elements[$n + 2] =~ /^\s*\\$/);
+ } else {
+ $c = 'E';
+ }
+
+ my $ctx = "${a}x${c}";
+
+ my $at = "(ctx:$ctx)";
+
+ my $ptr = substr($blank, 0, $off) . "^";
+ my $hereptr = "$hereline$ptr\n";
+
+ # Pull out the value of this operator.
+ my $op_type = substr($curr_values, $off + 1, 1);
+
+ # Get the full operator variant.
+ my $opv = $op . substr($curr_vars, $off, 1);
+
+ # Ignore operators passed as parameters.
+ if ($op_type ne 'V' &&
+ $ca =~ /\s$/ && $cc =~ /^\s*,/) {
+
+# # Ignore comments
+# } elsif ($op =~ /^$;+$/) {
+
+ # ; should have either the end of line or a space or \ after it
+ } elsif ($op eq ';') {
+ if ($ctx !~ /.x[WEBC]/ &&
+ $cc !~ /^\\/ && $cc !~ /^;/) {
+ ERROR("space required after that '$op' $at\n" . $hereptr);
+ }
+
+ # // is a comment
+ } elsif ($op eq '//') {
+
+ # Ignore : used in class declaration in C++
+ } elsif ($opv eq ':B' && $ctx =~ /Wx[WE]/ &&
+ $line =~ /class/ && $realfile =~ /(\.cpp|\.h)$/) {
+
+ # No spaces for:
+ # ->
+ # : when part of a bitfield
+ } elsif ($op eq '->' || $opv eq ':B') {
+ if ($ctx =~ /Wx.|.xW/) {
+ ERROR("spaces prohibited around that '$op' $at\n" . $hereptr);
+ }
+
+ # , must have a space on the right.
+ # not required when having a single },{ on one line
+ } elsif ($op eq ',') {
+ if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/ &&
+ ($elements[$n] . $elements[$n + 2]) !~ " *}{") {
+ ERROR("space required after that '$op' $at\n" . $hereptr);
+ }
+
+ # '*' as part of a type definition -- reported already.
+ } elsif ($opv eq '*_') {
+ #warn "'*' is part of type\n";
+
+ # unary operators should have a space before and
+ # none after. May be left adjacent to another
+ # unary operator, or a cast
+ } elsif ($op eq '!' || $op eq '~' ||
+ $opv eq '*U' || $opv eq '-U' ||
+ $opv eq '&U' || $opv eq '&&U') {
+ if ($op eq '~' && $ca =~ /::$/ && $realfile =~ /(\.cpp|\.h)$/) {
+ # '~' used as a name of Destructor
+
+ } elsif ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
+ ERROR("space required before that '$op' $at\n" . $hereptr);
+ }
+ if ($op eq '*' && $cc =~/\s*$Modifier\b/) {
+ # A unary '*' may be const
+
+ } elsif ($ctx =~ /.xW/) {
+ ERROR("space prohibited after that '$op' $at\n" . $hereptr);
+ }
+
+ # unary ++ and unary -- are allowed no space on one side.
+ } elsif ($op eq '++' or $op eq '--') {
+ if ($ctx !~ /[WEOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) {
+ ERROR("space required one side of that '$op' $at\n" . $hereptr);
+ }
+ if ($ctx =~ /Wx[BE]/ ||
+ ($ctx =~ /Wx./ && $cc =~ /^;/)) {
+ ERROR("space prohibited before that '$op' $at\n" . $hereptr);
+ }
+ if ($ctx =~ /ExW/) {
+ ERROR("space prohibited after that '$op' $at\n" . $hereptr);
+ }
+
+
+ # << and >> may either have or not have spaces both sides
+ } elsif ($op eq '<<' or $op eq '>>' or
+ $op eq '&' or $op eq '^' or $op eq '|' or
+ $op eq '+' or $op eq '-' or
+ $op eq '*' or $op eq '/' or
+ $op eq '%')
+ {
+ if ($ctx =~ /Wx[^WCE]|[^WCE]xW/) {
+ ERROR("need consistent spacing around '$op' $at\n" .
+ $hereptr);
+ }
+
+ # A colon needs no spaces before when it is
+ # terminating a case value or a label.
+ } elsif ($opv eq ':C' || $opv eq ':L') {
+ if ($ctx =~ /Wx./) {
+ ERROR("space prohibited before that '$op' $at\n" . $hereptr);
+ }
+
+ # All the others need spaces both sides.
+ } elsif ($ctx !~ /[EWC]x[CWE]/) {
+ my $ok = 0;
+
+ if ($realfile =~ /\.cpp|\.h$/) {
+ # Ignore template arguments <...> in C++
+ if (($op eq '<' || $op eq '>') && $line =~ /<.*>/) {
+ $ok = 1;
+ }
+
+ # Ignore :: in C++
+ if ($op eq '::') {
+ $ok = 1;
+ }
+ }
+
+ # Ignore email addresses <foo@bar>
+ if (($op eq '<' &&
+ $cc =~ /^\S+\@\S+>/) ||
+ ($op eq '>' &&
+ $ca =~ /<\S+\@\S+$/))
+ {
+ $ok = 1;
+ }
+
+ # Ignore ?:
+ if (($opv eq ':O' && $ca =~ /\?$/) ||
+ ($op eq '?' && $cc =~ /^:/)) {
+ $ok = 1;
+ }
+
+ if ($ok == 0) {
+ ERROR("spaces required around that '$op' $at\n" . $hereptr);
+ }
+ }
+ $off += length($elements[$n + 1]);
+ }
+ }
+
+# check for multiple assignments
+ if ($line =~ /^.\s*$Lval\s*=\s*$Lval\s*=(?!=)/) {
+ CHK("multiple assignments should be avoided\n" . $herecurr);
+ }
+
+## # check for multiple declarations, allowing for a function declaration
+## # continuation.
+## if ($line =~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Ident.*/ &&
+## $line !~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Type\s*$Ident.*/) {
+##
+## # Remove any bracketed sections to ensure we do not
+##		# falsely report the parameters of functions.
+## my $ln = $line;
+## while ($ln =~ s/\([^\(\)]*\)//g) {
+## }
+## if ($ln =~ /,/) {
+## WARN("declaring multiple variables together should be avoided\n" . $herecurr);
+## }
+## }
+
+#need space before brace following if, while, etc
+ if (($line =~ /\(.*\){/ && $line !~ /\($Type\){/) ||
+ $line =~ /do{/) {
+ ERROR("space required before the open brace '{'\n" . $herecurr);
+ }
+
+# closing brace should have a space following it when it has anything
+# on the line
+ if ($line =~ /}(?!(?:,|;|\)))\S/) {
+ ERROR("space required after that close brace '}'\n" . $herecurr);
+ }
+
+# check spacing on square brackets
+ if ($line =~ /\[\s/ && $line !~ /\[\s*$/) {
+ ERROR("space prohibited after that open square bracket '['\n" . $herecurr);
+ }
+ if ($line =~ /\s\]/) {
+ ERROR("space prohibited before that close square bracket ']'\n" . $herecurr);
+ }
+
+# check spacing on parentheses
+ if ($line =~ /\(\s/ && $line !~ /\(\s*(?:\\)?$/ &&
+ $line !~ /for\s*\(\s+;/) {
+ ERROR("space prohibited after that open parenthesis '('\n" . $herecurr);
+ }
+ if ($line =~ /(\s+)\)/ && $line !~ /^.\s*\)/ &&
+ $line !~ /for\s*\(.*;\s+\)/ &&
+ $line !~ /:\s+\)/) {
+ ERROR("space prohibited before that close parenthesis ')'\n" . $herecurr);
+ }
+
+# Return is not a function.
+ if (defined($stat) && $stat =~ /^.\s*return(\s*)(\(.*);/s) {
+ my $spacing = $1;
+ my $value = $2;
+
+ # Flatten any parentheses
+ $value =~ s/\(/ \(/g;
+ $value =~ s/\)/\) /g;
+ while ($value =~ s/\[[^\{\}]*\]/1/ ||
+ $value !~ /(?:$Ident|-?$Constant)\s*
+ $Compare\s*
+ (?:$Ident|-?$Constant)/x &&
+ $value =~ s/\([^\(\)]*\)/1/) {
+ }
+#print "value<$value>\n";
+ if ($value =~ /^\s*(?:$Ident|-?$Constant)\s*$/) {
+ ERROR("return is not a function, parentheses are not required\n" . $herecurr);
+
+ } elsif ($spacing !~ /\s+/) {
+ ERROR("space required before the open parenthesis '('\n" . $herecurr);
+ }
+ }
+# Return of what appears to be an errno should normally be negative
+ if ($line =~ /^.\s*return\s*(E[A-Z]*)\s*;/) {
+ my $name = $1;
+ if ($name ne 'EOF' && $name ne 'ERROR') {
+ CHK("return of an errno should typically be -ve (return -$1)\n" . $herecurr);
+ }
+ }
+
+# Need a space before open parenthesis after if, while etc
+ if ($line=~/\b(if|while|for|switch)\(/) {
+ ERROR("space required before the open parenthesis '('\n" . $herecurr);
+ }
+
+# Check for illegal assignment in if conditional -- and check for trailing
+# statements after the conditional.
+ if ($line =~ /do\s*(?!{)/) {
+ my ($stat_next) = ctx_statement_block($line_nr_next,
+ $remain_next, $off_next);
+ $stat_next =~ s/\n./\n /g;
+ ##print "stat<$stat> stat_next<$stat_next>\n";
+
+ if ($stat_next =~ /^\s*while\b/) {
+ # If the statement carries leading newlines,
+ # then count those as offsets.
+ my ($whitespace) =
+ ($stat_next =~ /^((?:\s*\n[+-])*\s*)/s);
+ my $offset =
+ statement_rawlines($whitespace) - 1;
+
+ $suppress_whiletrailers{$line_nr_next +
+ $offset} = 1;
+ }
+ }
+ if (!defined $suppress_whiletrailers{$linenr} &&
+ $line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
+ my ($s, $c) = ($stat, $cond);
+
+ if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/s) {
+ ERROR("do not use assignment in if condition\n" . $herecurr);
+ }
+
+ # Find out what is on the end of the line after the
+ # conditional.
+ substr($s, 0, length($c), '');
+ $s =~ s/\n.*//g;
+ $s =~ s/$;//g; # Remove any comments
+ if (length($c) && $s !~ /^\s*{?\s*\\*\s*$/ &&
+ $c !~ /}\s*while\s*/)
+ {
+ # Find out how long the conditional actually is.
+ my @newlines = ($c =~ /\n/gs);
+ my $cond_lines = 1 + $#newlines;
+ my $stat_real = '';
+
+ $stat_real = raw_line($linenr, $cond_lines)
+ . "\n" if ($cond_lines);
+ if (defined($stat_real) && $cond_lines > 1) {
+ $stat_real = "[...]\n$stat_real";
+ }
+
+ ERROR("trailing statements should be on next line\n" . $herecurr . $stat_real);
+ }
+ }
+
+# Check for bitwise tests written as boolean
+ if ($line =~ /
+ (?:
+ (?:\[|\(|\&\&|\|\|)
+ \s*0[xX][0-9]+\s*
+ (?:\&\&|\|\|)
+ |
+ (?:\&\&|\|\|)
+ \s*0[xX][0-9]+\s*
+ (?:\&\&|\|\||\)|\])
+ )/x)
+ {
+ WARN("boolean test with hexadecimal, perhaps just 1 \& or \|?\n" . $herecurr);
+ }
+
+# if and else should not have general statements after them
+ if ($line =~ /^.\s*(?:}\s*)?else\b(.*)/) {
+ my $s = $1;
+ $s =~ s/$;//g; # Remove any comments
+ if ($s !~ /^\s*(?:\sif|(?:{|)\s*\\?\s*$)/) {
+ ERROR("trailing statements should be on next line\n" . $herecurr);
+ }
+ }
+# if should not continue a brace
+ if ($line =~ /}\s*if\b/) {
+ ERROR("trailing statements should be on next line\n" .
+ $herecurr);
+ }
+# case and default should not have general statements after them
+ if ($line =~ /^.\s*(?:case\s*.*|default\s*):/g &&
+ $line !~ /\G(?:
+ (?:\s*$;*)(?:\s*{)?(?:\s*$;*)(?:\s*\\)?\s*$|
+ \s*return\s+
+ )/xg)
+ {
+ ERROR("trailing statements should be on next line\n" . $herecurr);
+ }
+
+ # Check for }<nl>else {, these must be at the same
+ # indent level to be relevant to each other.
+ if ($prevline=~/}\s*$/ and $line=~/^.\s*else\s*/ and
+ $previndent == $indent) {
+ ERROR("else should follow close brace '}'\n" . $hereprev);
+ }
+
+ if ($prevline=~/}\s*$/ and $line=~/^.\s*while\s*/ and
+ $previndent == $indent) {
+ my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
+
+ # Find out what is on the end of the line after the
+ # conditional.
+ substr($s, 0, length($c), '');
+ $s =~ s/\n.*//g;
+
+ if ($s =~ /^\s*;/) {
+ ERROR("while should follow close brace '}'\n" . $hereprev);
+ }
+ }
+
+#studly caps, commented out until we figure out how to distinguish between uses of existing identifiers and additions of new ones
+# if (($line=~/[\w_][a-z\d]+[A-Z]/) and !($line=~/print/)) {
+# print "No studly caps, use _\n";
+# print "$herecurr";
+# $clean = 0;
+# }
+
+#no spaces allowed after \ in define
+ if ($line=~/\#\s*define.*\\\s$/) {
+			WARN("Whitespace after \\ makes next lines useless\n" . $herecurr);
+ }
+
+#warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line)
+ if ($tree && $rawline =~ m{^.\s*\#\s*include\s*\<asm\/(.*)\.h\>}) {
+ my $file = "$1.h";
+ my $checkfile = "include/linux/$file";
+ if (-f "$root/$checkfile" &&
+ $realfile ne $checkfile &&
+ $1 !~ /$allowed_asm_includes/)
+ {
+ if ($realfile =~ m{^arch/}) {
+ CHK("Consider using #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
+ } else {
+ WARN("Use #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
+ }
+ }
+ }
+
+# multi-statement macros should be enclosed in a do-while loop; grab the
+# first statement and ensure it is the whole macro if it is not enclosed
+# in a known good container
+ if ($realfile !~ m@/vmlinux.lds.h$@ &&
+ $line =~ /^.\s*\#\s*define\s*$Ident(\()?/) {
+ my $ln = $linenr;
+ my $cnt = $realcnt;
+ my ($off, $dstat, $dcond, $rest);
+ my $ctx = '';
+
+ my $args = defined($1);
+
+ # Find the end of the macro and limit our statement
+ # search to that.
+ while ($cnt > 0 && defined $lines[$ln - 1] &&
+ $lines[$ln - 1] =~ /^(?:-|..*\\$)/)
+ {
+ $ctx .= $rawlines[$ln - 1] . "\n";
+ $cnt-- if ($lines[$ln - 1] !~ /^-/);
+ $ln++;
+ }
+ $ctx .= $rawlines[$ln - 1];
+
+ ($dstat, $dcond, $ln, $cnt, $off) =
+ ctx_statement_block($linenr, $ln - $linenr + 1, 0);
+ #print "dstat<$dstat> dcond<$dcond> cnt<$cnt> off<$off>\n";
+ #print "LINE<$lines[$ln-1]> len<" . length($lines[$ln-1]) . "\n";
+
+ # Extract the remainder of the define (if any) and
+ # rip off surrounding spaces, and trailing \'s.
+ $rest = '';
+ while ($off != 0 || ($cnt > 0 && $rest =~ /\\\s*$/)) {
+ #print "ADDING cnt<$cnt> $off <" . substr($lines[$ln - 1], $off) . "> rest<$rest>\n";
+ if ($off != 0 || $lines[$ln - 1] !~ /^-/) {
+ $rest .= substr($lines[$ln - 1], $off) . "\n";
+ $cnt--;
+ }
+ $ln++;
+ $off = 0;
+ }
+ $rest =~ s/\\\n.//g;
+ $rest =~ s/^\s*//s;
+ $rest =~ s/\s*$//s;
+
+ # Clean up the original statement.
+ if ($args) {
+ substr($dstat, 0, length($dcond), '');
+ } else {
+ $dstat =~ s/^.\s*\#\s*define\s+$Ident\s*//;
+ }
+ $dstat =~ s/$;//g;
+ $dstat =~ s/\\\n.//g;
+ $dstat =~ s/^\s*//s;
+ $dstat =~ s/\s*$//s;
+
+ # Flatten any parentheses and braces
+ while ($dstat =~ s/\([^\(\)]*\)/1/ ||
+ $dstat =~ s/\{[^\{\}]*\}/1/ ||
+ $dstat =~ s/\[[^\{\}]*\]/1/)
+ {
+ }
+
+ my $exceptions = qr{
+ $Declare|
+ module_param_named|
+ MODULE_PARAM_DESC|
+ DECLARE_PER_CPU|
+ DEFINE_PER_CPU|
+ __typeof__\(|
+ union|
+ struct|
+ \.$Ident\s*=\s*|
+ ^\"|\"$
+ }x;
+ #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
+ if ($rest ne '' && $rest ne ',') {
+ if ($rest !~ /while\s*\(/ &&
+ $dstat !~ /$exceptions/)
+ {
+ ERROR("Macros with multiple statements should be enclosed in a do - while loop\n" . "$here\n$ctx\n");
+ }
+
+ } elsif ($ctx !~ /;/) {
+ if ($dstat ne '' &&
+ $dstat !~ /^(?:$Ident|-?$Constant)$/ &&
+ $dstat !~ /$exceptions/ &&
+ $dstat !~ /^\.$Ident\s*=/ &&
+ $dstat =~ /$Operators/)
+ {
+				ERROR("Macros with complex values should be enclosed in parentheses\n" . "$here\n$ctx\n");
+ }
+ }
+ }
+
+# make sure symbols are always wrapped with VMLINUX_SYMBOL() ...
+# all assignments may have only one of the following with an assignment:
+# .
+# ALIGN(...)
+# VMLINUX_SYMBOL(...)
+ if ($realfile eq 'vmlinux.lds.h' && $line =~ /(?:(?:^|\s)$Ident\s*=|=\s*$Ident(?:\s|$))/) {
+ WARN("vmlinux.lds.h needs VMLINUX_SYMBOL() around C-visible symbols\n" . $herecurr);
+ }
+
+# check for missing bracing round if etc
+ if ($line =~ /(^.*)\bif\b/ && $line !~ /\#\s*if/) {
+ my ($level, $endln, @chunks) =
+ ctx_statement_full($linenr, $realcnt, 1);
+ if ($dbg_adv_apw) {
+ print "APW: chunks<$#chunks> linenr<$linenr> endln<$endln> level<$level>\n";
+ print "APW: <<$chunks[1][0]>><<$chunks[1][1]>>\n"
+ if $#chunks >= 1;
+ }
+ if ($#chunks >= 0 && $level == 0) {
+ my $allowed = 0;
+ my $seen = 0;
+ my $herectx = $here . "\n";
+ my $ln = $linenr - 1;
+ for my $chunk (@chunks) {
+ my ($cond, $block) = @{$chunk};
+
+ # If the condition carries leading newlines, then count those as offsets.
+ my ($whitespace) = ($cond =~ /^((?:\s*\n[+-])*\s*)/s);
+ my $offset = statement_rawlines($whitespace) - 1;
+
+ #print "COND<$cond> whitespace<$whitespace> offset<$offset>\n";
+
+ # We have looked at and allowed this specific line.
+ $suppress_ifbraces{$ln + $offset} = 1;
+
+ $herectx .= "$rawlines[$ln + $offset]\n[...]\n";
+ $ln += statement_rawlines($block) - 1;
+
+ substr($block, 0, length($cond), '');
+
+ my $spaced_block = $block;
+ $spaced_block =~ s/\n\+/ /g;
+
+ $seen++ if ($spaced_block =~ /^\s*{/);
+
+ print "APW: cond<$cond> block<$block> allowed<$allowed>\n"
+ if $dbg_adv_apw;
+ if (statement_lines($cond) > 1) {
+ print "APW: ALLOWED: cond<$cond>\n"
+ if $dbg_adv_apw;
+ $allowed = 1;
+ }
+ if ($block =~/\b(?:if|for|while)\b/) {
+ print "APW: ALLOWED: block<$block>\n"
+ if $dbg_adv_apw;
+ $allowed = 1;
+ }
+ if (statement_block_size($block) > 1) {
+ print "APW: ALLOWED: lines block<$block>\n"
+ if $dbg_adv_apw;
+ $allowed = 1;
+ }
+ }
+ if ($seen != ($#chunks + 1)) {
+ WARN("braces {} are necessary for all arms of this statement\n" . $herectx);
+ }
+ }
+ }
+ if (!defined $suppress_ifbraces{$linenr - 1} &&
+ $line =~ /\b(if|while|for|else)\b/ &&
+ $line !~ /\#\s*if/ &&
+ $line !~ /\#\s*else/) {
+ my $allowed = 0;
+
+ # Check the pre-context.
+ if (substr($line, 0, $-[0]) =~ /(\}\s*)$/) {
+ my $pre = $1;
+
+ if ($line !~ /else/) {
+ print "APW: ALLOWED: pre<$pre> line<$line>\n"
+ if $dbg_adv_apw;
+ $allowed = 1;
+ }
+ }
+
+ my ($level, $endln, @chunks) =
+ ctx_statement_full($linenr, $realcnt, $-[0]);
+
+ # Check the condition.
+ my ($cond, $block) = @{$chunks[0]};
+ print "CHECKING<$linenr> cond<$cond> block<$block>\n"
+ if $dbg_adv_checking;
+ if (defined $cond) {
+ substr($block, 0, length($cond), '');
+ }
+ if (statement_lines($cond) > 1) {
+ print "APW: ALLOWED: cond<$cond>\n"
+ if $dbg_adv_apw;
+ $allowed = 1;
+ }
+ if ($block =~/\b(?:if|for|while)\b/) {
+ print "APW: ALLOWED: block<$block>\n"
+ if $dbg_adv_apw;
+ $allowed = 1;
+ }
+ if (statement_block_size($block) > 1) {
+ print "APW: ALLOWED: lines block<$block>\n"
+ if $dbg_adv_apw;
+ $allowed = 1;
+ }
+ # Check the post-context.
+ if (defined $chunks[1]) {
+ my ($cond, $block) = @{$chunks[1]};
+ if (defined $cond) {
+ substr($block, 0, length($cond), '');
+ }
+ if ($block =~ /^\s*\{/) {
+ print "APW: ALLOWED: chunk-1 block<$block>\n"
+ if $dbg_adv_apw;
+ $allowed = 1;
+ }
+ }
+ print "DCS: level=$level block<$block> allowed=$allowed\n"
+ if $dbg_adv_dcs;
+ if ($level == 0 && $block !~ /^\s*\{/ && !$allowed) {
+			my $herectx = $here . "\n";
+ my $cnt = statement_rawlines($block);
+
+ for (my $n = 0; $n < $cnt; $n++) {
+				$herectx .= raw_line($linenr, $n) . "\n";
+ }
+
+ WARN("braces {} are necessary even for single statement blocks\n" . $herectx);
+ }
+ }
+
+# don't include deprecated include files (uses RAW line)
+ for my $inc (@dep_includes) {
+ if ($rawline =~ m@^.\s*\#\s*include\s*\<$inc>@) {
+ ERROR("Don't use <$inc>: see Documentation/feature-removal-schedule.txt\n" . $herecurr);
+ }
+ }
+
+# don't use deprecated functions
+ for my $func (@dep_functions) {
+ if ($line =~ /\b$func\b/) {
+ ERROR("Don't use $func(): see Documentation/feature-removal-schedule.txt\n" . $herecurr);
+ }
+ }
+
+# no volatiles please
+ my $asm_volatile = qr{\b(__asm__|asm)\s+(__volatile__|volatile)\b};
+ if ($line =~ /\bvolatile\b/ && $line !~ /$asm_volatile/) {
+ WARN("Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\n" . $herecurr);
+ }
+
+# SPIN_LOCK_UNLOCKED & RW_LOCK_UNLOCKED are deprecated
+ if ($line =~ /\b(SPIN_LOCK_UNLOCKED|RW_LOCK_UNLOCKED)/) {
+ ERROR("Use of $1 is deprecated: see Documentation/spinlocks.txt\n" . $herecurr);
+ }
+
+# warn about #if 0
+ if ($line =~ /^.\s*\#\s*if\s+0\b/) {
+ CHK("if this code is redundant consider removing it\n" .
+ $herecurr);
+ }
+
+# check for needless kfree() checks
+ if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
+ my $expr = $1;
+ if ($line =~ /\bkfree\(\Q$expr\E\);/) {
+ WARN("kfree(NULL) is safe this check is probably not required\n" . $hereprev);
+ }
+ }
+# check for needless usb_free_urb() checks
+ if ($prevline =~ /\bif\s*\(([^\)]*)\)/) {
+ my $expr = $1;
+ if ($line =~ /\busb_free_urb\(\Q$expr\E\);/) {
+ WARN("usb_free_urb(NULL) is safe this check is probably not required\n" . $hereprev);
+ }
+ }
+
+# prefer usleep_range over udelay
+ if ($line =~ /\budelay\s*\(\s*(\w+)\s*\)/) {
+ # ignore udelay values < 10, however
+ if (! (($1 =~ /(\d+)/) && ($1 < 10)) ) {
+ CHK("usleep_range is preferred over udelay; see Documentation/timers/timers-howto.txt\n" . $line);
+ }
+ }
+
+# warn about unexpectedly long msleep's
+ if ($line =~ /\bmsleep\s*\((\d+)\);/) {
+ if ($1 < 20) {
+ WARN("msleep < 20ms can sleep for up to 20ms; see Documentation/timers/timers-howto.txt\n" . $line);
+ }
+ }
+
+# warn about #ifdefs in C files
+# if ($line =~ /^.\s*\#\s*if(|n)def/ && ($realfile =~ /\.c$/)) {
+# print "#ifdef in C files should be avoided\n";
+# print "$herecurr";
+# $clean = 0;
+# }
+
+# warn about spacing in #ifdefs
+ if ($line =~ /^.\s*\#\s*(ifdef|ifndef|elif)\s\s+/) {
+ ERROR("exactly one space required after that #$1\n" . $herecurr);
+ }
+
+# check for spinlock_t definitions without a comment.
+ if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/ ||
+ $line =~ /^.\s*(DEFINE_MUTEX)\s*\(/) {
+ my $which = $1;
+ if (!ctx_has_comment($first_line, $linenr)) {
+ CHK("$1 definition without comment\n" . $herecurr);
+ }
+ }
+# check for memory barriers without a comment.
+ if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
+ if (!ctx_has_comment($first_line, $linenr)) {
+ CHK("memory barrier without comment\n" . $herecurr);
+ }
+ }
+# check for hardware-specific defines
+ if ($line =~ m@^.\s*\#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
+ CHK("architecture specific defines should be avoided\n" . $herecurr);
+ }
+
+# Check that the storage class is at the beginning of a declaration
+ if ($line =~ /\b$Storage\b/ && $line !~ /^.\s*$Storage\b/) {
+ WARN("storage class should be at the beginning of the declaration\n" . $herecurr)
+ }
+
+# check the location of the inline attribute: it should sit between
+# the storage class and the type.
+ if ($line =~ /\b$Type\s+$Inline\b/ ||
+ $line =~ /\b$Inline\s+$Storage\b/) {
+ ERROR("inline keyword should sit between storage class and type\n" . $herecurr);
+ }
+
+# Check for __inline__ and __inline, prefer inline
+ if ($line =~ /\b(__inline__|__inline)\b/) {
+ WARN("plain inline is preferred over $1\n" . $herecurr);
+ }
+
+# check for sizeof(&)
+ if ($line =~ /\bsizeof\s*\(\s*\&/) {
+ WARN("sizeof(& should be avoided\n" . $herecurr);
+ }
+
+# check for new externs in .c files.
+ if ($realfile =~ /\.c$/ && defined $stat &&
+ $stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
+ {
+ my $function_name = $1;
+ my $paren_space = $2;
+
+ my $s = $stat;
+ if (defined $cond) {
+ substr($s, 0, length($cond), '');
+ }
+ if ($s =~ /^\s*;/ &&
+ $function_name ne 'uninitialized_var')
+ {
+ WARN("externs should be avoided in .c files\n" . $herecurr);
+ }
+
+ if ($paren_space =~ /\n/) {
+ WARN("arguments for function declarations should follow identifier\n" . $herecurr);
+ }
+
+ } elsif ($realfile =~ /\.c$/ && defined $stat &&
+ $stat =~ /^.\s*extern\s+/)
+ {
+ WARN("externs should be avoided in .c files\n" . $herecurr);
+ }
+
+# checks for new __setup's
+ if ($rawline =~ /\b__setup\("([^"]*)"/) {
+ my $name = $1;
+
+ if (!grep(/$name/, @setup_docs)) {
+ CHK("__setup appears un-documented -- check Documentation/kernel-parameters.txt\n" . $herecurr);
+ }
+ }
+
+# check for pointless casting of kmalloc return
+ if ($line =~ /\*\s*\)\s*k[czm]alloc\b/) {
+ WARN("unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr);
+ }
+
+# check for gcc specific __FUNCTION__
+ if ($line =~ /__FUNCTION__/) {
+ WARN("__func__ should be used instead of gcc specific __FUNCTION__\n" . $herecurr);
+ }
+
+# check for semaphores used as mutexes
+ if ($line =~ /^.\s*(DECLARE_MUTEX|init_MUTEX)\s*\(/) {
+ WARN("mutexes are preferred for single holder semaphores\n" . $herecurr);
+ }
+# check for semaphores used as mutexes
+ if ($line =~ /^.\s*init_MUTEX_LOCKED\s*\(/) {
+ WARN("consider using a completion\n" . $herecurr);
+
+ }
+# recommend strict_strto* over simple_strto*
+ if ($line =~ /\bsimple_(strto.*?)\s*\(/) {
+ WARN("consider using strict_$1 in preference to simple_$1\n" . $herecurr);
+ }
+# check for __initcall(), use device_initcall() explicitly please
+ if ($line =~ /^.\s*__initcall\s*\(/) {
+ WARN("please use device_initcall() instead of __initcall()\n" . $herecurr);
+ }
+# check for various ops structs, ensure they are const.
+ my $struct_ops = qr{acpi_dock_ops|
+ address_space_operations|
+ backlight_ops|
+ block_device_operations|
+ dentry_operations|
+ dev_pm_ops|
+ dma_map_ops|
+ extent_io_ops|
+ file_lock_operations|
+ file_operations|
+ hv_ops|
+ ide_dma_ops|
+ intel_dvo_dev_ops|
+ item_operations|
+ iwl_ops|
+ kgdb_arch|
+ kgdb_io|
+ kset_uevent_ops|
+ lock_manager_operations|
+ microcode_ops|
+ mtrr_ops|
+ neigh_ops|
+ nlmsvc_binding|
+ pci_raw_ops|
+ pipe_buf_operations|
+ platform_hibernation_ops|
+ platform_suspend_ops|
+ proto_ops|
+ rpc_pipe_ops|
+ seq_operations|
+ snd_ac97_build_ops|
+ soc_pcmcia_socket_ops|
+ stacktrace_ops|
+ sysfs_ops|
+ tty_operations|
+ usb_mon_operations|
+ wd_ops}x;
+ if ($line !~ /\bconst\b/ &&
+ $line =~ /\bstruct\s+($struct_ops)\b/) {
+ WARN("struct $1 should normally be const\n" .
+ $herecurr);
+ }
+
+# use of NR_CPUS is usually wrong
+# ignore definitions of NR_CPUS and uses that size arrays, as those are likely right
+ if ($line =~ /\bNR_CPUS\b/ &&
+ $line !~ /^.\s*\s*#\s*if\b.*\bNR_CPUS\b/ &&
+ $line !~ /^.\s*\s*#\s*define\b.*\bNR_CPUS\b/ &&
+ $line !~ /^.\s*$Declare\s.*\[[^\]]*NR_CPUS[^\]]*\]/ &&
+ $line !~ /\[[^\]]*\.\.\.[^\]]*NR_CPUS[^\]]*\]/ &&
+ $line !~ /\[[^\]]*NR_CPUS[^\]]*\.\.\.[^\]]*\]/)
+ {
+ WARN("usage of NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc\n" . $herecurr);
+ }
+
+# check for %L{u,d,i} in strings
+ my $string;
+ while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) {
+ $string = substr($rawline, $-[1], $+[1] - $-[1]);
+ $string =~ s/%%/__/g;
+ if ($string =~ /(?<!%)%L[udi]/) {
+ WARN("\%Ld/%Lu are not-standard C, use %lld/%llu\n" . $herecurr);
+ last;
+ }
+ }
+
+# whine mightily about in_atomic
+ if ($line =~ /\bin_atomic\s*\(/) {
+ if ($realfile =~ m@^drivers/@) {
+ ERROR("do not use in_atomic in drivers\n" . $herecurr);
+ } elsif ($realfile !~ m@^kernel/@) {
+ WARN("use of in_atomic() is incorrect outside core kernel code\n" . $herecurr);
+ }
+ }
+
+# check for lockdep_set_novalidate_class
+ if ($line =~ /^.\s*lockdep_set_novalidate_class\s*\(/ ||
+ $line =~ /__lockdep_no_validate__\s*\)/ ) {
+ if ($realfile !~ m@^kernel/lockdep@ &&
+ $realfile !~ m@^include/linux/lockdep@ &&
+ $realfile !~ m@^drivers/base/core@) {
+ ERROR("lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr);
+ }
+ }
+
+# QEMU specific tests
+ if ($rawline =~ /\b(?:Qemu|QEmu)\b/) {
+ WARN("use QEMU instead of Qemu or QEmu\n" . $herecurr);
+ }
+
+# check for non-portable ffs() calls that have portable alternatives in QEMU
+ if ($line =~ /\bffs\(/) {
+ ERROR("use ctz32() instead of ffs()\n" . $herecurr);
+ }
+ if ($line =~ /\bffsl\(/) {
+ ERROR("use ctz32() or ctz64() instead of ffsl()\n" . $herecurr);
+ }
+ if ($line =~ /\bffsll\(/) {
+ ERROR("use ctz64() instead of ffsll()\n" . $herecurr);
+ }
+ }
+
+ # If we have no input at all, then there is nothing to report on
+ # so just keep quiet.
+ if ($#rawlines == -1) {
+ exit(0);
+ }
+
+ # In mailback mode only produce a report in the negative, for
+ # things that appear to be patches.
+ if ($mailback && ($clean == 1 || !$is_patch)) {
+ exit(0);
+ }
+
+ # This is not a patch, and we are in 'no-patch' mode so
+ # just keep quiet.
+ if (!$chk_patch && !$is_patch) {
+ exit(0);
+ }
+
+ if (!$is_patch) {
+ ERROR("Does not appear to be a unified-diff format patch\n");
+ }
+ if ($is_patch && $chk_signoff && $signoff == 0) {
+ ERROR("Missing Signed-off-by: line(s)\n");
+ }
+
+ print report_dump();
+ if ($summary && !($clean == 1 && $quiet == 1)) {
+ print "$filename " if ($summary_file);
+ print "total: $cnt_error errors, $cnt_warn warnings, " .
+ (($check)? "$cnt_chk checks, " : "") .
+ "$cnt_lines lines checked\n";
+ print "\n" if ($quiet == 0);
+ }
+
+ if ($quiet == 0) {
+ # If there were whitespace errors which cleanpatch can fix
+ # then suggest that.
+# if ($rpt_cleaners) {
+# print "NOTE: whitespace errors detected, you may wish to use scripts/cleanpatch or\n";
+# print " scripts/cleanfile\n\n";
+# }
+ }
+
+ if ($clean == 1 && $quiet == 0) {
+ print "$vname has no obvious style problems and is ready for submission.\n"
+ }
+ if ($clean == 0 && $quiet == 0) {
+ print "$vname has style problems, please review. If any of these errors\n";
+ print "are false positives report them to the maintainer, see\n";
+ print "CHECKPATCH in MAINTAINERS.\n";
+ }
+
+ return $clean;
+}
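As a minimal illustration (not part of checkpatch.pl; the sample lines are made up), the QEMU-specific checks above are plain regular-expression matches. A small Python sketch of how the Qemu/QEmu spelling check and the ffs() check behave on candidate lines:

    import re

    samples = [
        "    /* Qemu uses this helper */",   # triggers the spelling warning
        "    int bit = ffs(mask);",          # triggers the ffs() error
        "    QEMU is spelled correctly",     # passes both checks
    ]
    for line in samples:
        if re.search(r"\b(?:Qemu|QEmu)\b", line):
            print("WARNING: use QEMU instead of Qemu or QEmu: " + line)
        if re.search(r"\bffs\(", line):
            print("ERROR: use ctz32() instead of ffs(): " + line)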
diff --git a/scripts/cleanup-trace-events.pl b/scripts/cleanup-trace-events.pl
new file mode 100755
index 00000000..7e808efb
--- /dev/null
+++ b/scripts/cleanup-trace-events.pl
@@ -0,0 +1,51 @@
+#!/usr/bin/perl
+# Copyright (C) 2013 Red Hat, Inc.
+#
+# Authors:
+# Markus Armbruster <armbru@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+# Usage: cleanup-trace-events.pl trace-events
+#
+# Print cleaned up trace-events to standard output.
+
+use warnings;
+use strict;
+
+my $buf = '';
+my %seen = ();
+
+sub out {
+ print $buf;
+ $buf = '';
+ %seen = ();
+}
+
+while (<>) {
+ if (/^(disable )?([a-z_0-9]+)\(/) {
+ open GREP, '-|', 'git', 'grep', '-lw', "trace_$2"
+ or die "run git grep: $!";
+ my $fname;
+ while ($fname = <GREP>) {
+ chomp $fname;
+ next if $seen{$fname} || $fname eq 'trace-events';
+ $seen{$fname} = 1;
+ $buf = "# $fname\n" . $buf;
+ }
+ unless (close GREP) {
+ die "close git grep: $!"
+ if $!;
+ next;
+ }
+ } elsif (/^# ([^ ]*\.[ch])$/) {
+ out;
+ next;
+ } elsif (!/^#|^$/) {
+ warn "unintelligible line";
+ }
+ $buf .= $_;
+}
+
+out;
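A small sketch (Python, with an assumed trace-events definition line) of the pattern the script keys on: an event definition yields the event name, which the script then looks up as trace_<name> via git grep to decide which file comment the event should be grouped under:

    import re

    line = 'qemu_vmalloc(size_t size, void *ptr) "size %zu ptr %p"'   # assumed example line
    m = re.match(r'(disable )?([a-z_0-9]+)\(', line)
    if m:
        print("would run: git grep -lw trace_" + m.group(2))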
diff --git a/scripts/coverity-model.c b/scripts/coverity-model.c
new file mode 100644
index 00000000..617f67d7
--- /dev/null
+++ b/scripts/coverity-model.c
@@ -0,0 +1,343 @@
+/* Coverity Scan model
+ *
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>
+ * Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or, at your
+ * option, any later version. See the COPYING file in the top-level directory.
+ */
+
+
+/*
+ * This is the source code for our Coverity user model file. The
+ * purpose of user models is to increase scanning accuracy by explaining
+ * code Coverity can't see (out of tree libraries) or doesn't
+ * sufficiently understand. Better accuracy means both fewer false
+ * positives and more true defects. Memory leaks in particular.
+ *
+ * - A model file can't import any header files. Some built-in primitives are
+ * available but not wchar_t, NULL etc.
+ * - Modeling doesn't need full structs and typedefs. Rudimentary structs
+ * and similar types are sufficient.
+ * - An uninitialized local variable signifies that the variable could be
+ * any value.
+ *
+ * The model file must be uploaded by an admin in the analysis settings of
+ * http://scan.coverity.com/projects/378
+ */
+
+#define NULL ((void *)0)
+
+typedef unsigned char uint8_t;
+typedef char int8_t;
+typedef unsigned int uint32_t;
+typedef int int32_t;
+typedef long ssize_t;
+typedef unsigned long long uint64_t;
+typedef long long int64_t;
+typedef _Bool bool;
+
+typedef struct va_list_str *va_list;
+
+/* exec.c */
+
+typedef struct AddressSpace AddressSpace;
+typedef uint64_t hwaddr;
+typedef uint32_t MemTxResult;
+typedef uint64_t MemTxAttrs;
+
+static void __bufwrite(uint8_t *buf, ssize_t len)
+{
+ int first, last;
+ __coverity_negative_sink__(len);
+ if (len == 0) return;
+ buf[0] = first;
+ buf[len-1] = last;
+ __coverity_writeall__(buf);
+}
+
+static void __bufread(uint8_t *buf, ssize_t len)
+{
+ __coverity_negative_sink__(len);
+ if (len == 0) return;
+ int first = buf[0];
+ int last = buf[len-1];
+}
+
+MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
+ uint8_t *buf, int len, bool is_write)
+{
+ MemTxResult result;
+
+ // TODO: investigate impact of treating reads as producing
+ // tainted data, with __coverity_tainted_data_argument__(buf).
+ if (is_write) __bufread(buf, len); else __bufwrite(buf, len);
+
+ return result;
+}
+
+/* Tainting */
+
+typedef struct {} name2keysym_t;
+static int get_keysym(const name2keysym_t *table,
+ const char *name)
+{
+ int result;
+ if (result > 0) {
+ __coverity_tainted_string_sanitize_content__(name);
+ return result;
+ } else {
+ return 0;
+ }
+}
+
+/*
+ * GLib memory allocation functions.
+ *
+ * Note that we ignore the fact that g_malloc of 0 bytes returns NULL,
+ * and g_realloc of 0 bytes frees the pointer.
+ *
+ * Modeling this would result in Coverity flagging a lot of memory
+ * allocations as potentially returning NULL, and asking us to check
+ * whether the result of the allocation is NULL or not. However, the
+ * resulting pointer should never be dereferenced anyway, and in fact
+ * it is not in the vast majority of cases.
+ *
+ * If a dereference did happen, this would suppress a defect report
+ * for an actual null pointer dereference. But it's too unlikely to
+ * be worth wading through the false positives, and with some luck
+ * we'll get a buffer overflow reported anyway.
+ */
+
+/*
+ * Allocation primitives, cannot return NULL
+ * See also Coverity's library/generic/libc/all/all.c
+ */
+
+void *g_malloc_n(size_t nmemb, size_t size)
+{
+ size_t sz;
+ void *ptr;
+
+ __coverity_negative_sink__(nmemb);
+ __coverity_negative_sink__(size);
+ sz = nmemb * size;
+ ptr = __coverity_alloc__(sz);
+ __coverity_mark_as_uninitialized_buffer__(ptr);
+ __coverity_mark_as_afm_allocated__(ptr, "g_free");
+ return ptr;
+}
+
+void *g_malloc0_n(size_t nmemb, size_t size)
+{
+ size_t sz;
+ void *ptr;
+
+ __coverity_negative_sink__(nmemb);
+ __coverity_negative_sink__(size);
+ sz = nmemb * size;
+ ptr = __coverity_alloc__(sz);
+ __coverity_writeall0__(ptr);
+ __coverity_mark_as_afm_allocated__(ptr, "g_free");
+ return ptr;
+}
+
+void *g_realloc_n(void *ptr, size_t nmemb, size_t size)
+{
+ size_t sz;
+
+ __coverity_negative_sink__(nmemb);
+ __coverity_negative_sink__(size);
+ sz = nmemb * size;
+ __coverity_escape__(ptr);
+ ptr = __coverity_alloc__(sz);
+ /*
+ * Memory beyond the old size isn't actually initialized. Can't
+ * model that. See Coverity's realloc() model
+ */
+ __coverity_writeall__(ptr);
+ __coverity_mark_as_afm_allocated__(ptr, "g_free");
+ return ptr;
+}
+
+void g_free(void *ptr)
+{
+ __coverity_free__(ptr);
+ __coverity_mark_as_afm_freed__(ptr, "g_free");
+}
+
+/*
+ * Derive the g_try_FOO_n() from the g_FOO_n() by adding indeterminate
+ * out of memory conditions
+ */
+
+void *g_try_malloc_n(size_t nmemb, size_t size)
+{
+ int nomem;
+
+ if (nomem) {
+ return NULL;
+ }
+ return g_malloc_n(nmemb, size);
+}
+
+void *g_try_malloc0_n(size_t nmemb, size_t size)
+{
+ int nomem;
+
+ if (nomem) {
+ return NULL;
+ }
+ return g_malloc0_n(nmemb, size);
+}
+
+void *g_try_realloc_n(void *ptr, size_t nmemb, size_t size)
+{
+ int nomem;
+
+ if (nomem) {
+ return NULL;
+ }
+ return g_realloc_n(ptr, nmemb, size);
+}
+
+/* Trivially derive the g_FOO() from the g_FOO_n() */
+
+void *g_malloc(size_t size)
+{
+ return g_malloc_n(1, size);
+}
+
+void *g_malloc0(size_t size)
+{
+ return g_malloc0_n(1, size);
+}
+
+void *g_realloc(void *ptr, size_t size)
+{
+ return g_realloc_n(ptr, 1, size);
+}
+
+void *g_try_malloc(size_t size)
+{
+ return g_try_malloc_n(1, size);
+}
+
+void *g_try_malloc0(size_t size)
+{
+ return g_try_malloc0_n(1, size);
+}
+
+void *g_try_realloc(void *ptr, size_t size)
+{
+ return g_try_realloc_n(ptr, 1, size);
+}
+
+/*
+ * GLib string allocation functions
+ */
+
+char *g_strdup(const char *s)
+{
+ char *dup;
+ size_t i;
+
+ if (!s) {
+ return NULL;
+ }
+
+ __coverity_string_null_sink__(s);
+ __coverity_string_size_sink__(s);
+ dup = __coverity_alloc_nosize__();
+ __coverity_mark_as_afm_allocated__(dup, "g_free");
+ for (i = 0; (dup[i] = s[i]); i++) ;
+ return dup;
+}
+
+char *g_strndup(const char *s, size_t n)
+{
+ char *dup;
+ size_t i;
+
+ __coverity_negative_sink__(n);
+
+ if (!s) {
+ return NULL;
+ }
+
+ dup = g_malloc(n + 1);
+ for (i = 0; i < n && (dup[i] = s[i]); i++) ;
+ dup[i] = 0;
+ return dup;
+}
+
+char *g_strdup_printf(const char *format, ...)
+{
+ char ch, *s;
+ size_t len;
+
+ __coverity_string_null_sink__(format);
+ __coverity_string_size_sink__(format);
+
+ ch = *format;
+
+ s = __coverity_alloc_nosize__();
+ __coverity_writeall__(s);
+ __coverity_mark_as_afm_allocated__(s, "g_free");
+ return s;
+}
+
+char *g_strdup_vprintf(const char *format, va_list ap)
+{
+ char ch, *s;
+ size_t len;
+
+ __coverity_string_null_sink__(format);
+ __coverity_string_size_sink__(format);
+
+ ch = *format;
+ ch = *(char *)ap;
+
+ s = __coverity_alloc_nosize__();
+ __coverity_writeall__(s);
+ __coverity_mark_as_afm_allocated__(s, "g_free");
+
+ return s;
+}
+
+char *g_strconcat(const char *s, ...)
+{
+ char *ret;
+
+ /*
+ * Can't model: last argument must be null, the others
+ * null-terminated strings
+ */
+
+ ret = __coverity_alloc_nosize__();
+ __coverity_writeall__(ret);
+ __coverity_mark_as_afm_allocated__(ret, "g_free");
+ return ret;
+}
+
+/* Other glib functions */
+
+typedef struct _GIOChannel GIOChannel;
+GIOChannel *g_io_channel_unix_new(int fd)
+{
+ GIOChannel *c = g_malloc0(sizeof(GIOChannel));
+ __coverity_escape__(fd);
+ return c;
+}
+
+void g_assertion_message_expr(const char *domain,
+ const char *file,
+ int line,
+ const char *func,
+ const char *expr)
+{
+ __coverity_panic__();
+}
diff --git a/scripts/create_config b/scripts/create_config
new file mode 100755
index 00000000..546f8891
--- /dev/null
+++ b/scripts/create_config
@@ -0,0 +1,116 @@
+#!/bin/sh
+
+echo "/* Automatically generated by create_config - do not modify */"
+
+while read line; do
+
+case $line in
+ VERSION=*) # configuration
+ version=${line#*=}
+ echo "#define QEMU_VERSION \"$version\""
+ ;;
+ PKGVERSION=*) # configuration
+ pkgversion=${line#*=}
+ echo "#define QEMU_PKGVERSION \"$pkgversion\""
+ ;;
+ qemu_*dir=*) # qemu-specific directory configuration
+ name=${line%=*}
+ value=${line#*=}
+ define_name=`echo $name | LC_ALL=C tr '[a-z]' '[A-Z]'`
+ eval "define_value=\"$value\""
+ echo "#define CONFIG_$define_name \"$define_value\""
+ # save for the next definitions
+ eval "$name=\$define_value"
+ ;;
+ prefix=*)
+ # save for the next definitions
+ prefix=${line#*=}
+ ;;
+ IASL=*) # iasl executable
+ value=${line#*=}
+ echo "#define CONFIG_IASL $value"
+ ;;
+ CONFIG_AUDIO_DRIVERS=*)
+ drivers=${line#*=}
+ echo "#define CONFIG_AUDIO_DRIVERS \\"
+ for drv in $drivers; do
+ echo " &${drv}_audio_driver,\\"
+ done
+ echo ""
+ ;;
+ CONFIG_BDRV_RW_WHITELIST=*)
+ echo "#define CONFIG_BDRV_RW_WHITELIST\\"
+ for drv in ${line#*=}; do
+ echo " \"${drv}\",\\"
+ done
+ echo " NULL"
+ ;;
+ CONFIG_BDRV_RO_WHITELIST=*)
+ echo "#define CONFIG_BDRV_RO_WHITELIST\\"
+ for drv in ${line#*=}; do
+ echo " \"${drv}\",\\"
+ done
+ echo " NULL"
+ ;;
+ CONFIG_*=y) # configuration
+ name=${line%=*}
+ echo "#define $name 1"
+ ;;
+ CONFIG_*=*) # configuration
+ name=${line%=*}
+ value=${line#*=}
+ echo "#define $name $value"
+ ;;
+ ARCH=*) # configuration
+ arch=${line#*=}
+ arch_name=`echo $arch | LC_ALL=C tr '[a-z]' '[A-Z]'`
+ echo "#define HOST_$arch_name 1"
+ ;;
+ HOST_USB=*)
+ # do nothing
+ ;;
+ HOST_CC=*)
+ # do nothing
+ ;;
+ HOST_*=y) # configuration
+ name=${line%=*}
+ echo "#define $name 1"
+ ;;
+ HOST_*=*) # configuration
+ name=${line%=*}
+ value=${line#*=}
+ echo "#define $name $value"
+ ;;
+ TARGET_BASE_ARCH=*) # configuration
+ target_base_arch=${line#*=}
+ base_arch_name=`echo $target_base_arch | LC_ALL=C tr '[a-z]' '[A-Z]'`
+ echo "#define TARGET_$base_arch_name 1"
+ ;;
+ TARGET_XML_FILES=*)
+ # do nothing
+ ;;
+ TARGET_ABI_DIR=*)
+ # do nothing
+ ;;
+ TARGET_NAME=*)
+ target_name=${line#*=}
+ echo "#define TARGET_NAME \"$target_name\""
+ ;;
+ TARGET_DIRS=*)
+ # do nothing
+ ;;
+ TARGET_*=y) # configuration
+ name=${line%=*}
+ echo "#define $name 1"
+ ;;
+ TARGET_*=*) # configuration
+ name=${line%=*}
+ value=${line#*=}
+ echo "#define $name $value"
+ ;;
+ DSOSUF=*)
+ echo "#define HOST_DSOSUF \"${line#*=}\""
+ ;;
+esac
+
+done # read
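A rough Python sketch (the input lines are hypothetical config-host.mak entries) of the two most common transformations performed by the case statement above:

    samples = ["CONFIG_KVM=y", "TARGET_NAME=x86_64"]   # hypothetical config entries

    for line in samples:
        name, _, value = line.partition("=")
        if value == "y":
            print("#define %s 1" % name)               # the CONFIG_*/HOST_*/TARGET_*=y arms
        elif name == "TARGET_NAME":
            print('#define TARGET_NAME "%s"' % value)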
diff --git a/scripts/disas-objdump.pl b/scripts/disas-objdump.pl
new file mode 100755
index 00000000..8f7e8182
--- /dev/null
+++ b/scripts/disas-objdump.pl
@@ -0,0 +1,99 @@
+#!/usr/bin/perl -w
+
+use File::Temp qw/ tempfile /;
+use Getopt::Long;
+
+# Default to the system objdump if a cross-compiler edition is not given.
+my $aobjdump = "objdump";
+my $hobjdump = "";
+my $tobjdump = "";
+my $hmachine = "";
+my $tmachine = "";
+
+GetOptions ('O|objdump=s' => \$aobjdump,
+ 'host-objdump=s' => \$hobjdump,
+ 'target-objdump=s' => \$tobjdump,
+ 'h|host-machine=s' => \$hmachine,
+ 't|target-machine=s' => \$tmachine);
+
+# But we can't default the machines. Sanity check that we have at least one.
+die "No host or target machine type" if !$hmachine && !$tmachine;
+
+# Reuse one temp file for all of the hunks.
+my ($outh, $outname) = tempfile();
+binmode($outh);
+END { unlink $outname; }
+
+# Pre-construct the command-lines for executing the dump.
+sub mkobjcommand ($$) {
+ my ($cmd, $mach) = @_;
+ return 0 if !$mach;
+ $cmd = $aobjdump if !$cmd;
+ return "$cmd -m $mach --disassemble-all -b binary";
+}
+
+$objdump[1] = mkobjcommand($hobjdump, $hmachine);
+$objdump[2] = mkobjcommand($tobjdump, $tmachine);
+
+# Zero-initialize current dumping state.
+my $mem = "";
+my $inobjd = 0;
+my $vma = 0;
+
+sub objcommand {
+ my $ret = $objdump[$inobjd];
+ if (!$ret) {
+ die "Host machine type not specified" if $inobjd == 1;
+ die "Target machine type not specified" if $inobjd == 2;
+ die "Internal error";
+ }
+ return $ret;
+}
+
+while (<>) {
+ # Collect the data from the relevant OBJD-* lines ...
+ if (/^OBJD-H: /) {
+ die "Internal error" if $inobjd == 2;
+ $mem = $mem . pack("H*", substr($_, 8, -1));
+ $inobjd = 1;
+ } elsif (/^OBJD-T: /) {
+ die "Internal error" if $inobjd == 1;
+ $mem = $mem . pack("H*", substr($_, 8, -1));
+ $inobjd = 2;
+ }
+ # ... which will always be followed by a blank line,
+ # at which point we should produce our dump.
+ elsif ($inobjd) {
+ # Rewrite the temp file in one go; it will usually be small.
+ sysseek $outh, 0, 0;
+ truncate $outh, 0;
+ syswrite $outh, $mem;
+
+ my $cmd = objcommand();
+ $cmd = $cmd . " --adjust-vma=" . $vma if $vma;
+ $cmd = $cmd . " " . $outname;
+
+ # Pipe from objdump...
+ open IN, "-|", $cmd;
+
+ # ... copying all but the first 7 lines of boilerplate to our stdout.
+ my $i = 0;
+ while (<IN>) {
+ print if (++$i > 7);
+ }
+ close IN;
+ print "\n";
+
+ $mem = "";
+ $inobjd = 0;
+ $vma = 0;
+ }
+ # The line before "OBJD-*" will be of the form "0x<hex>+: +\n".
+ # Extract the value for passing to --adjust-vma.
+ elsif (/^(0x[0-9a-fA-F]+):\s*$/) {
+ $vma = $1;
+ print;
+ } else {
+ print;
+ }
+}
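A minimal Python sketch (the log fragment is made up) of the input format the script consumes: a "0x<hex>:" line carries the address later passed to --adjust-vma, and the following OBJD-H/OBJD-T line carries the code bytes as hex, which pack("H*", ...) turns back into binary:

    log = ["0x40001000:", "OBJD-T: 90c3"]      # made-up fragment

    vma = None
    for line in log:
        if line.startswith("0x") and line.endswith(":"):
            vma = line[:-1]
        elif line.startswith("OBJD-T: "):
            code = bytearray.fromhex(line[len("OBJD-T: "):])
            print("would disassemble %d bytes with --adjust-vma=%s" % (len(code), vma))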
diff --git a/scripts/dump-guest-memory.py b/scripts/dump-guest-memory.py
new file mode 100644
index 00000000..08796fff
--- /dev/null
+++ b/scripts/dump-guest-memory.py
@@ -0,0 +1,339 @@
+# This python script adds a new gdb command, "dump-guest-memory". It
+# should be loaded with "source dump-guest-memory.py" at the (gdb)
+# prompt.
+#
+# Copyright (C) 2013, Red Hat, Inc.
+#
+# Authors:
+# Laszlo Ersek <lersek@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later. See
+# the COPYING file in the top-level directory.
+#
+# The leading docstring doesn't have idiomatic Python formatting. It is
+# printed by gdb's "help" command (the first line is printed in the
+# "help data" summary), and it should match how other help texts look in
+# gdb.
+
+import struct
+
+class DumpGuestMemory(gdb.Command):
+ """Extract guest vmcore from qemu process coredump.
+
+The sole argument is FILE, identifying the target file to write the
+guest vmcore to.
+
+This GDB command reimplements the dump-guest-memory QMP command in
+python, using the representation of guest memory as captured in the qemu
+coredump. The qemu process that has been dumped must have had the
+command line option "-machine dump-guest-core=on".
+
+For simplicity, the "paging", "begin" and "end" parameters of the QMP
+command are not supported -- no attempt is made to get the guest's
+internal paging structures (i.e. paging=false is hard-wired), and guest
+memory is always fully dumped.
+
+Only x86_64 guests are supported.
+
+The CORE/NT_PRSTATUS and QEMU notes (that is, the VCPUs' statuses) are
+not written to the vmcore. Preparing these would require context that is
+only present in the KVM host kernel module when the guest is alive. A
+fake ELF note is written instead, only to keep the ELF parser of "crash"
+happy.
+
+Depending on how busted the qemu process was at the time of the
+coredump, this command might produce unpredictable results. If qemu
+deliberately called abort(), or it was dumped in response to a signal at
+a halfway fortunate point, then its coredump should be in reasonable
+shape and this command should mostly work."""
+
+ TARGET_PAGE_SIZE = 0x1000
+ TARGET_PAGE_MASK = 0xFFFFFFFFFFFFF000
+
+ # Various ELF constants
+ EM_X86_64 = 62 # AMD x86-64 target machine
+ ELFDATA2LSB = 1 # little endian
+ ELFCLASS64 = 2
+ ELFMAG = "\x7FELF"
+ EV_CURRENT = 1
+ ET_CORE = 4
+ PT_LOAD = 1
+ PT_NOTE = 4
+
+ # Special value for e_phnum. This indicates that the real number of
+ # program headers is too large to fit into e_phnum. Instead the real
+ # value is in the field sh_info of section 0.
+ PN_XNUM = 0xFFFF
+
+ # Format strings for packing and header size calculation.
+ ELF64_EHDR = ("4s" # e_ident/magic
+ "B" # e_ident/class
+ "B" # e_ident/data
+ "B" # e_ident/version
+ "B" # e_ident/osabi
+ "8s" # e_ident/pad
+ "H" # e_type
+ "H" # e_machine
+ "I" # e_version
+ "Q" # e_entry
+ "Q" # e_phoff
+ "Q" # e_shoff
+ "I" # e_flags
+ "H" # e_ehsize
+ "H" # e_phentsize
+ "H" # e_phnum
+ "H" # e_shentsize
+ "H" # e_shnum
+ "H" # e_shstrndx
+ )
+ ELF64_PHDR = ("I" # p_type
+ "I" # p_flags
+ "Q" # p_offset
+ "Q" # p_vaddr
+ "Q" # p_paddr
+ "Q" # p_filesz
+ "Q" # p_memsz
+ "Q" # p_align
+ )
+
+ def __init__(self):
+ super(DumpGuestMemory, self).__init__("dump-guest-memory",
+ gdb.COMMAND_DATA,
+ gdb.COMPLETE_FILENAME)
+ self.uintptr_t = gdb.lookup_type("uintptr_t")
+ self.elf64_ehdr_le = struct.Struct("<%s" % self.ELF64_EHDR)
+ self.elf64_phdr_le = struct.Struct("<%s" % self.ELF64_PHDR)
+
+ def int128_get64(self, val):
+ assert (val["hi"] == 0)
+ return val["lo"]
+
+ def qlist_foreach(self, head, field_str):
+ var_p = head["lh_first"]
+ while (var_p != 0):
+ var = var_p.dereference()
+ yield var
+ var_p = var[field_str]["le_next"]
+
+ def qemu_get_ram_block(self, ram_addr):
+ ram_blocks = gdb.parse_and_eval("ram_list.blocks")
+ for block in self.qlist_foreach(ram_blocks, "next"):
+ if (ram_addr - block["offset"] < block["used_length"]):
+ return block
+ raise gdb.GdbError("Bad ram offset %x" % ram_addr)
+
+ def qemu_get_ram_ptr(self, ram_addr):
+ block = self.qemu_get_ram_block(ram_addr)
+ return block["host"] + (ram_addr - block["offset"])
+
+ def memory_region_get_ram_ptr(self, mr):
+ if (mr["alias"] != 0):
+ return (self.memory_region_get_ram_ptr(mr["alias"].dereference()) +
+ mr["alias_offset"])
+ return self.qemu_get_ram_ptr(mr["ram_addr"] & self.TARGET_PAGE_MASK)
+
+ def guest_phys_blocks_init(self):
+ self.guest_phys_blocks = []
+
+ def guest_phys_blocks_append(self):
+ print "guest RAM blocks:"
+ print ("target_start target_end host_addr message "
+ "count")
+ print ("---------------- ---------------- ---------------- ------- "
+ "-----")
+
+ current_map_p = gdb.parse_and_eval("address_space_memory.current_map")
+ current_map = current_map_p.dereference()
+ for cur in range(current_map["nr"]):
+ flat_range = (current_map["ranges"] + cur).dereference()
+ mr = flat_range["mr"].dereference()
+
+ # we only care about RAM
+ if (not mr["ram"]):
+ continue
+
+ section_size = self.int128_get64(flat_range["addr"]["size"])
+ target_start = self.int128_get64(flat_range["addr"]["start"])
+ target_end = target_start + section_size
+ host_addr = (self.memory_region_get_ram_ptr(mr) +
+ flat_range["offset_in_region"])
+ predecessor = None
+
+ # find continuity in guest physical address space
+ if (len(self.guest_phys_blocks) > 0):
+ predecessor = self.guest_phys_blocks[-1]
+ predecessor_size = (predecessor["target_end"] -
+ predecessor["target_start"])
+
+ # the memory API guarantees monotonically increasing
+ # traversal
+ assert (predecessor["target_end"] <= target_start)
+
+ # we want continuity in both guest-physical and
+ # host-virtual memory
+ if (predecessor["target_end"] < target_start or
+ predecessor["host_addr"] + predecessor_size != host_addr):
+ predecessor = None
+
+ if (predecessor is None):
+ # isolated mapping, add it to the list
+ self.guest_phys_blocks.append({"target_start": target_start,
+ "target_end" : target_end,
+ "host_addr" : host_addr})
+ message = "added"
+ else:
+ # expand predecessor until @target_end; predecessor's
+ # start doesn't change
+ predecessor["target_end"] = target_end
+ message = "joined"
+
+ print ("%016x %016x %016x %-7s %5u" %
+ (target_start, target_end, host_addr.cast(self.uintptr_t),
+ message, len(self.guest_phys_blocks)))
+
+ def cpu_get_dump_info(self):
+ # We can't synchronize the registers with KVM post-mortem, and
+ # the bits in (first_x86_cpu->env.hflags) seem to be stale; they
+ # may not reflect long mode for example. Hence just assume the
+ # most common values. This also means that instruction pointer
+ # etc. will be bogus in the dump, but at least the RAM contents
+ # should be valid.
+ self.dump_info = {"d_machine": self.EM_X86_64,
+ "d_endian" : self.ELFDATA2LSB,
+ "d_class" : self.ELFCLASS64}
+
+ def encode_elf64_ehdr_le(self):
+ return self.elf64_ehdr_le.pack(
+ self.ELFMAG, # e_ident/magic
+ self.dump_info["d_class"], # e_ident/class
+ self.dump_info["d_endian"], # e_ident/data
+ self.EV_CURRENT, # e_ident/version
+ 0, # e_ident/osabi
+ "", # e_ident/pad
+ self.ET_CORE, # e_type
+ self.dump_info["d_machine"], # e_machine
+ self.EV_CURRENT, # e_version
+ 0, # e_entry
+ self.elf64_ehdr_le.size, # e_phoff
+ 0, # e_shoff
+ 0, # e_flags
+ self.elf64_ehdr_le.size, # e_ehsize
+ self.elf64_phdr_le.size, # e_phentsize
+ self.phdr_num, # e_phnum
+ 0, # e_shentsize
+ 0, # e_shnum
+ 0 # e_shstrndx
+ )
+
+ def encode_elf64_note_le(self):
+ return self.elf64_phdr_le.pack(self.PT_NOTE, # p_type
+ 0, # p_flags
+ (self.memory_offset -
+ len(self.note)), # p_offset
+ 0, # p_vaddr
+ 0, # p_paddr
+ len(self.note), # p_filesz
+ len(self.note), # p_memsz
+ 0 # p_align
+ )
+
+ def encode_elf64_load_le(self, offset, start_hwaddr, range_size):
+ return self.elf64_phdr_le.pack(self.PT_LOAD, # p_type
+ 0, # p_flags
+ offset, # p_offset
+ 0, # p_vaddr
+ start_hwaddr, # p_paddr
+ range_size, # p_filesz
+ range_size, # p_memsz
+ 0 # p_align
+ )
+
+ def note_init(self, name, desc, type):
+ # name must include a trailing NUL
+ namesz = (len(name) + 1 + 3) / 4 * 4
+ descsz = (len(desc) + 3) / 4 * 4
+ fmt = ("<" # little endian
+ "I" # n_namesz
+ "I" # n_descsz
+ "I" # n_type
+ "%us" # name
+ "%us" # desc
+ % (namesz, descsz))
+ self.note = struct.pack(fmt,
+ len(name) + 1, len(desc), type, name, desc)
+
+ def dump_init(self):
+ self.guest_phys_blocks_init()
+ self.guest_phys_blocks_append()
+ self.cpu_get_dump_info()
+ # we have no way to retrieve the VCPU status from KVM
+ # post-mortem
+ self.note_init("NONE", "EMPTY", 0)
+
+ # Account for PT_NOTE.
+ self.phdr_num = 1
+
+ # We should never reach PN_XNUM for paging=false dumps: there's
+ # just a handful of discontiguous ranges after merging.
+ self.phdr_num += len(self.guest_phys_blocks)
+ assert (self.phdr_num < self.PN_XNUM)
+
+ # Calculate the ELF file offset where the memory dump commences:
+ #
+ # ELF header
+ # PT_NOTE
+ # PT_LOAD: 1
+ # PT_LOAD: 2
+ # ...
+ # PT_LOAD: len(self.guest_phys_blocks)
+ # ELF note
+ # memory dump
+ self.memory_offset = (self.elf64_ehdr_le.size +
+ self.elf64_phdr_le.size * self.phdr_num +
+ len(self.note))
+
+ def dump_begin(self, vmcore):
+ vmcore.write(self.encode_elf64_ehdr_le())
+ vmcore.write(self.encode_elf64_note_le())
+ running = self.memory_offset
+ for block in self.guest_phys_blocks:
+ range_size = block["target_end"] - block["target_start"]
+ vmcore.write(self.encode_elf64_load_le(running,
+ block["target_start"],
+ range_size))
+ running += range_size
+ vmcore.write(self.note)
+
+ def dump_iterate(self, vmcore):
+ qemu_core = gdb.inferiors()[0]
+ for block in self.guest_phys_blocks:
+ cur = block["host_addr"]
+ left = block["target_end"] - block["target_start"]
+ print ("dumping range at %016x for length %016x" %
+ (cur.cast(self.uintptr_t), left))
+ while (left > 0):
+ chunk_size = min(self.TARGET_PAGE_SIZE, left)
+ chunk = qemu_core.read_memory(cur, chunk_size)
+ vmcore.write(chunk)
+ cur += chunk_size
+ left -= chunk_size
+
+ def create_vmcore(self, filename):
+ vmcore = open(filename, "wb")
+ self.dump_begin(vmcore)
+ self.dump_iterate(vmcore)
+ vmcore.close()
+
+ def invoke(self, args, from_tty):
+ # Unwittingly pressing the Enter key after the command should
+ # not dump the same multi-gig coredump to the same file.
+ self.dont_repeat()
+
+ argv = gdb.string_to_argv(args)
+ if (len(argv) != 1):
+ raise gdb.GdbError("usage: dump-guest-memory FILE")
+
+ self.dump_init()
+ self.create_vmcore(argv[0])
+
+DumpGuestMemory()
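The layout arithmetic in note_init() and dump_init() above can be checked by hand; a standalone sketch with assumed sizes (64-byte Elf64 Ehdr, 56-byte Phdr, two merged RAM ranges):

    ELF64_EHDR_SIZE = 64               # struct.calcsize of "<" + ELF64_EHDR
    ELF64_PHDR_SIZE = 56               # struct.calcsize of "<" + ELF64_PHDR

    name, desc = "NONE", "EMPTY"
    namesz = (len(name) + 1 + 3) // 4 * 4      # "NONE\0" padded to 8
    descsz = (len(desc) + 3) // 4 * 4          # "EMPTY" padded to 8
    note_size = 12 + namesz + descsz           # 3 x uint32 header + payload = 28

    guest_phys_blocks = 2                      # assumed number of merged RAM ranges
    phdr_num = 1 + guest_phys_blocks           # one PT_NOTE plus one PT_LOAD per range
    memory_offset = ELF64_EHDR_SIZE + ELF64_PHDR_SIZE * phdr_num + note_size
    print(memory_offset)                       # 64 + 3 * 56 + 28 = 260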
diff --git a/scripts/extract-vsssdk-headers b/scripts/extract-vsssdk-headers
new file mode 100755
index 00000000..9e38510f
--- /dev/null
+++ b/scripts/extract-vsssdk-headers
@@ -0,0 +1,35 @@
+#! /bin/bash
+
+# extract-vsssdk-headers
+# Author: Paolo Bonzini <pbonzini@redhat.com>
+
+set -e
+if test $# != 1 || ! test -f "$1"; then
+ echo 'Usage: extract-vsssdk-headers /path/to/setup.exe' >&2
+ exit 1
+fi
+
+if ! command -v msiextract > /dev/null; then
+ echo 'msiextract not found. Please install msitools.' >&2
+ exit 1
+fi
+
+if test -e inc; then
+ echo '"inc" already exists.' >&2
+ exit 1
+fi
+
+# Extract .MSI file in the .exe, looking for the OLE compound
+# document signature. Extra data at the end does not matter.
+export LC_ALL=C
+MAGIC=$'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1'
+offset=$(grep -abom1 "$MAGIC" "$1" | sed -n 's/:/\n/; P')
+tmpdir=$(mktemp -d)
+trap 'rm -fr -- "$tmpdir" vsssdk.msi' EXIT HUP INT QUIT ALRM TERM
+tail -c +$(($offset+1)) -- "$1" > vsssdk.msi
+
+# Now extract the files.
+msiextract -C $tmpdir vsssdk.msi
+mv "$tmpdir/Program Files/Microsoft/VSSSDK72/inc" inc
+echo 'Extracted SDK headers into "inc" directory.'
+exit 0
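The grep/tail pipeline above simply locates the OLE compound document magic and copies everything from that offset onward; a Python sketch of the same idea (file names are hypothetical):

    MAGIC = b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1"   # OLE compound document signature

    with open("setup.exe", "rb") as f:            # hypothetical installer
        data = f.read()
    offset = data.find(MAGIC)
    if offset < 0:
        raise SystemExit("no embedded MSI found")
    with open("vsssdk.msi", "wb") as out:
        out.write(data[offset:])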
diff --git a/scripts/feature_to_c.sh b/scripts/feature_to_c.sh
new file mode 100644
index 00000000..888548e5
--- /dev/null
+++ b/scripts/feature_to_c.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+# Convert text files to compilable C arrays.
+#
+# Copyright (C) 2007 Free Software Foundation, Inc.
+#
+# This file is part of GDB.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+output=$1
+shift
+
+if test -z "$output" || test -z "$1"; then
+ echo "Usage: $0 OUTPUTFILE INPUTFILE..."
+ exit 1
+fi
+
+if test -e "$output"; then
+ echo "Output file \"$output\" already exists; refusing to overwrite."
+ exit 1
+fi
+
+for input; do
+ arrayname=xml_feature_`echo $input | sed 's,.*/,,; s/[-.]/_/g'`
+
+ ${AWK:-awk} 'BEGIN { n = 0
+ printf "#include \"config.h\"\n"
+ printf "#include \"qemu-common.h\"\n"
+ printf "#include \"exec/gdbstub.h\"\n"
+ print "static const char '$arrayname'[] = {"
+ for (i = 0; i < 255; i++)
+ _ord_[sprintf("%c", i)] = i
+ } {
+ split($0, line, "");
+ printf " "
+ for (i = 1; i <= length($0); i++) {
+ c = line[i]
+ if (c == "'\''") {
+ printf "'\''\\'\'''\'', "
+ } else if (c == "\\") {
+ printf "'\''\\\\'\'', "
+ } else if (_ord_[c] >= 32 && _ord_[c] < 127) {
+ printf "'\''%s'\'', ", c
+ } else {
+ printf "'\''\\%03o'\'', ", _ord_[c]
+ }
+ if (i % 10 == 0)
+ printf "\n "
+ }
+ printf "'\''\\n'\'', \n"
+ } END {
+ print " 0 };"
+ }' < $input >> $output
+done
+
+echo >> $output
+echo "const char *const xml_builtin[][2] = {" >> $output
+
+for input; do
+ basename=`echo $input | sed 's,.*/,,'`
+ arrayname=xml_feature_`echo $input | sed 's,.*/,,; s/[-.]/_/g'`
+ echo " { \"$basename\", $arrayname }," >> $output
+done
+
+echo " { (char *)0, (char *)0 }" >> $output
+echo "};" >> $output
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
new file mode 100755
index 00000000..f39630eb
--- /dev/null
+++ b/scripts/get_maintainer.pl
@@ -0,0 +1,2121 @@
+#!/usr/bin/perl -w
+# (c) 2007, Joe Perches <joe@perches.com>
+# created from checkpatch.pl
+#
+# Print selected MAINTAINERS information for
+# the files modified in a patch or for a file
+#
+# usage: perl scripts/get_maintainer.pl [OPTIONS] <patch>
+# perl scripts/get_maintainer.pl [OPTIONS] -f <file>
+#
+# Licensed under the terms of the GNU GPL License version 2
+
+use strict;
+
+my $P = $0;
+my $V = '0.26';
+
+use Getopt::Long qw(:config no_auto_abbrev);
+
+my $lk_path = "./";
+my $email = 1;
+my $email_usename = 1;
+my $email_maintainer = 1;
+my $email_list = 1;
+my $email_subscriber_list = 0;
+my $email_git = 0;
+my $email_git_all_signature_types = 0;
+my $email_git_blame = 0;
+my $email_git_blame_signatures = 1;
+my $email_git_fallback = 1;
+my $email_git_min_signatures = 1;
+my $email_git_max_maintainers = 5;
+my $email_git_min_percent = 5;
+my $email_git_since = "1-year-ago";
+my $email_hg_since = "-365";
+my $interactive = 0;
+my $email_remove_duplicates = 1;
+my $email_use_mailmap = 1;
+my $output_multiline = 1;
+my $output_separator = ", ";
+my $output_roles = 0;
+my $output_rolestats = 1;
+my $scm = 0;
+my $web = 0;
+my $subsystem = 0;
+my $status = 0;
+my $keywords = 1;
+my $sections = 0;
+my $file_emails = 0;
+my $from_filename = 0;
+my $pattern_depth = 0;
+my $version = 0;
+my $help = 0;
+
+my $vcs_used = 0;
+
+my $exit = 0;
+
+my %commit_author_hash;
+my %commit_signer_hash;
+
+# Signature types of people who are either
+# a) responsible for the code in question, or
+# b) familiar enough with it to give relevant feedback
+my @signature_tags = ();
+push(@signature_tags, "Signed-off-by:");
+push(@signature_tags, "Reviewed-by:");
+push(@signature_tags, "Acked-by:");
+
+my $signature_pattern = "\(" . join("|", @signature_tags) . "\)";
+
+# rfc822 email address - preloaded methods go here.
+my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
+my $rfc822_char = '[\\000-\\377]';
+
+# VCS command support: class-like functions and strings
+
+my %VCS_cmds;
+
+my %VCS_cmds_git = (
+ "execute_cmd" => \&git_execute_cmd,
+ "available" => '(which("git") ne "") && (-d ".git")',
+ "find_signers_cmd" =>
+ "git log --no-color --follow --since=\$email_git_since " .
+ '--format="GitCommit: %H%n' .
+ 'GitAuthor: %an <%ae>%n' .
+ 'GitDate: %aD%n' .
+ 'GitSubject: %s%n' .
+ '%b%n"' .
+ " -- \$file",
+ "find_commit_signers_cmd" =>
+ "git log --no-color " .
+ '--format="GitCommit: %H%n' .
+ 'GitAuthor: %an <%ae>%n' .
+ 'GitDate: %aD%n' .
+ 'GitSubject: %s%n' .
+ '%b%n"' .
+ " -1 \$commit",
+ "find_commit_author_cmd" =>
+ "git log --no-color " .
+ '--format="GitCommit: %H%n' .
+ 'GitAuthor: %an <%ae>%n' .
+ 'GitDate: %aD%n' .
+ 'GitSubject: %s%n"' .
+ " -1 \$commit",
+ "blame_range_cmd" => "git blame -l -L \$diff_start,+\$diff_length \$file",
+ "blame_file_cmd" => "git blame -l \$file",
+ "commit_pattern" => "^GitCommit: ([0-9a-f]{40,40})",
+ "blame_commit_pattern" => "^([0-9a-f]+) ",
+ "author_pattern" => "^GitAuthor: (.*)",
+ "subject_pattern" => "^GitSubject: (.*)",
+);
+
+my %VCS_cmds_hg = (
+ "execute_cmd" => \&hg_execute_cmd,
+ "available" => '(which("hg") ne "") && (-d ".hg")',
+ "find_signers_cmd" =>
+ "hg log --date=\$email_hg_since " .
+ "--template='HgCommit: {node}\\n" .
+ "HgAuthor: {author}\\n" .
+ "HgSubject: {desc}\\n'" .
+ " -- \$file",
+ "find_commit_signers_cmd" =>
+ "hg log " .
+ "--template='HgSubject: {desc}\\n'" .
+ " -r \$commit",
+ "find_commit_author_cmd" =>
+ "hg log " .
+ "--template='HgCommit: {node}\\n" .
+ "HgAuthor: {author}\\n" .
+ "HgSubject: {desc|firstline}\\n'" .
+ " -r \$commit",
+ "blame_range_cmd" => "", # not supported
+ "blame_file_cmd" => "hg blame -n \$file",
+ "commit_pattern" => "^HgCommit: ([0-9a-f]{40,40})",
+ "blame_commit_pattern" => "^([ 0-9a-f]+):",
+ "author_pattern" => "^HgAuthor: (.*)",
+ "subject_pattern" => "^HgSubject: (.*)",
+);
+
+my $conf = which_conf(".get_maintainer.conf");
+if (-f $conf) {
+ my @conf_args;
+ open(my $conffile, '<', "$conf")
+ or warn "$P: Can't find a readable .get_maintainer.conf file $!\n";
+
+ while (<$conffile>) {
+ my $line = $_;
+
+ $line =~ s/\s*\n?$//g;
+ $line =~ s/^\s*//g;
+ $line =~ s/\s+/ /g;
+
+ next if ($line =~ m/^\s*#/);
+ next if ($line =~ m/^\s*$/);
+
+ my @words = split(" ", $line);
+ foreach my $word (@words) {
+ last if ($word =~ m/^#/);
+ push (@conf_args, $word);
+ }
+ }
+ close($conffile);
+ unshift(@ARGV, @conf_args) if @conf_args;
+}
+
+if (!GetOptions(
+ 'email!' => \$email,
+ 'git!' => \$email_git,
+ 'git-all-signature-types!' => \$email_git_all_signature_types,
+ 'git-blame!' => \$email_git_blame,
+ 'git-blame-signatures!' => \$email_git_blame_signatures,
+ 'git-fallback!' => \$email_git_fallback,
+ 'git-min-signatures=i' => \$email_git_min_signatures,
+ 'git-max-maintainers=i' => \$email_git_max_maintainers,
+ 'git-min-percent=i' => \$email_git_min_percent,
+ 'git-since=s' => \$email_git_since,
+ 'hg-since=s' => \$email_hg_since,
+ 'i|interactive!' => \$interactive,
+ 'remove-duplicates!' => \$email_remove_duplicates,
+ 'mailmap!' => \$email_use_mailmap,
+ 'm!' => \$email_maintainer,
+ 'n!' => \$email_usename,
+ 'l!' => \$email_list,
+ 's!' => \$email_subscriber_list,
+ 'multiline!' => \$output_multiline,
+ 'roles!' => \$output_roles,
+ 'rolestats!' => \$output_rolestats,
+ 'separator=s' => \$output_separator,
+ 'subsystem!' => \$subsystem,
+ 'status!' => \$status,
+ 'scm!' => \$scm,
+ 'web!' => \$web,
+ 'pattern-depth=i' => \$pattern_depth,
+ 'k|keywords!' => \$keywords,
+ 'sections!' => \$sections,
+ 'fe|file-emails!' => \$file_emails,
+ 'f|file' => \$from_filename,
+ 'v|version' => \$version,
+ 'h|help|usage' => \$help,
+ )) {
+ die "$P: invalid argument - use --help if necessary\n";
+}
+
+if ($help != 0) {
+ usage();
+ exit 0;
+}
+
+if ($version != 0) {
+ print("${P} ${V}\n");
+ exit 0;
+}
+
+if (-t STDIN && !@ARGV) {
+ # We're talking to a terminal, but have no command line arguments.
+ die "$P: missing patchfile or -f file - use --help if necessary\n";
+}
+
+$output_multiline = 0 if ($output_separator ne ", ");
+$output_rolestats = 1 if ($interactive);
+$output_roles = 1 if ($output_rolestats);
+
+if ($sections) {
+ $email = 0;
+ $email_list = 0;
+ $scm = 0;
+ $status = 0;
+ $subsystem = 0;
+ $web = 0;
+ $keywords = 0;
+ $interactive = 0;
+} else {
+ my $selections = $email + $scm + $status + $subsystem + $web;
+ if ($selections == 0) {
+ die "$P: Missing required option: email, scm, status, subsystem or web\n";
+ }
+}
+
+if ($email &&
+ ($email_maintainer + $email_list + $email_subscriber_list +
+ $email_git + $email_git_blame) == 0) {
+ die "$P: Please select at least 1 email option\n";
+}
+
+if (!top_of_tree($lk_path)) {
+ die "$P: The current directory does not appear to be "
+ . "a QEMU source tree.\n";
+}
+
+## Read MAINTAINERS for type/value pairs
+
+my @typevalue = ();
+my %keyword_hash;
+
+open (my $maint, '<', "${lk_path}MAINTAINERS")
+ or die "$P: Can't open MAINTAINERS: $!\n";
+while (<$maint>) {
+ my $line = $_;
+
+ if ($line =~ m/^(\C):\s*(.*)/) {
+ my $type = $1;
+ my $value = $2;
+
+ ##Filename pattern matching
+ if ($type eq "F" || $type eq "X") {
+ $value =~ s@\.@\\\.@g; ##Convert . to \.
+ $value =~ s/\*/\.\*/g; ##Convert * to .*
+ $value =~ s/\?/\./g; ##Convert ? to .
+ ##if pattern is a directory and it lacks a trailing slash, add one
+ if ((-d $value)) {
+ $value =~ s@([^/])$@$1/@;
+ }
+ } elsif ($type eq "K") {
+ $keyword_hash{@typevalue} = $value;
+ }
+ push(@typevalue, "$type:$value");
+ } elsif (!/^(\s)*$/) {
+ $line =~ s/\n$//g;
+ push(@typevalue, $line);
+ }
+}
+close($maint);
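The F:/X: pattern rewriting in the loop above (".", "*" and "?" converted to their regex equivalents) can be illustrated with a small sketch (Python, hypothetical MAINTAINERS pattern):

    pattern = "hw/net/*.c"                                             # hypothetical F: value
    regex = pattern.replace(".", r"\.").replace("*", ".*").replace("?", ".")
    print(regex)                                                       # hw/net/.*\.c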
+
+
+#
+# Read mail address map
+#
+
+my $mailmap;
+
+read_mailmap();
+
+sub read_mailmap {
+ $mailmap = {
+ names => {},
+ addresses => {}
+ };
+
+ return if (!$email_use_mailmap || !(-f "${lk_path}.mailmap"));
+
+ open(my $mailmap_file, '<', "${lk_path}.mailmap")
+ or warn "$P: Can't open .mailmap: $!\n";
+
+ while (<$mailmap_file>) {
+ s/#.*$//; #strip comments
+ s/^\s+|\s+$//g; #trim
+
+ next if (/^\s*$/); #skip empty lines
+ #entries have one of the following formats:
+ # name1 <mail1>
+ # <mail1> <mail2>
+ # name1 <mail1> <mail2>
+ # name1 <mail1> name2 <mail2>
+ # (see man git-shortlog)
+
+ if (/^([^<]+)<([^>]+)>$/) {
+ my $real_name = $1;
+ my $address = $2;
+
+ $real_name =~ s/\s+$//;
+ ($real_name, $address) = parse_email("$real_name <$address>");
+ $mailmap->{names}->{$address} = $real_name;
+
+ } elsif (/^<([^>]+)>\s*<([^>]+)>$/) {
+ my $real_address = $1;
+ my $wrong_address = $2;
+
+ $mailmap->{addresses}->{$wrong_address} = $real_address;
+
+ } elsif (/^(.+)<([^>]+)>\s*<([^>]+)>$/) {
+ my $real_name = $1;
+ my $real_address = $2;
+ my $wrong_address = $3;
+
+ $real_name =~ s/\s+$//;
+ ($real_name, $real_address) =
+ parse_email("$real_name <$real_address>");
+ $mailmap->{names}->{$wrong_address} = $real_name;
+ $mailmap->{addresses}->{$wrong_address} = $real_address;
+
+ } elsif (/^(.+)<([^>]+)>\s*(.+)\s*<([^>]+)>$/) {
+ my $real_name = $1;
+ my $real_address = $2;
+ my $wrong_name = $3;
+ my $wrong_address = $4;
+
+ $real_name =~ s/\s+$//;
+ ($real_name, $real_address) =
+ parse_email("$real_name <$real_address>");
+
+ $wrong_name =~ s/\s+$//;
+ ($wrong_name, $wrong_address) =
+ parse_email("$wrong_name <$wrong_address>");
+
+ my $wrong_email = format_email($wrong_name, $wrong_address, 1);
+ $mailmap->{names}->{$wrong_email} = $real_name;
+ $mailmap->{addresses}->{$wrong_email} = $real_address;
+ }
+ }
+ close($mailmap_file);
+}
+
+## use the filenames on the command line or find the filenames in the patchfiles
+
+my @files = ();
+my @range = ();
+my @keyword_tvi = ();
+my @file_emails = ();
+
+if (!@ARGV) {
+ push(@ARGV, "&STDIN");
+}
+
+foreach my $file (@ARGV) {
+ if ($file ne "&STDIN") {
+ ##if $file is a directory and it lacks a trailing slash, add one
+ if ((-d $file)) {
+ $file =~ s@([^/])$@$1/@;
+ } elsif (!(-f $file)) {
+ die "$P: file '${file}' not found\n";
+ }
+ }
+ if ($from_filename) {
+ push(@files, $file);
+ if ($file ne "MAINTAINERS" && -f $file && ($keywords || $file_emails)) {
+ open(my $f, '<', $file)
+ or die "$P: Can't open $file: $!\n";
+ my $text = do { local($/) ; <$f> };
+ close($f);
+ if ($keywords) {
+ foreach my $line (keys %keyword_hash) {
+ if ($text =~ m/$keyword_hash{$line}/x) {
+ push(@keyword_tvi, $line);
+ }
+ }
+ }
+ if ($file_emails) {
+ my @poss_addr = $text =~ m$[A-Za-zÀ-ÿ\"\' \,\.\+-]*\s*[\,]*\s*[\(\<\{]{0,1}[A-Za-z0-9_\.\+-]+\@[A-Za-z0-9\.-]+\.[A-Za-z0-9]+[\)\>\}]{0,1}$g;
+ push(@file_emails, clean_file_emails(@poss_addr));
+ }
+ }
+ } else {
+ my $file_cnt = @files;
+ my $lastfile;
+
+ open(my $patch, "< $file")
+ or die "$P: Can't open $file: $!\n";
+
+ # We can check arbitrary information before the patch
+ # like the commit message, mail headers, etc...
+ # This allows us to match arbitrary keywords against any part
+ # of a git format-patch generated file (subject tags, etc...)
+
+ my $patch_prefix = ""; #Parsing the intro
+
+ while (<$patch>) {
+ my $patch_line = $_;
+ if (m/^\+\+\+\s+(\S+)/) {
+ my $filename = $1;
+ $filename =~ s@^[^/]*/@@;
+ $filename =~ s@\n@@;
+ $lastfile = $filename;
+ push(@files, $filename);
+ $patch_prefix = "^[+-].*"; #Now parsing the actual patch
+ } elsif (m/^\@\@ -(\d+),(\d+)/) {
+ if ($email_git_blame) {
+ push(@range, "$lastfile:$1:$2");
+ }
+ } elsif ($keywords) {
+ foreach my $line (keys %keyword_hash) {
+ if ($patch_line =~ m/${patch_prefix}$keyword_hash{$line}/x) {
+ push(@keyword_tvi, $line);
+ }
+ }
+ }
+ }
+ close($patch);
+
+ if ($file_cnt == @files) {
+ warn "$P: file '${file}' doesn't appear to be a patch. "
+ . "Add -f to options?\n";
+ }
+ @files = sort_and_uniq(@files);
+ }
+}
+
+@file_emails = uniq(@file_emails);
+
+my %email_hash_name;
+my %email_hash_address;
+my @email_to = ();
+my %hash_list_to;
+my @list_to = ();
+my @scm = ();
+my @web = ();
+my @subsystem = ();
+my @status = ();
+my %deduplicate_name_hash = ();
+my %deduplicate_address_hash = ();
+
+my @maintainers = get_maintainers();
+
+if (@maintainers) {
+ @maintainers = merge_email(@maintainers);
+ output(@maintainers);
+}
+
+if ($scm) {
+ @scm = uniq(@scm);
+ output(@scm);
+}
+
+if ($status) {
+ @status = uniq(@status);
+ output(@status);
+}
+
+if ($subsystem) {
+ @subsystem = uniq(@subsystem);
+ output(@subsystem);
+}
+
+if ($web) {
+ @web = uniq(@web);
+ output(@web);
+}
+
+exit($exit);
+
+sub range_is_maintained {
+ my ($start, $end) = @_;
+
+ for (my $i = $start; $i < $end; $i++) {
+ my $line = $typevalue[$i];
+ if ($line =~ m/^(\C):\s*(.*)/) {
+ my $type = $1;
+ my $value = $2;
+ if ($type eq 'S') {
+ if ($value =~ /(maintain|support)/i) {
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+sub range_has_maintainer {
+ my ($start, $end) = @_;
+
+ for (my $i = $start; $i < $end; $i++) {
+ my $line = $typevalue[$i];
+ if ($line =~ m/^(\C):\s*(.*)/) {
+ my $type = $1;
+ my $value = $2;
+ if ($type eq 'M') {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+sub get_maintainers {
+ %email_hash_name = ();
+ %email_hash_address = ();
+ %commit_author_hash = ();
+ %commit_signer_hash = ();
+ @email_to = ();
+ %hash_list_to = ();
+ @list_to = ();
+ @scm = ();
+ @web = ();
+ @subsystem = ();
+ @status = ();
+ %deduplicate_name_hash = ();
+ %deduplicate_address_hash = ();
+ if ($email_git_all_signature_types) {
+ $signature_pattern = "(.+?)[Bb][Yy]:";
+ } else {
+ $signature_pattern = "\(" . join("|", @signature_tags) . "\)";
+ }
+
+ # Find responsible parties
+
+ my %exact_pattern_match_hash = ();
+
+ foreach my $file (@files) {
+
+ my %hash;
+ my $tvi = find_first_section();
+ while ($tvi < @typevalue) {
+ my $start = find_starting_index($tvi);
+ my $end = find_ending_index($tvi);
+ my $exclude = 0;
+ my $i;
+
+ #Do not match excluded file patterns
+
+ for ($i = $start; $i < $end; $i++) {
+ my $line = $typevalue[$i];
+ if ($line =~ m/^(\C):\s*(.*)/) {
+ my $type = $1;
+ my $value = $2;
+ if ($type eq 'X') {
+ if (file_match_pattern($file, $value)) {
+ $exclude = 1;
+ last;
+ }
+ }
+ }
+ }
+
+ if (!$exclude) {
+ for ($i = $start; $i < $end; $i++) {
+ my $line = $typevalue[$i];
+ if ($line =~ m/^(\C):\s*(.*)/) {
+ my $type = $1;
+ my $value = $2;
+ if ($type eq 'F') {
+ if (file_match_pattern($file, $value)) {
+ my $value_pd = ($value =~ tr@/@@);
+ my $file_pd = ($file =~ tr@/@@);
+ $value_pd++ if (substr($value,-1,1) ne "/");
+ $value_pd = -1 if ($value =~ /^\.\*/);
+ if ($value_pd >= $file_pd &&
+ range_is_maintained($start, $end) &&
+ range_has_maintainer($start, $end)) {
+ $exact_pattern_match_hash{$file} = 1;
+ }
+ if ($pattern_depth == 0 ||
+ (($file_pd - $value_pd) < $pattern_depth)) {
+ $hash{$tvi} = $value_pd;
+ }
+ }
+ }
+ }
+ }
+ }
+ $tvi = $end + 1;
+ }
+
+ foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
+ add_categories($line);
+ if ($sections) {
+ my $i;
+ my $start = find_starting_index($line);
+ my $end = find_ending_index($line);
+ for ($i = $start; $i < $end; $i++) {
+ my $line = $typevalue[$i];
+ if ($line =~ /^[FX]:/) { ##Restore file patterns
+ $line =~ s/([^\\])\.([^\*])/$1\?$2/g;
+ $line =~ s/([^\\])\.$/$1\?/g; ##Convert . back to ?
+ $line =~ s/\\\./\./g; ##Convert \. to .
+ $line =~ s/\.\*/\*/g; ##Convert .* to *
+ }
+ $line =~ s/^([A-Z]):/$1:\t/g;
+ print("$line\n");
+ }
+ print("\n");
+ }
+ }
+ }
+
+ if ($keywords) {
+ @keyword_tvi = sort_and_uniq(@keyword_tvi);
+ foreach my $line (@keyword_tvi) {
+ add_categories($line);
+ }
+ }
+
+ foreach my $email (@email_to, @list_to) {
+ $email->[0] = deduplicate_email($email->[0]);
+ }
+
+ if ($email) {
+ if (! $interactive) {
+ $email_git_fallback = 0 if @email_to > 0 || @list_to > 0 || $email_git || $email_git_blame;
+ if ($email_git_fallback) {
+ print STDERR "get_maintainer.pl: No maintainers found, printing recent contributors.\n";
+ print STDERR "get_maintainer.pl: Do not blindly cc: them on patches! Use common sense.\n";
+ print STDERR "\n";
+ }
+ }
+
+ foreach my $file (@files) {
+ if ($email_git || ($email_git_fallback &&
+ !$exact_pattern_match_hash{$file})) {
+ vcs_file_signoffs($file);
+ }
+ if ($email_git_blame) {
+ vcs_file_blame($file);
+ }
+ }
+
+ foreach my $email (@file_emails) {
+ my ($name, $address) = parse_email($email);
+
+ my $tmp_email = format_email($name, $address, $email_usename);
+ push_email_address($tmp_email, '');
+ add_role($tmp_email, 'in file');
+ }
+ }
+
+ my @to = ();
+ if ($email || $email_list) {
+ if ($email) {
+ @to = (@to, @email_to);
+ }
+ if ($email_list) {
+ @to = (@to, @list_to);
+ }
+ }
+
+ if ($interactive) {
+ @to = interactive_get_maintainers(\@to);
+ }
+
+ return @to;
+}
+
+sub file_match_pattern {
+ my ($file, $pattern) = @_;
+ if (substr($pattern, -1) eq "/") {
+ if ($file =~ m@^$pattern@) {
+ return 1;
+ }
+ } else {
+ if ($file =~ m@^$pattern@) {
+ my $s1 = ($file =~ tr@/@@);
+ my $s2 = ($pattern =~ tr@/@@);
+ if ($s1 == $s2) {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+sub usage {
+ print <<EOT;
+usage: $P [options] patchfile
+ $P [options] -f file|directory
+version: $V
+
+MAINTAINER field selection options:
+ --email => print email address(es) if any
+ --git => include recent git \*-by: signers
+ --git-all-signature-types => include signers regardless of signature type
+ or use only ${signature_pattern} signers (default: $email_git_all_signature_types)
+ --git-fallback => use git when no exact MAINTAINERS pattern (default: $email_git_fallback)
+ --git-min-signatures => number of signatures required (default: $email_git_min_signatures)
+ --git-max-maintainers => maximum maintainers to add (default: $email_git_max_maintainers)
+ --git-min-percent => minimum percentage of commits required (default: $email_git_min_percent)
+ --git-blame => use git blame to find modified commits for patch or file
+ --git-since => git history to use (default: $email_git_since)
+ --hg-since => hg history to use (default: $email_hg_since)
+ --interactive => display a menu (mostly useful if used with the --git option)
+ --m => include maintainer(s) if any
+ --n => include name 'Full Name <addr\@domain.tld>'
+ --l => include list(s) if any
+ --s => include subscriber only list(s) if any
+ --remove-duplicates => minimize duplicate email names/addresses
+ --roles => show roles (status:subsystem, git-signer, list, etc...)
+ --rolestats => show roles and statistics (commits/total_commits, %)
+ --file-emails => add email addresses found in -f file (default: 0 (off))
+ --scm => print SCM tree(s) if any
+ --status => print status if any
+ --subsystem => print subsystem name if any
+ --web => print website(s) if any
+
+Output type options:
+ --separator [, ] => separator for multiple entries on 1 line
+ using --separator also sets --nomultiline if --separator is not [, ]
+ --multiline => print 1 entry per line
+
+Other options:
+ --pattern-depth => Number of pattern directory traversals (default: 0 (all))
+ --keywords => scan patch for keywords (default: $keywords)
+ --sections => print all of the subsystem sections with pattern matches
+ --mailmap => use .mailmap file (default: $email_use_mailmap)
+ --version => show version
+ --help => show this help information
+
+Default options:
+  [--email --nogit --git-fallback --m --n --l --multiline --pattern-depth=0
+ --remove-duplicates --rolestats]
+
+Notes:
+ Using "-f directory" may give unexpected results:
+ Used with "--git", git signators for _all_ files in and below
+ directory are examined as git recurses directories.
+ Any specified X: (exclude) pattern matches are _not_ ignored.
+ Used with "--nogit", directory is used as a pattern match,
+ no individual file within the directory or subdirectory
+ is matched.
+    Used with "--git-blame", does not iterate all files in the directory.
+ Using "--git-blame" is slow and may add old committers and authors
+ that are no longer active maintainers to the output.
+ Using "--roles" or "--rolestats" with git send-email --cc-cmd or any
+ other automated tools that expect only ["name"] <email address>
+ may not work because of additional output after <email address>.
+ Using "--rolestats" and "--git-blame" shows the #/total=% commits,
+ not the percentage of the entire file authored. # of commits is
+ not a good measure of amount of code authored. 1 major commit may
+ contain a thousand lines, 5 trivial commits may modify a single line.
+ If git is not installed, but mercurial (hg) is installed and an .hg
+ repository exists, the following options apply to mercurial:
+ --git,
+ --git-min-signatures, --git-max-maintainers, --git-min-percent, and
+ --git-blame
+    Use --hg-since, not --git-since, to control date selection.
+ File ".get_maintainer.conf", if it exists in the QEMU source root
+ directory, can change whatever get_maintainer defaults are desired.
+ Entries in this file can be any command line argument.
+ This file is prepended to any additional command line arguments.
+ Multiple lines and # comments are allowed.
+EOT
+}
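+# A minimal example of a ".get_maintainer.conf", as described in the notes
+# above (hypothetical contents; any command line option listed in this usage
+# text is a valid entry, and "#" comments are allowed):
+#
+#   # prefer role statistics and allow git fallback
+#   --rolestats
+#   --git-fallback
+#   --git-min-percent=10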
+
+sub top_of_tree {
+ my ($lk_path) = @_;
+
+ if ($lk_path ne "" && substr($lk_path,length($lk_path)-1,1) ne "/") {
+ $lk_path .= "/";
+ }
+ if ( (-f "${lk_path}COPYING")
+ && (-f "${lk_path}MAINTAINERS")
+ && (-f "${lk_path}Makefile")
+ && (-d "${lk_path}docs")
+ && (-f "${lk_path}VERSION")
+ && (-f "${lk_path}vl.c")) {
+ return 1;
+ }
+ return 0;
+}
+
+sub parse_email {
+ my ($formatted_email) = @_;
+
+ my $name = "";
+ my $address = "";
+
+ if ($formatted_email =~ /^([^<]+)<(.+\@.*)>.*$/) {
+ $name = $1;
+ $address = $2;
+ } elsif ($formatted_email =~ /^\s*<(.+\@\S*)>.*$/) {
+ $address = $1;
+ } elsif ($formatted_email =~ /^(.+\@\S*).*$/) {
+ $address = $1;
+ }
+
+ $name =~ s/^\s+|\s+$//g;
+ $name =~ s/^\"|\"$//g;
+ $address =~ s/^\s+|\s+$//g;
+
+ if ($name =~ /[^\w \-]/i) { ##has "must quote" chars
+ $name =~ s/(?<!\\)"/\\"/g; ##escape quotes
+ $name = "\"$name\"";
+ }
+
+ return ($name, $address);
+}
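+# Illustrative results for the address forms handled above (hypothetical
+# addresses):
+#   parse_email('Jane Doe <jane@example.org>')  -> ('Jane Doe', 'jane@example.org')
+#   parse_email('<jane@example.org>')           -> ('', 'jane@example.org')
+#   parse_email('Doe, Jane <jane@example.org>') -> ('"Doe, Jane"', 'jane@example.org')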
+
+sub format_email {
+ my ($name, $address, $usename) = @_;
+
+ my $formatted_email;
+
+ $name =~ s/^\s+|\s+$//g;
+ $name =~ s/^\"|\"$//g;
+ $address =~ s/^\s+|\s+$//g;
+
+ if ($name =~ /[^\w \-]/i) { ##has "must quote" chars
+ $name =~ s/(?<!\\)"/\\"/g; ##escape quotes
+ $name = "\"$name\"";
+ }
+
+ if ($usename) {
+ if ("$name" eq "") {
+ $formatted_email = "$address";
+ } else {
+ $formatted_email = "$name <$address>";
+ }
+ } else {
+ $formatted_email = $address;
+ }
+
+ return $formatted_email;
+}
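+# Illustrative results (hypothetical address; $usename selects the form):
+#   format_email('Jane Doe', 'jane@example.org', 1) -> 'Jane Doe <jane@example.org>'
+#   format_email('Jane Doe', 'jane@example.org', 0) -> 'jane@example.org'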
+
+sub find_first_section {
+ my $index = 0;
+
+ while ($index < @typevalue) {
+ my $tv = $typevalue[$index];
+ if (($tv =~ m/^(\C):\s*(.*)/)) {
+ last;
+ }
+ $index++;
+ }
+
+ return $index;
+}
+
+sub find_starting_index {
+ my ($index) = @_;
+
+ while ($index > 0) {
+ my $tv = $typevalue[$index];
+ if (!($tv =~ m/^(\C):\s*(.*)/)) {
+ last;
+ }
+ $index--;
+ }
+
+ return $index;
+}
+
+sub find_ending_index {
+ my ($index) = @_;
+
+ while ($index < @typevalue) {
+ my $tv = $typevalue[$index];
+ if (!($tv =~ m/^(\C):\s*(.*)/)) {
+ last;
+ }
+ $index++;
+ }
+
+ return $index;
+}
+
+sub get_maintainer_role {
+ my ($index) = @_;
+
+ my $i;
+ my $start = find_starting_index($index);
+ my $end = find_ending_index($index);
+
+ my $role = "unknown";
+ my $subsystem = $typevalue[$start];
+ if (length($subsystem) > 20) {
+ $subsystem = substr($subsystem, 0, 17);
+ $subsystem =~ s/\s*$//;
+ $subsystem = $subsystem . "...";
+ }
+
+ for ($i = $start + 1; $i < $end; $i++) {
+ my $tv = $typevalue[$i];
+ if ($tv =~ m/^(\C):\s*(.*)/) {
+ my $ptype = $1;
+ my $pvalue = $2;
+ if ($ptype eq "S") {
+ $role = $pvalue;
+ }
+ }
+ }
+
+ $role = lc($role);
+ if ($role eq "supported") {
+ $role = "supporter";
+ } elsif ($role eq "maintained") {
+ $role = "maintainer";
+ } elsif ($role eq "odd fixes") {
+ $role = "odd fixer";
+ } elsif ($role eq "orphan") {
+ $role = "orphan minder";
+ } elsif ($role eq "obsolete") {
+ $role = "obsolete minder";
+ } elsif ($role eq "buried alive in reporters") {
+ $role = "chief penguin";
+ }
+
+ return $role . ":" . $subsystem;
+}
+
+sub get_list_role {
+ my ($index) = @_;
+
+ my $i;
+ my $start = find_starting_index($index);
+ my $end = find_ending_index($index);
+
+ my $subsystem = $typevalue[$start];
+ if (length($subsystem) > 20) {
+ $subsystem = substr($subsystem, 0, 17);
+ $subsystem =~ s/\s*$//;
+ $subsystem = $subsystem . "...";
+ }
+
+ if ($subsystem eq "THE REST") {
+ $subsystem = "";
+ }
+
+ return $subsystem;
+}
+
+sub add_categories {
+ my ($index) = @_;
+
+ my $i;
+ my $start = find_starting_index($index);
+ my $end = find_ending_index($index);
+
+ push(@subsystem, $typevalue[$start]);
+
+ for ($i = $start + 1; $i < $end; $i++) {
+ my $tv = $typevalue[$i];
+ if ($tv =~ m/^(\C):\s*(.*)/) {
+ my $ptype = $1;
+ my $pvalue = $2;
+ if ($ptype eq "L") {
+ my $list_address = $pvalue;
+ my $list_additional = "";
+ my $list_role = get_list_role($i);
+
+ if ($list_role ne "") {
+ $list_role = ":" . $list_role;
+ }
+ if ($list_address =~ m/([^\s]+)\s+(.*)$/) {
+ $list_address = $1;
+ $list_additional = $2;
+ }
+ if ($list_additional =~ m/subscribers-only/) {
+ if ($email_subscriber_list) {
+ if (!$hash_list_to{lc($list_address)}) {
+ $hash_list_to{lc($list_address)} = 1;
+ push(@list_to, [$list_address,
+ "subscriber list${list_role}"]);
+ }
+ }
+ } else {
+ if ($email_list) {
+ if (!$hash_list_to{lc($list_address)}) {
+ $hash_list_to{lc($list_address)} = 1;
+ if ($list_additional =~ m/moderated/) {
+ push(@list_to, [$list_address,
+ "moderated list${list_role}"]);
+ } else {
+ push(@list_to, [$list_address,
+ "open list${list_role}"]);
+ }
+ }
+ }
+ }
+ } elsif ($ptype eq "M") {
+ my ($name, $address) = parse_email($pvalue);
+ if ($name eq "") {
+ if ($i > 0) {
+ my $tv = $typevalue[$i - 1];
+ if ($tv =~ m/^(\C):\s*(.*)/) {
+ if ($1 eq "P") {
+ $name = $2;
+ $pvalue = format_email($name, $address, $email_usename);
+ }
+ }
+ }
+ }
+ if ($email_maintainer) {
+ my $role = get_maintainer_role($i);
+ push_email_addresses($pvalue, $role);
+ }
+ } elsif ($ptype eq "T") {
+ push(@scm, $pvalue);
+ } elsif ($ptype eq "W") {
+ push(@web, $pvalue);
+ } elsif ($ptype eq "S") {
+ push(@status, $pvalue);
+ }
+ }
+ }
+}
+
+sub email_inuse {
+ my ($name, $address) = @_;
+
+ return 1 if (($name eq "") && ($address eq ""));
+ return 1 if (($name ne "") && exists($email_hash_name{lc($name)}));
+ return 1 if (($address ne "") && exists($email_hash_address{lc($address)}));
+
+ return 0;
+}
+
+sub push_email_address {
+ my ($line, $role) = @_;
+
+ my ($name, $address) = parse_email($line);
+
+ if ($address eq "") {
+ return 0;
+ }
+
+ if (!$email_remove_duplicates) {
+ push(@email_to, [format_email($name, $address, $email_usename), $role]);
+ } elsif (!email_inuse($name, $address)) {
+ push(@email_to, [format_email($name, $address, $email_usename), $role]);
+ $email_hash_name{lc($name)}++ if ($name ne "");
+ $email_hash_address{lc($address)}++;
+ }
+
+ return 1;
+}
+
+sub push_email_addresses {
+ my ($address, $role) = @_;
+
+ my @address_list = ();
+
+ if (rfc822_valid($address)) {
+ push_email_address($address, $role);
+ } elsif (@address_list = rfc822_validlist($address)) {
+ my $array_count = shift(@address_list);
+ while (my $entry = shift(@address_list)) {
+ push_email_address($entry, $role);
+ }
+ } else {
+ if (!push_email_address($address, $role)) {
+ warn("Invalid MAINTAINERS address: '" . $address . "'\n");
+ }
+ }
+}
+
+sub add_role {
+ my ($line, $role) = @_;
+
+ my ($name, $address) = parse_email($line);
+ my $email = format_email($name, $address, $email_usename);
+
+ foreach my $entry (@email_to) {
+ if ($email_remove_duplicates) {
+ my ($entry_name, $entry_address) = parse_email($entry->[0]);
+ if (($name eq $entry_name || $address eq $entry_address)
+ && ($role eq "" || !($entry->[1] =~ m/$role/))
+ ) {
+ if ($entry->[1] eq "") {
+ $entry->[1] = "$role";
+ } else {
+ $entry->[1] = "$entry->[1],$role";
+ }
+ }
+ } else {
+ if ($email eq $entry->[0]
+ && ($role eq "" || !($entry->[1] =~ m/$role/))
+ ) {
+ if ($entry->[1] eq "") {
+ $entry->[1] = "$role";
+ } else {
+ $entry->[1] = "$entry->[1],$role";
+ }
+ }
+ }
+ }
+}
+
+sub which {
+ my ($bin) = @_;
+
+ foreach my $path (split(/:/, $ENV{PATH})) {
+ if (-e "$path/$bin") {
+ return "$path/$bin";
+ }
+ }
+
+ return "";
+}
+
+sub which_conf {
+ my ($conf) = @_;
+
+ foreach my $path (split(/:/, ".:$ENV{HOME}:.scripts")) {
+ if (-e "$path/$conf") {
+ return "$path/$conf";
+ }
+ }
+
+ return "";
+}
+
+sub mailmap_email {
+ my ($line) = @_;
+
+ my ($name, $address) = parse_email($line);
+ my $email = format_email($name, $address, 1);
+ my $real_name = $name;
+ my $real_address = $address;
+
+ if (exists $mailmap->{names}->{$email} ||
+ exists $mailmap->{addresses}->{$email}) {
+ if (exists $mailmap->{names}->{$email}) {
+ $real_name = $mailmap->{names}->{$email};
+ }
+ if (exists $mailmap->{addresses}->{$email}) {
+ $real_address = $mailmap->{addresses}->{$email};
+ }
+ } else {
+ if (exists $mailmap->{names}->{$address}) {
+ $real_name = $mailmap->{names}->{$address};
+ }
+ if (exists $mailmap->{addresses}->{$address}) {
+ $real_address = $mailmap->{addresses}->{$address};
+ }
+ }
+ return format_email($real_name, $real_address, 1);
+}
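+# Illustrative mapping (hypothetical entries): if the .mailmap loaded by
+# read_mailmap() maps the address 'jane@oldcorp.example' to 'jane@example.org'
+# and its name to 'Jane Doe', then
+#   mailmap_email('J Doe <jane@oldcorp.example>') -> 'Jane Doe <jane@example.org>'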
+
+sub mailmap {
+ my (@addresses) = @_;
+
+ my @mapped_emails = ();
+ foreach my $line (@addresses) {
+ push(@mapped_emails, mailmap_email($line));
+ }
+ merge_by_realname(@mapped_emails) if ($email_use_mailmap);
+ return @mapped_emails;
+}
+
+sub merge_by_realname {
+ my %address_map;
+ my (@emails) = @_;
+
+ foreach my $email (@emails) {
+ my ($name, $address) = parse_email($email);
+ if (exists $address_map{$name}) {
+ $address = $address_map{$name};
+ $email = format_email($name, $address, 1);
+ } else {
+ $address_map{$name} = $address;
+ }
+ }
+}
+
+sub git_execute_cmd {
+ my ($cmd) = @_;
+ my @lines = ();
+
+ my $output = `$cmd`;
+ $output =~ s/^\s*//gm;
+ @lines = split("\n", $output);
+
+ return @lines;
+}
+
+sub hg_execute_cmd {
+ my ($cmd) = @_;
+ my @lines = ();
+
+ my $output = `$cmd`;
+ @lines = split("\n", $output);
+
+ return @lines;
+}
+
+sub extract_formatted_signatures {
+ my (@signature_lines) = @_;
+
+ my @type = @signature_lines;
+
+ s/\s*(.*):.*/$1/ for (@type);
+
+ # cut -f2- -d":"
+ s/\s*.*:\s*(.+)\s*/$1/ for (@signature_lines);
+
+## Reformat email addresses (with names) to avoid badly written signatures
+
+ foreach my $signer (@signature_lines) {
+ $signer = deduplicate_email($signer);
+ }
+
+ return (\@type, \@signature_lines);
+}
+
+sub vcs_find_signers {
+ my ($cmd) = @_;
+ my $commits;
+ my @lines = ();
+ my @signatures = ();
+
+ @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
+ my $pattern = $VCS_cmds{"commit_pattern"};
+
+ $commits = grep(/$pattern/, @lines); # of commits
+
+ @signatures = grep(/^[ \t]*${signature_pattern}.*\@.*$/, @lines);
+
+ return (0, @signatures) if !@signatures;
+
+ save_commits_by_author(@lines) if ($interactive);
+ save_commits_by_signer(@lines) if ($interactive);
+
+ my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures);
+
+ return ($commits, @$signers_ref);
+}
+
+sub vcs_find_author {
+ my ($cmd) = @_;
+ my @lines = ();
+
+ @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
+ return @lines if !@lines;
+
+ my @authors = ();
+ foreach my $line (@lines) {
+ if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
+ my $author = $1;
+ my ($name, $address) = parse_email($author);
+ $author = format_email($name, $address, 1);
+ push(@authors, $author);
+ }
+ }
+
+ save_commits_by_author(@lines) if ($interactive);
+ save_commits_by_signer(@lines) if ($interactive);
+
+ return @authors;
+}
+
+sub vcs_save_commits {
+ my ($cmd) = @_;
+ my @lines = ();
+ my @commits = ();
+
+ @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
+ foreach my $line (@lines) {
+ if ($line =~ m/$VCS_cmds{"blame_commit_pattern"}/) {
+ push(@commits, $1);
+ }
+ }
+
+ return @commits;
+}
+
+sub vcs_blame {
+ my ($file) = @_;
+ my $cmd;
+ my @commits = ();
+
+ return @commits if (!(-f $file));
+
+ if (@range && $VCS_cmds{"blame_range_cmd"} eq "") {
+ my @all_commits = ();
+
+ $cmd = $VCS_cmds{"blame_file_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ @all_commits = vcs_save_commits($cmd);
+
+ foreach my $file_range_diff (@range) {
+ next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
+ my $diff_file = $1;
+ my $diff_start = $2;
+ my $diff_length = $3;
+ next if ("$file" ne "$diff_file");
+ for (my $i = $diff_start; $i < $diff_start + $diff_length; $i++) {
+ push(@commits, $all_commits[$i]);
+ }
+ }
+ } elsif (@range) {
+ foreach my $file_range_diff (@range) {
+ next if (!($file_range_diff =~ m/(.+):(.+):(.+)/));
+ my $diff_file = $1;
+ my $diff_start = $2;
+ my $diff_length = $3;
+ next if ("$file" ne "$diff_file");
+ $cmd = $VCS_cmds{"blame_range_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ push(@commits, vcs_save_commits($cmd));
+ }
+ } else {
+ $cmd = $VCS_cmds{"blame_file_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ @commits = vcs_save_commits($cmd);
+ }
+
+ foreach my $commit (@commits) {
+ $commit =~ s/^\^//g;
+ }
+
+ return @commits;
+}
+
+my $printed_novcs = 0;
+sub vcs_exists {
+ %VCS_cmds = %VCS_cmds_git;
+ return 1 if eval $VCS_cmds{"available"};
+ %VCS_cmds = %VCS_cmds_hg;
+ return 2 if eval $VCS_cmds{"available"};
+ %VCS_cmds = ();
+ if (!$printed_novcs) {
+ warn("$P: No supported VCS found. Add --nogit to options?\n");
+ warn("Using a git repository produces better results.\n");
+ warn("Try latest git repository using:\n");
+ warn("git clone git://git.qemu-project.org/qemu.git\n");
+ $printed_novcs = 1;
+ }
+ return 0;
+}
+
+sub vcs_is_git {
+ vcs_exists();
+ return $vcs_used == 1;
+}
+
+sub vcs_is_hg {
+ return $vcs_used == 2;
+}
+
+sub interactive_get_maintainers {
+ my ($list_ref) = @_;
+ my @list = @$list_ref;
+
+ vcs_exists();
+
+ my %selected;
+ my %authored;
+ my %signed;
+ my $count = 0;
+ my $maintained = 0;
+ foreach my $entry (@list) {
+ $maintained = 1 if ($entry->[1] =~ /^(maintainer|supporter)/i);
+ $selected{$count} = 1;
+ $authored{$count} = 0;
+ $signed{$count} = 0;
+ $count++;
+ }
+
+ #menu loop
+ my $done = 0;
+ my $print_options = 0;
+ my $redraw = 1;
+ while (!$done) {
+ $count = 0;
+ if ($redraw) {
+ printf STDERR "\n%1s %2s %-65s",
+ "*", "#", "email/list and role:stats";
+ if ($email_git ||
+ ($email_git_fallback && !$maintained) ||
+ $email_git_blame) {
+ print STDERR "auth sign";
+ }
+ print STDERR "\n";
+ foreach my $entry (@list) {
+ my $email = $entry->[0];
+ my $role = $entry->[1];
+ my $sel = "";
+ $sel = "*" if ($selected{$count});
+ my $commit_author = $commit_author_hash{$email};
+ my $commit_signer = $commit_signer_hash{$email};
+ my $authored = 0;
+ my $signed = 0;
+ $authored++ for (@{$commit_author});
+ $signed++ for (@{$commit_signer});
+ printf STDERR "%1s %2d %-65s", $sel, $count + 1, $email;
+ printf STDERR "%4d %4d", $authored, $signed
+ if ($authored > 0 || $signed > 0);
+ printf STDERR "\n %s\n", $role;
+ if ($authored{$count}) {
+ my $commit_author = $commit_author_hash{$email};
+ foreach my $ref (@{$commit_author}) {
+ print STDERR " Author: @{$ref}[1]\n";
+ }
+ }
+ if ($signed{$count}) {
+ my $commit_signer = $commit_signer_hash{$email};
+ foreach my $ref (@{$commit_signer}) {
+ print STDERR " @{$ref}[2]: @{$ref}[1]\n";
+ }
+ }
+
+ $count++;
+ }
+ }
+ my $date_ref = \$email_git_since;
+ $date_ref = \$email_hg_since if (vcs_is_hg());
+ if ($print_options) {
+ $print_options = 0;
+ if (vcs_exists()) {
+ print STDERR <<EOT
+
+Version Control options:
+g use git history [$email_git]
+gf use git-fallback [$email_git_fallback]
+b use git blame [$email_git_blame]
+bs use blame signatures [$email_git_blame_signatures]
+c# minimum commits [$email_git_min_signatures]
+%# min percent [$email_git_min_percent]
+d# history to use [$$date_ref]
+x# max maintainers [$email_git_max_maintainers]
+t all signature types [$email_git_all_signature_types]
+m use .mailmap [$email_use_mailmap]
+EOT
+ }
+ print STDERR <<EOT
+
+Additional options:
+0 toggle all
+tm toggle maintainers
+tg toggle git entries
+tl toggle open list entries
+ts toggle subscriber list entries
+f emails in file [$file_emails]
+k keywords in file [$keywords]
+r remove duplicates [$email_remove_duplicates]
+p# pattern match depth [$pattern_depth]
+EOT
+ }
+ print STDERR
+"\n#(toggle), A#(author), S#(signed) *(all), ^(none), O(options), Y(approve): ";
+
+ my $input = <STDIN>;
+ chomp($input);
+
+ $redraw = 1;
+ my $rerun = 0;
+ my @wish = split(/[, ]+/, $input);
+ foreach my $nr (@wish) {
+ $nr = lc($nr);
+ my $sel = substr($nr, 0, 1);
+ my $str = substr($nr, 1);
+ my $val = 0;
+ $val = $1 if $str =~ /^(\d+)$/;
+
+ if ($sel eq "y") {
+ $interactive = 0;
+ $done = 1;
+ $output_rolestats = 0;
+ $output_roles = 0;
+ last;
+ } elsif ($nr =~ /^\d+$/ && $nr > 0 && $nr <= $count) {
+ $selected{$nr - 1} = !$selected{$nr - 1};
+ } elsif ($sel eq "*" || $sel eq '^') {
+ my $toggle = 0;
+ $toggle = 1 if ($sel eq '*');
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = $toggle;
+ }
+ } elsif ($sel eq "0") {
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = !$selected{$i};
+ }
+ } elsif ($sel eq "t") {
+ if (lc($str) eq "m") {
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = !$selected{$i}
+ if ($list[$i]->[1] =~ /^(maintainer|supporter)/i);
+ }
+ } elsif (lc($str) eq "g") {
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = !$selected{$i}
+ if ($list[$i]->[1] =~ /^(author|commit|signer)/i);
+ }
+ } elsif (lc($str) eq "l") {
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = !$selected{$i}
+ if ($list[$i]->[1] =~ /^(open list)/i);
+ }
+ } elsif (lc($str) eq "s") {
+ for (my $i = 0; $i < $count; $i++) {
+ $selected{$i} = !$selected{$i}
+ if ($list[$i]->[1] =~ /^(subscriber list)/i);
+ }
+ }
+ } elsif ($sel eq "a") {
+ if ($val > 0 && $val <= $count) {
+ $authored{$val - 1} = !$authored{$val - 1};
+ } elsif ($str eq '*' || $str eq '^') {
+ my $toggle = 0;
+ $toggle = 1 if ($str eq '*');
+ for (my $i = 0; $i < $count; $i++) {
+ $authored{$i} = $toggle;
+ }
+ }
+ } elsif ($sel eq "s") {
+ if ($val > 0 && $val <= $count) {
+ $signed{$val - 1} = !$signed{$val - 1};
+ } elsif ($str eq '*' || $str eq '^') {
+ my $toggle = 0;
+ $toggle = 1 if ($str eq '*');
+ for (my $i = 0; $i < $count; $i++) {
+ $signed{$i} = $toggle;
+ }
+ }
+ } elsif ($sel eq "o") {
+ $print_options = 1;
+ $redraw = 1;
+ } elsif ($sel eq "g") {
+ if ($str eq "f") {
+ bool_invert(\$email_git_fallback);
+ } else {
+ bool_invert(\$email_git);
+ }
+ $rerun = 1;
+ } elsif ($sel eq "b") {
+ if ($str eq "s") {
+ bool_invert(\$email_git_blame_signatures);
+ } else {
+ bool_invert(\$email_git_blame);
+ }
+ $rerun = 1;
+ } elsif ($sel eq "c") {
+ if ($val > 0) {
+ $email_git_min_signatures = $val;
+ $rerun = 1;
+ }
+ } elsif ($sel eq "x") {
+ if ($val > 0) {
+ $email_git_max_maintainers = $val;
+ $rerun = 1;
+ }
+ } elsif ($sel eq "%") {
+ if ($str ne "" && $val >= 0) {
+ $email_git_min_percent = $val;
+ $rerun = 1;
+ }
+ } elsif ($sel eq "d") {
+ if (vcs_is_git()) {
+ $email_git_since = $str;
+ } elsif (vcs_is_hg()) {
+ $email_hg_since = $str;
+ }
+ $rerun = 1;
+ } elsif ($sel eq "t") {
+ bool_invert(\$email_git_all_signature_types);
+ $rerun = 1;
+ } elsif ($sel eq "f") {
+ bool_invert(\$file_emails);
+ $rerun = 1;
+ } elsif ($sel eq "r") {
+ bool_invert(\$email_remove_duplicates);
+ $rerun = 1;
+ } elsif ($sel eq "m") {
+ bool_invert(\$email_use_mailmap);
+ read_mailmap();
+ $rerun = 1;
+ } elsif ($sel eq "k") {
+ bool_invert(\$keywords);
+ $rerun = 1;
+ } elsif ($sel eq "p") {
+ if ($str ne "" && $val >= 0) {
+ $pattern_depth = $val;
+ $rerun = 1;
+ }
+ } elsif ($sel eq "h" || $sel eq "?") {
+ print STDERR <<EOT
+
+Interactive mode allows you to select the various maintainers, submitters,
+commit signers and mailing lists that could be CC'd on a patch.
+
+Any *'d entry is selected.
+
+If you have git or hg installed, you can choose to summarize the commit
+history of files in the patch. Also, each line of the current file can
+be matched to its commit author and that commit's signers with blame.
+
+Various knobs exist to control the length of time for active commit
+tracking, the maximum number of commit authors and signers to add,
+and such.
+
+Enter selections at the prompt until you are satisfied that the selected
+maintainers are appropriate. You may enter multiple selections separated
+by either commas or spaces.
+
+EOT
+ } else {
+ print STDERR "invalid option: '$nr'\n";
+ $redraw = 0;
+ }
+ }
+ if ($rerun) {
+ print STDERR "git-blame can be very slow, please have patience..."
+ if ($email_git_blame);
+ goto &get_maintainers;
+ }
+ }
+
+ #drop not selected entries
+ $count = 0;
+ my @new_emailto = ();
+ foreach my $entry (@list) {
+ if ($selected{$count}) {
+ push(@new_emailto, $list[$count]);
+ }
+ $count++;
+ }
+ return @new_emailto;
+}
+
+sub bool_invert {
+ my ($bool_ref) = @_;
+
+ if ($$bool_ref) {
+ $$bool_ref = 0;
+ } else {
+ $$bool_ref = 1;
+ }
+}
+
+sub deduplicate_email {
+ my ($email) = @_;
+
+ my $matched = 0;
+ my ($name, $address) = parse_email($email);
+ $email = format_email($name, $address, 1);
+ $email = mailmap_email($email);
+
+ return $email if (!$email_remove_duplicates);
+
+ ($name, $address) = parse_email($email);
+
+ if ($name ne "" && $deduplicate_name_hash{lc($name)}) {
+ $name = $deduplicate_name_hash{lc($name)}->[0];
+ $address = $deduplicate_name_hash{lc($name)}->[1];
+ $matched = 1;
+ } elsif ($deduplicate_address_hash{lc($address)}) {
+ $name = $deduplicate_address_hash{lc($address)}->[0];
+ $address = $deduplicate_address_hash{lc($address)}->[1];
+ $matched = 1;
+ }
+ if (!$matched) {
+ $deduplicate_name_hash{lc($name)} = [ $name, $address ];
+ $deduplicate_address_hash{lc($address)} = [ $name, $address ];
+ }
+ $email = format_email($name, $address, 1);
+ $email = mailmap_email($email);
+ return $email;
+}
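+# Illustrative effect with --remove-duplicates (hypothetical addresses): once
+# 'Jane Doe <jane@work.example>' has been seen, a later
+# 'jane doe <jane@home.example>' is folded back to the first-seen pair and
+# returned as 'Jane Doe <jane@work.example>', because the lower-cased name is
+# already present in %deduplicate_name_hash.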
+
+sub save_commits_by_author {
+ my (@lines) = @_;
+
+ my @authors = ();
+ my @commits = ();
+ my @subjects = ();
+
+ foreach my $line (@lines) {
+ if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
+ my $author = $1;
+ $author = deduplicate_email($author);
+ push(@authors, $author);
+ }
+ push(@commits, $1) if ($line =~ m/$VCS_cmds{"commit_pattern"}/);
+ push(@subjects, $1) if ($line =~ m/$VCS_cmds{"subject_pattern"}/);
+ }
+
+ for (my $i = 0; $i < @authors; $i++) {
+ my $exists = 0;
+ foreach my $ref(@{$commit_author_hash{$authors[$i]}}) {
+ if (@{$ref}[0] eq $commits[$i] &&
+ @{$ref}[1] eq $subjects[$i]) {
+ $exists = 1;
+ last;
+ }
+ }
+ if (!$exists) {
+ push(@{$commit_author_hash{$authors[$i]}},
+ [ ($commits[$i], $subjects[$i]) ]);
+ }
+ }
+}
+
+sub save_commits_by_signer {
+ my (@lines) = @_;
+
+ my $commit = "";
+ my $subject = "";
+
+ foreach my $line (@lines) {
+ $commit = $1 if ($line =~ m/$VCS_cmds{"commit_pattern"}/);
+ $subject = $1 if ($line =~ m/$VCS_cmds{"subject_pattern"}/);
+ if ($line =~ /^[ \t]*${signature_pattern}.*\@.*$/) {
+ my @signatures = ($line);
+ my ($types_ref, $signers_ref) = extract_formatted_signatures(@signatures);
+ my @types = @$types_ref;
+ my @signers = @$signers_ref;
+
+ my $type = $types[0];
+ my $signer = $signers[0];
+
+ $signer = deduplicate_email($signer);
+
+ my $exists = 0;
+ foreach my $ref(@{$commit_signer_hash{$signer}}) {
+ if (@{$ref}[0] eq $commit &&
+ @{$ref}[1] eq $subject &&
+ @{$ref}[2] eq $type) {
+ $exists = 1;
+ last;
+ }
+ }
+ if (!$exists) {
+ push(@{$commit_signer_hash{$signer}},
+ [ ($commit, $subject, $type) ]);
+ }
+ }
+ }
+}
+
+sub vcs_assign {
+ my ($role, $divisor, @lines) = @_;
+
+ my %hash;
+ my $count = 0;
+
+ return if (@lines <= 0);
+
+ if ($divisor <= 0) {
+ warn("Bad divisor in " . (caller(0))[3] . ": $divisor\n");
+ $divisor = 1;
+ }
+
+ @lines = mailmap(@lines);
+
+ return if (@lines <= 0);
+
+ @lines = sort(@lines);
+
+ # uniq -c
+ $hash{$_}++ for @lines;
+
+ # sort -rn
+ foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
+ my $sign_offs = $hash{$line};
+ my $percent = $sign_offs * 100 / $divisor;
+
+ $percent = 100 if ($percent > 100);
+ $count++;
+ last if ($sign_offs < $email_git_min_signatures ||
+ $count > $email_git_max_maintainers ||
+ $percent < $email_git_min_percent);
+ push_email_address($line, '');
+ if ($output_rolestats) {
+ my $fmt_percent = sprintf("%.0f", $percent);
+ add_role($line, "$role:$sign_offs/$divisor=$fmt_percent%");
+ } else {
+ add_role($line, $role);
+ }
+ }
+}
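+# Illustrative role strings produced above (hypothetical numbers): with
+# --rolestats, a signer with 3 sign-offs out of 10 commits is added with the
+# role "commit_signer:3/10=30%", which later appears in the output as
+# "Jane Doe <jane@example.org> (commit_signer:3/10=30%)"; without --rolestats
+# the role is just "commit_signer".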
+
+sub vcs_file_signoffs {
+ my ($file) = @_;
+
+ my @signers = ();
+ my $commits;
+
+ $vcs_used = vcs_exists();
+ return if (!$vcs_used);
+
+ my $cmd = $VCS_cmds{"find_signers_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; # interpolate $cmd
+
+ ($commits, @signers) = vcs_find_signers($cmd);
+
+ foreach my $signer (@signers) {
+ $signer = deduplicate_email($signer);
+ }
+
+ vcs_assign("commit_signer", $commits, @signers);
+}
+
+sub vcs_file_blame {
+ my ($file) = @_;
+
+ my @signers = ();
+ my @all_commits = ();
+ my @commits = ();
+ my $total_commits;
+ my $total_lines;
+
+ $vcs_used = vcs_exists();
+ return if (!$vcs_used);
+
+ @all_commits = vcs_blame($file);
+ @commits = uniq(@all_commits);
+ $total_commits = @commits;
+ $total_lines = @all_commits;
+
+ if ($email_git_blame_signatures) {
+ if (vcs_is_hg()) {
+ my $commit_count;
+ my @commit_signers = ();
+ my $commit = join(" -r ", @commits);
+ my $cmd;
+
+ $cmd = $VCS_cmds{"find_commit_signers_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd
+
+ ($commit_count, @commit_signers) = vcs_find_signers($cmd);
+
+ push(@signers, @commit_signers);
+ } else {
+ foreach my $commit (@commits) {
+ my $commit_count;
+ my @commit_signers = ();
+ my $cmd;
+
+ $cmd = $VCS_cmds{"find_commit_signers_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd
+
+ ($commit_count, @commit_signers) = vcs_find_signers($cmd);
+
+ push(@signers, @commit_signers);
+ }
+ }
+ }
+
+ if ($from_filename) {
+ if ($output_rolestats) {
+ my @blame_signers;
+ if (vcs_is_hg()) {{ # Double brace for last exit
+ my $commit_count;
+ my @commit_signers = ();
+ @commits = uniq(@commits);
+ @commits = sort(@commits);
+ my $commit = join(" -r ", @commits);
+ my $cmd;
+
+ $cmd = $VCS_cmds{"find_commit_author_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #substitute variables in $cmd
+
+ my @lines = ();
+
+ @lines = &{$VCS_cmds{"execute_cmd"}}($cmd);
+
+ last if !@lines;
+
+ my @authors = ();
+ foreach my $line (@lines) {
+ if ($line =~ m/$VCS_cmds{"author_pattern"}/) {
+ my $author = $1;
+ $author = deduplicate_email($author);
+ push(@authors, $author);
+ }
+ }
+
+ save_commits_by_author(@lines) if ($interactive);
+ save_commits_by_signer(@lines) if ($interactive);
+
+ push(@signers, @authors);
+ }}
+ else {
+ foreach my $commit (@commits) {
+ my $i;
+ my $cmd = $VCS_cmds{"find_commit_author_cmd"};
+ $cmd =~ s/(\$\w+)/$1/eeg; #interpolate $cmd
+ my @author = vcs_find_author($cmd);
+ next if !@author;
+
+ my $formatted_author = deduplicate_email($author[0]);
+
+ my $count = grep(/$commit/, @all_commits);
+ for ($i = 0; $i < $count ; $i++) {
+ push(@blame_signers, $formatted_author);
+ }
+ }
+ }
+ if (@blame_signers) {
+ vcs_assign("authored lines", $total_lines, @blame_signers);
+ }
+ }
+ foreach my $signer (@signers) {
+ $signer = deduplicate_email($signer);
+ }
+ vcs_assign("commits", $total_commits, @signers);
+ } else {
+ foreach my $signer (@signers) {
+ $signer = deduplicate_email($signer);
+ }
+ vcs_assign("modified commits", $total_commits, @signers);
+ }
+}
+
+sub uniq {
+ my (@parms) = @_;
+
+ my %saw;
+ @parms = grep(!$saw{$_}++, @parms);
+ return @parms;
+}
+
+sub sort_and_uniq {
+ my (@parms) = @_;
+
+ my %saw;
+ @parms = sort @parms;
+ @parms = grep(!$saw{$_}++, @parms);
+ return @parms;
+}
+
+sub clean_file_emails {
+ my (@file_emails) = @_;
+ my @fmt_emails = ();
+
+ foreach my $email (@file_emails) {
+ $email =~ s/[\(\<\{]{0,1}([A-Za-z0-9_\.\+-]+\@[A-Za-z0-9\.-]+)[\)\>\}]{0,1}/\<$1\>/g;
+ my ($name, $address) = parse_email($email);
+ if ($name eq '"[,\.]"') {
+ $name = "";
+ }
+
+ my @nw = split(/[^A-Za-zÀ-ÿ\'\,\.\+-]/, $name);
+ if (@nw > 2) {
+ my $first = $nw[@nw - 3];
+ my $middle = $nw[@nw - 2];
+ my $last = $nw[@nw - 1];
+
+ if (((length($first) == 1 && $first =~ m/[A-Za-z]/) ||
+ (length($first) == 2 && substr($first, -1) eq ".")) ||
+ (length($middle) == 1 ||
+ (length($middle) == 2 && substr($middle, -1) eq "."))) {
+ $name = "$first $middle $last";
+ } else {
+ $name = "$middle $last";
+ }
+ }
+
+ if (substr($name, -1) =~ /[,\.]/) {
+ $name = substr($name, 0, length($name) - 1);
+ } elsif (substr($name, -2) =~ /[,\.]"/) {
+ $name = substr($name, 0, length($name) - 2) . '"';
+ }
+
+ if (substr($name, 0, 1) =~ /[,\.]/) {
+ $name = substr($name, 1, length($name) - 1);
+ } elsif (substr($name, 0, 2) =~ /"[,\.]/) {
+ $name = '"' . substr($name, 2, length($name) - 2);
+ }
+
+ my $fmt_email = format_email($name, $address, $email_usename);
+ push(@fmt_emails, $fmt_email);
+ }
+ return @fmt_emails;
+}
+
+sub merge_email {
+ my @lines;
+ my %saw;
+
+ for (@_) {
+ my ($address, $role) = @$_;
+ if (!$saw{$address}) {
+ if ($output_roles) {
+ push(@lines, "$address ($role)");
+ } else {
+ push(@lines, $address);
+ }
+ $saw{$address} = 1;
+ }
+ }
+
+ return @lines;
+}
+
+sub output {
+ my (@parms) = @_;
+
+ if ($output_multiline) {
+ foreach my $line (@parms) {
+ print("${line}\n");
+ }
+ } else {
+ print(join($output_separator, @parms));
+ print("\n");
+ }
+}
+
+my $rfc822re;
+
+sub make_rfc822re {
+# Basic lexical tokens are specials, domain_literal, quoted_string, atom, and
+# comment. We must allow for rfc822_lwsp (or comments) after each of these.
+# This regexp will only work on addresses which have had comments stripped
+# and replaced with rfc822_lwsp.
+
+ my $specials = '()<>@,;:\\\\".\\[\\]';
+ my $controls = '\\000-\\037\\177';
+
+ my $dtext = "[^\\[\\]\\r\\\\]";
+ my $domain_literal = "\\[(?:$dtext|\\\\.)*\\]$rfc822_lwsp*";
+
+ my $quoted_string = "\"(?:[^\\\"\\r\\\\]|\\\\.|$rfc822_lwsp)*\"$rfc822_lwsp*";
+
+# Use zero-width assertion to spot the limit of an atom. A simple
+# $rfc822_lwsp* causes the regexp engine to hang occasionally.
+ my $atom = "[^$specials $controls]+(?:$rfc822_lwsp+|\\Z|(?=[\\[\"$specials]))";
+ my $word = "(?:$atom|$quoted_string)";
+ my $localpart = "$word(?:\\.$rfc822_lwsp*$word)*";
+
+ my $sub_domain = "(?:$atom|$domain_literal)";
+ my $domain = "$sub_domain(?:\\.$rfc822_lwsp*$sub_domain)*";
+
+ my $addr_spec = "$localpart\@$rfc822_lwsp*$domain";
+
+ my $phrase = "$word*";
+ my $route = "(?:\@$domain(?:,\@$rfc822_lwsp*$domain)*:$rfc822_lwsp*)";
+ my $route_addr = "\\<$rfc822_lwsp*$route?$addr_spec\\>$rfc822_lwsp*";
+ my $mailbox = "(?:$addr_spec|$phrase$route_addr)";
+
+ my $group = "$phrase:$rfc822_lwsp*(?:$mailbox(?:,\\s*$mailbox)*)?;\\s*";
+ my $address = "(?:$mailbox|$group)";
+
+ return "$rfc822_lwsp*$address";
+}
+
+sub rfc822_strip_comments {
+ my $s = shift;
+# Recursively remove comments, and replace with a single space. The simpler
+# regexps in the Email Addressing FAQ are imperfect - they will miss escaped
+# chars in atoms, for example.
+
+ while ($s =~ s/^((?:[^"\\]|\\.)*
+ (?:"(?:[^"\\]|\\.)*"(?:[^"\\]|\\.)*)*)
+ \((?:[^()\\]|\\.)*\)/$1 /osx) {}
+ return $s;
+}
+
+# valid: returns true if the parameter is an RFC822 valid address
+#
+sub rfc822_valid {
+ my $s = rfc822_strip_comments(shift);
+
+ if (!$rfc822re) {
+ $rfc822re = make_rfc822re();
+ }
+
+ return $s =~ m/^$rfc822re$/so && $s =~ m/^$rfc822_char*$/;
+}
+
+# validlist: In scalar context, returns true if the parameter is an RFC822
+# valid list of addresses.
+#
+# In list context, returns an empty list on failure (an invalid
+# address was found); otherwise a list whose first element is the
+# number of addresses found and whose remaining elements are the
+# addresses. This is needed to disambiguate failure (invalid)
+# from success with no addresses found, because an empty string is
+# a valid list.
+
+sub rfc822_validlist {
+ my $s = rfc822_strip_comments(shift);
+
+ if (!$rfc822re) {
+ $rfc822re = make_rfc822re();
+ }
+ # * null list items are valid according to the RFC
+ # * the '1' business is to aid in distinguishing failure from no results
+
+ my @r;
+ if ($s =~ m/^(?:$rfc822re)?(?:,(?:$rfc822re)?)*$/so &&
+ $s =~ m/^$rfc822_char*$/) {
+ while ($s =~ m/(?:^|,$rfc822_lwsp*)($rfc822re)/gos) {
+ push(@r, $1);
+ }
+ return wantarray ? (scalar(@r), @r) : 1;
+ }
+ return wantarray ? () : 0;
+}
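+# Illustrative checks (hypothetical addresses):
+#   rfc822_valid('jane@example.org')            -> true
+#   rfc822_valid('Jane Doe <jane@example.org>') -> true
+#   rfc822_validlist('a@example.org, b@example.org') in list context returns a
+#   count of 2 followed by the two addresses.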
diff --git a/scripts/gtester-cat b/scripts/gtester-cat
new file mode 100755
index 00000000..061a952c
--- /dev/null
+++ b/scripts/gtester-cat
@@ -0,0 +1,26 @@
+#!/bin/sh
+#
+# Copyright IBM, Corp. 2012
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPLv2 or later.
+# See the COPYING file in the top-level directory.
+
+cat <<EOF
+<?xml version="1.0"?>
+<gtester>
+ <info>
+ <package>qemu</package>
+ <version>0.0</version>
+ <revision>rev</revision>
+ </info>
+EOF
+
+sed \
+ -e '/<?xml/d' \
+ -e '/^<gtester>$/d' \
+ -e '/<info>/,/<\/info>/d' \
+ -e '$b' \
+ -e '/^<\/gtester>$/d' "$@"
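+# Typical use (illustrative, hypothetical file names):
+#   scripts/gtester-cat check-qdict.xml check-qint.xml > all.xml
+# The sed expressions drop each input's XML declaration, <gtester> open tag and
+# <info> block, and drop every closing </gtester> except the one on the very
+# last line, so the concatenation stays well-formed.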
diff --git a/scripts/hxtool b/scripts/hxtool
new file mode 100644
index 00000000..995bb7f0
--- /dev/null
+++ b/scripts/hxtool
@@ -0,0 +1,105 @@
+#!/bin/sh
+
+hxtoh()
+{
+ flag=1
+ while read -r str; do
+ case $str in
+ HXCOMM*)
+ ;;
+ STEXI*|ETEXI*|SQMP*|EQMP*) flag=$(($flag^1))
+ ;;
+ *)
+ test $flag -eq 1 && printf "%s\n" "$str"
+ ;;
+ esac
+ done
+}
+
+hxtotexi()
+{
+ flag=0
+ line=1
+ while read -r str; do
+ case "$str" in
+ HXCOMM*)
+ ;;
+ STEXI*)
+ if test $flag -eq 1 ; then
+ echo "line $line: syntax error: expected ETEXI, found $str" >&2
+ exit 1
+ fi
+ flag=1
+ ;;
+ ETEXI*)
+ if test $flag -ne 1 ; then
+ echo "line $line: syntax error: expected STEXI, found $str" >&2
+ exit 1
+ fi
+ flag=0
+ ;;
+ SQMP*|EQMP*)
+ if test $flag -eq 1 ; then
+ echo "line $line: syntax error: expected ETEXI, found $str" >&2
+ exit 1
+ fi
+ ;;
+ DEFHEADING*)
+ echo "$(expr "$str" : "DEFHEADING(\(.*\))")"
+ ;;
+ ARCHHEADING*)
+ echo "$(expr "$str" : "ARCHHEADING(\(.*\),.*)")"
+ ;;
+ *)
+ test $flag -eq 1 && echo "$str"
+ ;;
+ esac
+ line=$((line+1))
+ done
+}
+
+hxtoqmp()
+{
+ IFS=
+ flag=0
+ line=1
+ while read -r str; do
+ case "$str" in
+ HXCOMM*)
+ ;;
+ SQMP*)
+ if test $flag -eq 1 ; then
+ echo "line $line: syntax error: expected EQMP, found $str" >&2
+ exit 1
+ fi
+ flag=1
+ ;;
+ EQMP*)
+ if test $flag -ne 1 ; then
+ echo "line $line: syntax error: expected SQMP, found $str" >&2
+ exit 1
+ fi
+ flag=0
+ ;;
+ STEXI*|ETEXI*)
+ if test $flag -eq 1 ; then
+ echo "line $line: syntax error: expected EQMP, found $str" >&2
+ exit 1
+ fi
+ ;;
+ *)
+ test $flag -eq 1 && echo "$str"
+ ;;
+ esac
+ line=$((line+1))
+ done
+}
+
+case "$1" in
+"-h") hxtoh ;;
+"-t") hxtotexi ;;
+"-q") hxtoqmp ;;
+*) exit 1 ;;
+esac
+
+exit 0
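+# Illustrative behaviour (hypothetical .hx input):
+#     HXCOMM This comment never appears in any output
+#     DEF("help", ...)
+#     STEXI
+#     @item help
+#     ETEXI
+# "-h" keeps only the DEF(...) line (everything outside STEXI/ETEXI and
+# SQMP/EQMP blocks), "-t" keeps only the "@item help" line between STEXI and
+# ETEXI, and "-q" does the same for SQMP/EQMP blocks.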
diff --git a/scripts/kvm/kvm_flightrecorder b/scripts/kvm/kvm_flightrecorder
new file mode 100755
index 00000000..7fb1c2d1
--- /dev/null
+++ b/scripts/kvm/kvm_flightrecorder
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+#
+# KVM Flight Recorder - ring buffer tracing script
+#
+# Copyright (C) 2012 IBM Corp
+#
+# Author: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+#
+# This script provides a command-line interface to kvm ftrace and is designed
+# to be used as a flight recorder that is always running. To start in-memory
+# recording:
+#
+# sudo kvm_flightrecorder start 8192 # 8 MB per-cpu ring buffers
+#
+# The per-cpu ring buffer size can be given in KB as an optional argument to
+# the 'start' subcommand.
+#
+# To stop the flight recorder:
+#
+# sudo kvm_flightrecorder stop
+#
+# To dump the contents of the flight recorder (this can be done when the
+# recorder is stopped or while it is running):
+#
+# sudo kvm_flightrecorder dump >/path/to/dump.txt
+#
+# To observe the trace while it is running, use the 'tail' subcommand:
+#
+# sudo kvm_flightrecorder tail
+#
+# Note that the flight recorder may impact overall system performance by
+# consuming CPU cycles. No disk I/O is performed since the ring buffer holds a
+# fixed-size in-memory trace.
+
+import sys
+import os
+
+tracing_dir = '/sys/kernel/debug/tracing'
+
+def trace_path(*args):
+ return os.path.join(tracing_dir, *args)
+
+def write_file(path, data):
+ open(path, 'wb').write(data)
+
+def enable_event(subsystem, event, enable):
+ write_file(trace_path('events', subsystem, event, 'enable'), '1' if enable else '0')
+
+def enable_subsystem(subsystem, enable):
+ write_file(trace_path('events', subsystem, 'enable'), '1' if enable else '0')
+
+def start_tracing():
+ enable_subsystem('kvm', True)
+ write_file(trace_path('tracing_on'), '1')
+
+def stop_tracing():
+ write_file(trace_path('tracing_on'), '0')
+ enable_subsystem('kvm', False)
+ write_file(trace_path('events', 'enable'), '0')
+ write_file(trace_path('current_tracer'), 'nop')
+
+def dump_trace():
+ tracefile = open(trace_path('trace'), 'r')
+ try:
+ lines = True
+ while lines:
+ lines = tracefile.readlines(64 * 1024)
+ sys.stdout.writelines(lines)
+ except KeyboardInterrupt:
+ pass
+
+def tail_trace():
+ try:
+ for line in open(trace_path('trace_pipe'), 'r'):
+ sys.stdout.write(line)
+ except KeyboardInterrupt:
+ pass
+
+def usage():
+ print 'Usage: %s start [buffer_size_kb] | stop | dump | tail' % sys.argv[0]
+ print 'Control the KVM flight recorder tracing.'
+ sys.exit(0)
+
+def main():
+ if len(sys.argv) < 2:
+ usage()
+
+ cmd = sys.argv[1]
+ if cmd == '--version':
+ print 'kvm_flightrecorder version 1.0'
+ sys.exit(0)
+
+ if not os.path.isdir(tracing_dir):
+        print 'Unable to find tracing debugfs directory, try:'
+ print 'mount -t debugfs none /sys/kernel/debug'
+ sys.exit(1)
+ if not os.access(tracing_dir, os.W_OK):
+ print 'Unable to write to tracing debugfs directory, please run as root'
+ sys.exit(1)
+
+ if cmd == 'start':
+ stop_tracing() # clean up first
+
+ if len(sys.argv) == 3:
+ try:
+ buffer_size_kb = int(sys.argv[2])
+ except ValueError:
+ print 'Invalid per-cpu trace buffer size in KB'
+ sys.exit(1)
+ write_file(trace_path('buffer_size_kb'), str(buffer_size_kb))
+ print 'Per-CPU ring buffer size set to %d KB' % buffer_size_kb
+
+ start_tracing()
+ print 'KVM flight recorder enabled'
+ elif cmd == 'stop':
+ stop_tracing()
+ print 'KVM flight recorder disabled'
+ elif cmd == 'dump':
+ dump_trace()
+ elif cmd == 'tail':
+ tail_trace()
+ else:
+ usage()
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/scripts/kvm/kvm_stat b/scripts/kvm/kvm_stat
new file mode 100755
index 00000000..7e5d2561
--- /dev/null
+++ b/scripts/kvm/kvm_stat
@@ -0,0 +1,646 @@
+#!/usr/bin/python
+#
+# top-like utility for displaying kvm statistics
+#
+# Copyright 2006-2008 Qumranet Technologies
+# Copyright 2008-2011 Red Hat, Inc.
+#
+# Authors:
+# Avi Kivity <avi@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+
+import curses
+import sys, os, time, optparse, ctypes
+from ctypes import *
+
+class DebugfsProvider(object):
+ def __init__(self):
+ self.base = '/sys/kernel/debug/kvm'
+ self._fields = os.listdir(self.base)
+ def fields(self):
+ return self._fields
+ def select(self, fields):
+ self._fields = fields
+ def read(self):
+ def val(key):
+ return int(file(self.base + '/' + key).read())
+ return dict([(key, val(key)) for key in self._fields])
+
+vmx_exit_reasons = {
+ 0: 'EXCEPTION_NMI',
+ 1: 'EXTERNAL_INTERRUPT',
+ 2: 'TRIPLE_FAULT',
+ 7: 'PENDING_INTERRUPT',
+ 8: 'NMI_WINDOW',
+ 9: 'TASK_SWITCH',
+ 10: 'CPUID',
+ 12: 'HLT',
+ 14: 'INVLPG',
+ 15: 'RDPMC',
+ 16: 'RDTSC',
+ 18: 'VMCALL',
+ 19: 'VMCLEAR',
+ 20: 'VMLAUNCH',
+ 21: 'VMPTRLD',
+ 22: 'VMPTRST',
+ 23: 'VMREAD',
+ 24: 'VMRESUME',
+ 25: 'VMWRITE',
+ 26: 'VMOFF',
+ 27: 'VMON',
+ 28: 'CR_ACCESS',
+ 29: 'DR_ACCESS',
+ 30: 'IO_INSTRUCTION',
+ 31: 'MSR_READ',
+ 32: 'MSR_WRITE',
+ 33: 'INVALID_STATE',
+ 36: 'MWAIT_INSTRUCTION',
+ 39: 'MONITOR_INSTRUCTION',
+ 40: 'PAUSE_INSTRUCTION',
+ 41: 'MCE_DURING_VMENTRY',
+ 43: 'TPR_BELOW_THRESHOLD',
+ 44: 'APIC_ACCESS',
+ 48: 'EPT_VIOLATION',
+ 49: 'EPT_MISCONFIG',
+ 54: 'WBINVD',
+ 55: 'XSETBV',
+ 56: 'APIC_WRITE',
+ 58: 'INVPCID',
+}
+
+svm_exit_reasons = {
+ 0x000: 'READ_CR0',
+ 0x003: 'READ_CR3',
+ 0x004: 'READ_CR4',
+ 0x008: 'READ_CR8',
+ 0x010: 'WRITE_CR0',
+ 0x013: 'WRITE_CR3',
+ 0x014: 'WRITE_CR4',
+ 0x018: 'WRITE_CR8',
+ 0x020: 'READ_DR0',
+ 0x021: 'READ_DR1',
+ 0x022: 'READ_DR2',
+ 0x023: 'READ_DR3',
+ 0x024: 'READ_DR4',
+ 0x025: 'READ_DR5',
+ 0x026: 'READ_DR6',
+ 0x027: 'READ_DR7',
+ 0x030: 'WRITE_DR0',
+ 0x031: 'WRITE_DR1',
+ 0x032: 'WRITE_DR2',
+ 0x033: 'WRITE_DR3',
+ 0x034: 'WRITE_DR4',
+ 0x035: 'WRITE_DR5',
+ 0x036: 'WRITE_DR6',
+ 0x037: 'WRITE_DR7',
+ 0x040: 'EXCP_BASE',
+ 0x060: 'INTR',
+ 0x061: 'NMI',
+ 0x062: 'SMI',
+ 0x063: 'INIT',
+ 0x064: 'VINTR',
+ 0x065: 'CR0_SEL_WRITE',
+ 0x066: 'IDTR_READ',
+ 0x067: 'GDTR_READ',
+ 0x068: 'LDTR_READ',
+ 0x069: 'TR_READ',
+ 0x06a: 'IDTR_WRITE',
+ 0x06b: 'GDTR_WRITE',
+ 0x06c: 'LDTR_WRITE',
+ 0x06d: 'TR_WRITE',
+ 0x06e: 'RDTSC',
+ 0x06f: 'RDPMC',
+ 0x070: 'PUSHF',
+ 0x071: 'POPF',
+ 0x072: 'CPUID',
+ 0x073: 'RSM',
+ 0x074: 'IRET',
+ 0x075: 'SWINT',
+ 0x076: 'INVD',
+ 0x077: 'PAUSE',
+ 0x078: 'HLT',
+ 0x079: 'INVLPG',
+ 0x07a: 'INVLPGA',
+ 0x07b: 'IOIO',
+ 0x07c: 'MSR',
+ 0x07d: 'TASK_SWITCH',
+ 0x07e: 'FERR_FREEZE',
+ 0x07f: 'SHUTDOWN',
+ 0x080: 'VMRUN',
+ 0x081: 'VMMCALL',
+ 0x082: 'VMLOAD',
+ 0x083: 'VMSAVE',
+ 0x084: 'STGI',
+ 0x085: 'CLGI',
+ 0x086: 'SKINIT',
+ 0x087: 'RDTSCP',
+ 0x088: 'ICEBP',
+ 0x089: 'WBINVD',
+ 0x08a: 'MONITOR',
+ 0x08b: 'MWAIT',
+ 0x08c: 'MWAIT_COND',
+ 0x08d: 'XSETBV',
+ 0x400: 'NPF',
+}
+
+# EC definition of HSR (from arch/arm64/include/asm/kvm_arm.h)
+aarch64_exit_reasons = {
+ 0x00: 'UNKNOWN',
+ 0x01: 'WFI',
+ 0x03: 'CP15_32',
+ 0x04: 'CP15_64',
+ 0x05: 'CP14_MR',
+ 0x06: 'CP14_LS',
+ 0x07: 'FP_ASIMD',
+ 0x08: 'CP10_ID',
+ 0x0C: 'CP14_64',
+ 0x0E: 'ILL_ISS',
+ 0x11: 'SVC32',
+ 0x12: 'HVC32',
+ 0x13: 'SMC32',
+ 0x15: 'SVC64',
+ 0x16: 'HVC64',
+ 0x17: 'SMC64',
+ 0x18: 'SYS64',
+ 0x20: 'IABT',
+ 0x21: 'IABT_HYP',
+ 0x22: 'PC_ALIGN',
+ 0x24: 'DABT',
+ 0x25: 'DABT_HYP',
+ 0x26: 'SP_ALIGN',
+ 0x28: 'FP_EXC32',
+ 0x2C: 'FP_EXC64',
+ 0x2F: 'SERROR',
+ 0x30: 'BREAKPT',
+ 0x31: 'BREAKPT_HYP',
+ 0x32: 'SOFTSTP',
+ 0x33: 'SOFTSTP_HYP',
+ 0x34: 'WATCHPT',
+ 0x35: 'WATCHPT_HYP',
+ 0x38: 'BKPT32',
+ 0x3A: 'VECTOR32',
+ 0x3C: 'BRK64',
+}
+
+# From include/uapi/linux/kvm.h, KVM_EXIT_xxx
+userspace_exit_reasons = {
+ 0: 'UNKNOWN',
+ 1: 'EXCEPTION',
+ 2: 'IO',
+ 3: 'HYPERCALL',
+ 4: 'DEBUG',
+ 5: 'HLT',
+ 6: 'MMIO',
+ 7: 'IRQ_WINDOW_OPEN',
+ 8: 'SHUTDOWN',
+ 9: 'FAIL_ENTRY',
+ 10: 'INTR',
+ 11: 'SET_TPR',
+ 12: 'TPR_ACCESS',
+ 13: 'S390_SIEIC',
+ 14: 'S390_RESET',
+ 15: 'DCR',
+ 16: 'NMI',
+ 17: 'INTERNAL_ERROR',
+ 18: 'OSI',
+ 19: 'PAPR_HCALL',
+ 20: 'S390_UCONTROL',
+ 21: 'WATCHDOG',
+ 22: 'S390_TSCH',
+ 23: 'EPR',
+ 24: 'SYSTEM_EVENT',
+}
+
+x86_exit_reasons = {
+ 'vmx': vmx_exit_reasons,
+ 'svm': svm_exit_reasons,
+}
+
+sc_perf_evt_open = None
+exit_reasons = None
+
+ioctl_numbers = {
+ 'SET_FILTER' : 0x40082406,
+ 'ENABLE' : 0x00002400,
+ 'DISABLE' : 0x00002401,
+ 'RESET' : 0x00002403,
+}
+
+def x86_init(flag):
+ globals().update({
+ 'sc_perf_evt_open' : 298,
+ 'exit_reasons' : x86_exit_reasons[flag],
+ })
+
+def s390_init():
+ globals().update({
+ 'sc_perf_evt_open' : 331
+ })
+
+def ppc_init():
+ globals().update({
+ 'sc_perf_evt_open' : 319,
+ 'ioctl_numbers' : {
+ 'SET_FILTER' : 0x80002406 | (ctypes.sizeof(ctypes.c_char_p) << 16),
+ 'ENABLE' : 0x20002400,
+ 'DISABLE' : 0x20002401,
+ }
+ })
+
+def aarch64_init():
+ globals().update({
+ 'sc_perf_evt_open' : 241,
+ 'exit_reasons' : aarch64_exit_reasons,
+ })
+
+def detect_platform():
+ if os.uname()[4].startswith('ppc'):
+ ppc_init()
+ return
+ elif os.uname()[4].startswith('aarch64'):
+ aarch64_init()
+ return
+
+ for line in file('/proc/cpuinfo').readlines():
+ if line.startswith('flags'):
+ for flag in line.split():
+ if flag in x86_exit_reasons:
+ x86_init(flag)
+ return
+ elif line.startswith('vendor_id'):
+ for flag in line.split():
+ if flag == 'IBM/S390':
+ s390_init()
+ return
+
+detect_platform()
+
+def invert(d):
+ return dict((x[1], x[0]) for x in d.iteritems())
+
+filters = {}
+filters['kvm_userspace_exit'] = ('reason', invert(userspace_exit_reasons))
+if exit_reasons:
+ filters['kvm_exit'] = ('exit_reason', invert(exit_reasons))
+
+import struct, array
+
+libc = ctypes.CDLL('libc.so.6')
+syscall = libc.syscall
+get_errno = libc.__errno_location
+get_errno.restype = POINTER(c_int)
+
+class perf_event_attr(ctypes.Structure):
+ _fields_ = [('type', ctypes.c_uint32),
+ ('size', ctypes.c_uint32),
+ ('config', ctypes.c_uint64),
+ ('sample_freq', ctypes.c_uint64),
+ ('sample_type', ctypes.c_uint64),
+ ('read_format', ctypes.c_uint64),
+ ('flags', ctypes.c_uint64),
+ ('wakeup_events', ctypes.c_uint32),
+ ('bp_type', ctypes.c_uint32),
+ ('bp_addr', ctypes.c_uint64),
+ ('bp_len', ctypes.c_uint64),
+ ]
+def _perf_event_open(attr, pid, cpu, group_fd, flags):
+ return syscall(sc_perf_evt_open, ctypes.pointer(attr), ctypes.c_int(pid),
+ ctypes.c_int(cpu), ctypes.c_int(group_fd),
+ ctypes.c_long(flags))
+
+PERF_TYPE_HARDWARE = 0
+PERF_TYPE_SOFTWARE = 1
+PERF_TYPE_TRACEPOINT = 2
+PERF_TYPE_HW_CACHE = 3
+PERF_TYPE_RAW = 4
+PERF_TYPE_BREAKPOINT = 5
+
+PERF_SAMPLE_IP = 1 << 0
+PERF_SAMPLE_TID = 1 << 1
+PERF_SAMPLE_TIME = 1 << 2
+PERF_SAMPLE_ADDR = 1 << 3
+PERF_SAMPLE_READ = 1 << 4
+PERF_SAMPLE_CALLCHAIN = 1 << 5
+PERF_SAMPLE_ID = 1 << 6
+PERF_SAMPLE_CPU = 1 << 7
+PERF_SAMPLE_PERIOD = 1 << 8
+PERF_SAMPLE_STREAM_ID = 1 << 9
+PERF_SAMPLE_RAW = 1 << 10
+
+PERF_FORMAT_TOTAL_TIME_ENABLED = 1 << 0
+PERF_FORMAT_TOTAL_TIME_RUNNING = 1 << 1
+PERF_FORMAT_ID = 1 << 2
+PERF_FORMAT_GROUP = 1 << 3
+
+import re
+
+sys_tracing = '/sys/kernel/debug/tracing'
+
+class Group(object):
+ def __init__(self, cpu):
+ self.events = []
+ self.group_leader = None
+ self.cpu = cpu
+ def add_event(self, name, event_set, tracepoint, filter = None):
+ self.events.append(Event(group = self,
+ name = name, event_set = event_set,
+ tracepoint = tracepoint, filter = filter))
+ if len(self.events) == 1:
+ self.file = os.fdopen(self.events[0].fd)
+ def read(self):
+ bytes = 8 * (1 + len(self.events))
+ fmt = 'xxxxxxxx' + 'q' * len(self.events)
+ return dict(zip([event.name for event in self.events],
+ struct.unpack(fmt, self.file.read(bytes))))
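+# With PERF_FORMAT_GROUP the group leader's fd yields one u64 "nr" field
+# followed by one u64 counter per event, so a group of three events reads
+# 8 + 3*8 = 32 bytes and Group.read() above would return something like
+# {'kvm_exit': 1234, 'kvm_entry': 1234, 'kvm_exit(HLT)': 56} (made-up values).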
+
+class Event(object):
+ def __init__(self, group, name, event_set, tracepoint, filter = None):
+ self.name = name
+ attr = perf_event_attr()
+ attr.type = PERF_TYPE_TRACEPOINT
+ attr.size = ctypes.sizeof(attr)
+ id_path = os.path.join(sys_tracing, 'events', event_set,
+ tracepoint, 'id')
+ id = int(file(id_path).read())
+ attr.config = id
+ attr.sample_type = (PERF_SAMPLE_RAW
+ | PERF_SAMPLE_TIME
+ | PERF_SAMPLE_CPU)
+ attr.sample_period = 1
+ attr.read_format = PERF_FORMAT_GROUP
+ group_leader = -1
+ if group.events:
+ group_leader = group.events[0].fd
+ fd = _perf_event_open(attr, -1, group.cpu, group_leader, 0)
+ if fd == -1:
+ err = get_errno()[0]
+ raise Exception('perf_event_open failed, errno = ' + err.__str__())
+ if filter:
+ import fcntl
+ fcntl.ioctl(fd, ioctl_numbers['SET_FILTER'], filter)
+ self.fd = fd
+ def enable(self):
+ import fcntl
+ fcntl.ioctl(self.fd, ioctl_numbers['ENABLE'], 0)
+ def disable(self):
+ import fcntl
+ fcntl.ioctl(self.fd, ioctl_numbers['DISABLE'], 0)
+ def reset(self):
+ import fcntl
+ fcntl.ioctl(self.fd, ioctl_numbers['RESET'], 0)
+
+class TracepointProvider(object):
+ def __init__(self):
+ path = os.path.join(sys_tracing, 'events', 'kvm')
+ fields = [f
+ for f in os.listdir(path)
+ if os.path.isdir(os.path.join(path, f))]
+ extra = []
+ for f in fields:
+ if f in filters:
+ subfield, values = filters[f]
+ for name, number in values.iteritems():
+ extra.append(f + '(' + name + ')')
+ fields += extra
+ self._setup(fields)
+ self.select(fields)
+ def fields(self):
+ return self._fields
+
+ def _online_cpus(self):
+ l = []
+ pattern = r'cpu([0-9]+)'
+ basedir = '/sys/devices/system/cpu'
+ for entry in os.listdir(basedir):
+ match = re.match(pattern, entry)
+ if not match:
+ continue
+ path = os.path.join(basedir, entry, 'online')
+ if os.path.exists(path) and open(path).read().strip() != '1':
+ continue
+ l.append(int(match.group(1)))
+ return l
+
+ def _setup(self, _fields):
+ self._fields = _fields
+ cpus = self._online_cpus()
+ import resource
+ nfiles = len(cpus) * 1000
+ resource.setrlimit(resource.RLIMIT_NOFILE, (nfiles, nfiles))
+ events = []
+ self.group_leaders = []
+ for cpu in cpus:
+ group = Group(cpu)
+ for name in _fields:
+ tracepoint = name
+ filter = None
+ m = re.match(r'(.*)\((.*)\)', name)
+ if m:
+ tracepoint, sub = m.groups()
+ filter = '%s==%d\0' % (filters[tracepoint][0],
+ filters[tracepoint][1][sub])
+ event = group.add_event(name, event_set = 'kvm',
+ tracepoint = tracepoint,
+ filter = filter)
+ self.group_leaders.append(group)
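+    # For example (illustrative), the synthetic field 'kvm_exit(HLT)' is split
+    # into tracepoint 'kvm_exit' plus the perf filter 'exit_reason==12' on an
+    # Intel/VMX host, 12 being the HLT exit reason defined above.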
+ def select(self, fields):
+ for group in self.group_leaders:
+ for event in group.events:
+ if event.name in fields:
+ event.reset()
+ event.enable()
+ else:
+ event.disable()
+ def read(self):
+ from collections import defaultdict
+ ret = defaultdict(int)
+ for group in self.group_leaders:
+ for name, val in group.read().iteritems():
+ ret[name] += val
+ return ret
+
+class Stats:
+ def __init__(self, providers, fields = None):
+ self.providers = providers
+ self.fields_filter = fields
+ self._update()
+ def _update(self):
+ def wanted(key):
+ import re
+ if not self.fields_filter:
+ return True
+ return re.match(self.fields_filter, key) is not None
+ self.values = dict()
+        for d in self.providers:
+ provider_fields = [key for key in d.fields() if wanted(key)]
+ for key in provider_fields:
+ self.values[key] = None
+ d.select(provider_fields)
+ def set_fields_filter(self, fields_filter):
+ self.fields_filter = fields_filter
+ self._update()
+ def get(self):
+        for d in self.providers:
+ new = d.read()
+ for key in d.fields():
+ oldval = self.values.get(key, (0, 0))
+ newval = new[key]
+ newdelta = None
+ if oldval is not None:
+ newdelta = newval - oldval[0]
+ self.values[key] = (newval, newdelta)
+ return self.values
+
+if not os.access('/sys/kernel/debug', os.F_OK):
+ print 'Please enable CONFIG_DEBUG_FS in your kernel'
+ sys.exit(1)
+if not os.access('/sys/kernel/debug/kvm', os.F_OK):
+ print "Please mount debugfs ('mount -t debugfs debugfs /sys/kernel/debug')"
+ print "and ensure the kvm modules are loaded"
+ sys.exit(1)
+
+label_width = 40
+number_width = 10
+
+def tui(screen, stats):
+ curses.use_default_colors()
+ curses.noecho()
+ drilldown = False
+ fields_filter = stats.fields_filter
+ def update_drilldown():
+ if not fields_filter:
+ if drilldown:
+ stats.set_fields_filter(None)
+ else:
+ stats.set_fields_filter(r'^[^\(]*$')
+ update_drilldown()
+ def refresh(sleeptime):
+ screen.erase()
+ screen.addstr(0, 0, 'kvm statistics')
+ screen.addstr(2, 1, 'Event')
+ screen.addstr(2, 1 + label_width + number_width - len('Total'), 'Total')
+ screen.addstr(2, 1 + label_width + number_width + 8 - len('Current'), 'Current')
+ row = 3
+ s = stats.get()
+ def sortkey(x):
+ if s[x][1]:
+ return (-s[x][1], -s[x][0])
+ else:
+ return (0, -s[x][0])
+ for key in sorted(s.keys(), key = sortkey):
+ if row >= screen.getmaxyx()[0]:
+ break
+ values = s[key]
+ if not values[0] and not values[1]:
+ break
+ col = 1
+ screen.addstr(row, col, key)
+ col += label_width
+ screen.addstr(row, col, '%10d' % (values[0],))
+ col += number_width
+ if values[1] is not None:
+ screen.addstr(row, col, '%8d' % (values[1] / sleeptime,))
+ row += 1
+ screen.refresh()
+
+ sleeptime = 0.25
+ while True:
+ refresh(sleeptime)
+ curses.halfdelay(int(sleeptime * 10))
+ sleeptime = 3
+ try:
+ c = screen.getkey()
+ if c == 'x':
+ drilldown = not drilldown
+ update_drilldown()
+ if c == 'q':
+ break
+ except KeyboardInterrupt:
+ break
+ except curses.error:
+ continue
+
+def batch(stats):
+ s = stats.get()
+ time.sleep(1)
+ s = stats.get()
+ for key in sorted(s.keys()):
+ values = s[key]
+ print '%-22s%10d%10d' % (key, values[0], values[1])
+
+def log(stats):
+ keys = sorted(stats.get().iterkeys())
+ def banner():
+ for k in keys:
+ print '%10s' % k[0:9],
+ print
+ def statline():
+ s = stats.get()
+ for k in keys:
+ print ' %9d' % s[k][1],
+ print
+ line = 0
+ banner_repeat = 20
+ while True:
+ time.sleep(1)
+ if line % banner_repeat == 0:
+ banner()
+ statline()
+ line += 1
+
+options = optparse.OptionParser()
+options.add_option('-1', '--once', '--batch',
+ action = 'store_true',
+ default = False,
+ dest = 'once',
+ help = 'run in batch mode for one second',
+ )
+options.add_option('-l', '--log',
+ action = 'store_true',
+ default = False,
+ dest = 'log',
+ help = 'run in logging mode (like vmstat)',
+ )
+options.add_option('-t', '--tracepoints',
+ action = 'store_true',
+ default = False,
+ dest = 'tracepoints',
+ help = 'retrieve statistics from tracepoints',
+ )
+options.add_option('-d', '--debugfs',
+ action = 'store_true',
+ default = False,
+ dest = 'debugfs',
+ help = 'retrieve statistics from debugfs',
+ )
+options.add_option('-f', '--fields',
+ action = 'store',
+ default = None,
+ dest = 'fields',
+ help = 'fields to display (regex)',
+ )
+(options, args) = options.parse_args(sys.argv)
+
+providers = []
+if options.tracepoints:
+ providers.append(TracepointProvider())
+if options.debugfs:
+ providers.append(DebugfsProvider())
+
+if len(providers) == 0:
+ try:
+ providers = [TracepointProvider()]
+ except:
+ providers = [DebugfsProvider()]
+
+stats = Stats(providers, fields = options.fields)
+
+if options.log:
+ log(stats)
+elif not options.once:
+ import curses.wrapper
+ curses.wrapper(tui, stats)
+else:
+ batch(stats)
diff --git a/scripts/kvm/kvm_stat.texi b/scripts/kvm/kvm_stat.texi
new file mode 100644
index 00000000..6ce00d80
--- /dev/null
+++ b/scripts/kvm/kvm_stat.texi
@@ -0,0 +1,55 @@
+@example
+@c man begin SYNOPSIS
+usage: kvm_stat [OPTION]...
+@c man end
+@end example
+
+@c man begin DESCRIPTION
+
+kvm_stat prints counts of KVM kernel module trace events. These events signify
+state transitions such as guest mode entry and exit.
+
+This tool is useful for observing guest behavior from the host perspective.
+Often conclusions about performance or buggy behavior can be drawn from the
+output.
+
+The set of KVM kernel module trace events may be specific to the kernel version
+or architecture. It is best to check the KVM kernel module source code for the
+meaning of events.
+
+Note that trace events are counted globally across all running guests.
+
+@c man end
+
+@c man begin OPTIONS
+@table @option
+@item -1, --once, --batch
+ run in batch mode for one second
+@item -l, --log
+ run in logging mode (like vmstat)
+@item -t, --tracepoints
+ retrieve statistics from tracepoints
+@item -d, --debugfs
+ retrieve statistics from debugfs
+@item -f, --fields=@var{fields}
+ fields to display (regex)
+@item -h, --help
+ show help message
+@end table
+
+@c man end
+
+@ignore
+
+@setfilename kvm_stat
+@settitle Report KVM kernel module event counters.
+
+@c man begin AUTHOR
+Stefan Hajnoczi <stefanha@redhat.com>
+@c man end
+
+@c man begin SEEALSO
+perf(1), trace-cmd(1)
+@c man end
+
+@end ignore
diff --git a/scripts/kvm/vmxcap b/scripts/kvm/vmxcap
new file mode 100755
index 00000000..8f0371f4
--- /dev/null
+++ b/scripts/kvm/vmxcap
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+#
+# tool for querying VMX capabilities
+#
+# Copyright 2009-2010 Red Hat, Inc.
+#
+# Authors:
+# Avi Kivity <avi@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+
+MSR_IA32_VMX_BASIC = 0x480
+MSR_IA32_VMX_PINBASED_CTLS = 0x481
+MSR_IA32_VMX_PROCBASED_CTLS = 0x482
+MSR_IA32_VMX_EXIT_CTLS = 0x483
+MSR_IA32_VMX_ENTRY_CTLS = 0x484
+MSR_IA32_VMX_MISC_CTLS = 0x485
+MSR_IA32_VMX_PROCBASED_CTLS2 = 0x48B
+MSR_IA32_VMX_EPT_VPID_CAP = 0x48C
+MSR_IA32_VMX_TRUE_PINBASED_CTLS = 0x48D
+MSR_IA32_VMX_TRUE_PROCBASED_CTLS = 0x48E
+MSR_IA32_VMX_TRUE_EXIT_CTLS = 0x48F
+MSR_IA32_VMX_TRUE_ENTRY_CTLS = 0x490
+MSR_IA32_VMX_VMFUNC = 0x491
+
+class msr(object):
+ def __init__(self):
+ try:
+ self.f = open('/dev/cpu/0/msr', 'r', 0)
+ except:
+ self.f = open('/dev/msr0', 'r', 0)
+ def read(self, index, default = None):
+ import struct
+ self.f.seek(index)
+ try:
+ return struct.unpack('Q', self.f.read(8))[0]
+ except:
+ return default
+
+class Control(object):
+ def __init__(self, name, bits, cap_msr, true_cap_msr = None):
+ self.name = name
+ self.bits = bits
+ self.cap_msr = cap_msr
+ self.true_cap_msr = true_cap_msr
+ def read2(self, nr):
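+        # The VMX capability MSRs report the allowed 0-settings of each
+        # control in bits 31:0 and the allowed 1-settings in bits 63:32
+        # (Intel SDM, IA32_VMX_*_CTLS); split the two halves for the caller.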
+ m = msr()
+ val = m.read(nr, 0)
+ return (val & 0xffffffff, val >> 32)
+ def show(self):
+ print self.name
+ mbz, mb1 = self.read2(self.cap_msr)
+ tmbz, tmb1 = 0, 0
+ if self.true_cap_msr:
+ tmbz, tmb1 = self.read2(self.true_cap_msr)
+ for bit in sorted(self.bits.keys()):
+ zero = not (mbz & (1 << bit))
+ one = mb1 & (1 << bit)
+ true_zero = not (tmbz & (1 << bit))
+ true_one = tmb1 & (1 << bit)
+            s = '?'
+ if (self.true_cap_msr and true_zero and true_one
+ and one and not zero):
+ s = 'default'
+ elif zero and not one:
+ s = 'no'
+ elif one and not zero:
+ s = 'forced'
+ elif one and zero:
+ s = 'yes'
+ print ' %-40s %s' % (self.bits[bit], s)
+
+class Misc(object):
+ def __init__(self, name, bits, msr):
+ self.name = name
+ self.bits = bits
+ self.msr = msr
+ def show(self):
+ print self.name
+ value = msr().read(self.msr, 0)
+ def first_bit(key):
+ if type(key) is tuple:
+ return key[0]
+ else:
+ return key
+ for bits in sorted(self.bits.keys(), key = first_bit):
+ if type(bits) is tuple:
+ lo, hi = bits
+ fmt = int
+ else:
+ lo = hi = bits
+ def fmt(x):
+ return { True: 'yes', False: 'no' }[x]
+ v = (value >> lo) & ((1 << (hi - lo + 1)) - 1)
+ print ' %-40s %s' % (self.bits[bits], fmt(v))
+
+controls = [
+ Misc(
+ name = 'Basic VMX Information',
+ bits = {
+ (0, 30): 'Revision',
+ (32,44): 'VMCS size',
+ 48: 'VMCS restricted to 32 bit addresses',
+ 49: 'Dual-monitor support',
+ (50, 53): 'VMCS memory type',
+ 54: 'INS/OUTS instruction information',
+ 55: 'IA32_VMX_TRUE_*_CTLS support',
+ },
+ msr = MSR_IA32_VMX_BASIC,
+ ),
+ Control(
+ name = 'pin-based controls',
+ bits = {
+ 0: 'External interrupt exiting',
+ 3: 'NMI exiting',
+ 5: 'Virtual NMIs',
+ 6: 'Activate VMX-preemption timer',
+ 7: 'Process posted interrupts',
+ },
+ cap_msr = MSR_IA32_VMX_PINBASED_CTLS,
+ true_cap_msr = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
+ ),
+
+ Control(
+ name = 'primary processor-based controls',
+ bits = {
+ 2: 'Interrupt window exiting',
+ 3: 'Use TSC offsetting',
+ 7: 'HLT exiting',
+ 9: 'INVLPG exiting',
+ 10: 'MWAIT exiting',
+ 11: 'RDPMC exiting',
+ 12: 'RDTSC exiting',
+ 15: 'CR3-load exiting',
+ 16: 'CR3-store exiting',
+ 19: 'CR8-load exiting',
+ 20: 'CR8-store exiting',
+ 21: 'Use TPR shadow',
+ 22: 'NMI-window exiting',
+ 23: 'MOV-DR exiting',
+ 24: 'Unconditional I/O exiting',
+ 25: 'Use I/O bitmaps',
+ 27: 'Monitor trap flag',
+ 28: 'Use MSR bitmaps',
+ 29: 'MONITOR exiting',
+ 30: 'PAUSE exiting',
+ 31: 'Activate secondary control',
+ },
+ cap_msr = MSR_IA32_VMX_PROCBASED_CTLS,
+ true_cap_msr = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
+ ),
+
+ Control(
+ name = 'secondary processor-based controls',
+ bits = {
+ 0: 'Virtualize APIC accesses',
+ 1: 'Enable EPT',
+ 2: 'Descriptor-table exiting',
+ 3: 'Enable RDTSCP',
+ 4: 'Virtualize x2APIC mode',
+ 5: 'Enable VPID',
+ 6: 'WBINVD exiting',
+ 7: 'Unrestricted guest',
+ 8: 'APIC register emulation',
+ 9: 'Virtual interrupt delivery',
+ 10: 'PAUSE-loop exiting',
+ 11: 'RDRAND exiting',
+ 12: 'Enable INVPCID',
+ 13: 'Enable VM functions',
+ 14: 'VMCS shadowing',
+ 16: 'RDSEED exiting',
+ 18: 'EPT-violation #VE',
+ 20: 'Enable XSAVES/XRSTORS',
+ },
+ cap_msr = MSR_IA32_VMX_PROCBASED_CTLS2,
+ ),
+
+ Control(
+ name = 'VM-Exit controls',
+ bits = {
+ 2: 'Save debug controls',
+ 9: 'Host address-space size',
+ 12: 'Load IA32_PERF_GLOBAL_CTRL',
+ 15: 'Acknowledge interrupt on exit',
+ 18: 'Save IA32_PAT',
+ 19: 'Load IA32_PAT',
+ 20: 'Save IA32_EFER',
+ 21: 'Load IA32_EFER',
+ 22: 'Save VMX-preemption timer value',
+ },
+ cap_msr = MSR_IA32_VMX_EXIT_CTLS,
+ true_cap_msr = MSR_IA32_VMX_TRUE_EXIT_CTLS,
+ ),
+
+ Control(
+ name = 'VM-Entry controls',
+ bits = {
+ 2: 'Load debug controls',
+ 9: 'IA-32e mode guest',
+ 10: 'Entry to SMM',
+ 11: 'Deactivate dual-monitor treatment',
+ 13: 'Load IA32_PERF_GLOBAL_CTRL',
+ 14: 'Load IA32_PAT',
+ 15: 'Load IA32_EFER',
+ },
+ cap_msr = MSR_IA32_VMX_ENTRY_CTLS,
+ true_cap_msr = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
+ ),
+
+ Misc(
+ name = 'Miscellaneous data',
+ bits = {
+ (0,4): 'VMX-preemption timer scale (log2)',
+ 5: 'Store EFER.LMA into IA-32e mode guest control',
+ 6: 'HLT activity state',
+ 7: 'Shutdown activity state',
+ 8: 'Wait-for-SIPI activity state',
+ 15: 'IA32_SMBASE support',
+ (16,24): 'Number of CR3-target values',
+ (25,27): 'MSR-load/store count recommendation',
+ 28: 'IA32_SMM_MONITOR_CTL[2] can be set to 1',
+ 29: 'VMWRITE to VM-exit information fields',
+ (32,63): 'MSEG revision identifier',
+ },
+ msr = MSR_IA32_VMX_MISC_CTLS,
+ ),
+
+ Misc(
+ name = 'VPID and EPT capabilities',
+ bits = {
+ 0: 'Execute-only EPT translations',
+ 6: 'Page-walk length 4',
+ 8: 'Paging-structure memory type UC',
+ 14: 'Paging-structure memory type WB',
+ 16: '2MB EPT pages',
+ 17: '1GB EPT pages',
+ 20: 'INVEPT supported',
+ 21: 'EPT accessed and dirty flags',
+ 25: 'Single-context INVEPT',
+ 26: 'All-context INVEPT',
+ 32: 'INVVPID supported',
+ 40: 'Individual-address INVVPID',
+ 41: 'Single-context INVVPID',
+ 42: 'All-context INVVPID',
+ 43: 'Single-context-retaining-globals INVVPID',
+ },
+ msr = MSR_IA32_VMX_EPT_VPID_CAP,
+ ),
+ Misc(
+ name = 'VM Functions',
+ bits = {
+ 0: 'EPTP Switching',
+ },
+ msr = MSR_IA32_VMX_VMFUNC,
+ ),
+ ]
+
+for c in controls:
+ c.show()
diff --git a/scripts/make-release b/scripts/make-release
new file mode 100755
index 00000000..fa6323fd
--- /dev/null
+++ b/scripts/make-release
@@ -0,0 +1,25 @@
+#!/bin/bash -e
+#
+# QEMU Release Script
+#
+# Copyright IBM, Corp. 2012
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPLv2 or later.
+# See the COPYING file in the top-level directory.
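+#
+# Illustrative usage (arguments match the positional parameters below; the
+# tag v<version> must exist in the source repository):
+#   ./scripts/make-release /path/to/qemu.git 2.4.1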
+
+src="$1"
+version="$2"
+destination=qemu-${version}
+
+git clone "${src}" ${destination}
+pushd ${destination}
+git checkout "v${version}"
+git submodule update --init
+(cd roms/seabios && git describe --tags --long --dirty > .version)
+rm -rf .git roms/*/.git dtc/.git pixman/.git
+popd
+tar cfj ${destination}.tar.bz2 ${destination}
+rm -rf ${destination}
diff --git a/scripts/make_device_config.sh b/scripts/make_device_config.sh
new file mode 100644
index 00000000..c1afb3ff
--- /dev/null
+++ b/scripts/make_device_config.sh
@@ -0,0 +1,30 @@
+#! /bin/sh
+# Writes a target device config file to stdout, built from a default config
+# file and any include directives therein. Also emits Makefile dependencies.
+#
+# Usage: make_device_config.sh SRC DEPFILE-NAME DEPFILE-TARGET > DEST
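+#
+# Illustrative invocation (file names are hypothetical):
+#   make_device_config.sh default-configs/pci.mak config-devices.mak.d \
+#     config-devices.mak > config-devices.mak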
+
+src=$1
+dep=$2
+target=$3
+src_dir=`dirname $src`
+all_includes=
+
+process_includes () {
+ cat $1 | grep '^include' | \
+ while read include file ; do
+ all_includes="$all_includes $src_dir/$file"
+ process_includes $src_dir/$file
+ done
+}
+
+f=$src
+while [ -n "$f" ] ; do
+ f=`cat $f | tr -d '\r' | awk '/^include / {printf "'$src_dir'/%s ", $2}'`
+ [ $? = 0 ] || exit 1
+ all_includes="$all_includes $f"
+done
+process_includes $src
+
+cat $src $all_includes | grep -v '^include'
+echo "$target: $all_includes" > $dep
diff --git a/scripts/ordereddict.py b/scripts/ordereddict.py
new file mode 100644
index 00000000..7242b506
--- /dev/null
+++ b/scripts/ordereddict.py
@@ -0,0 +1,127 @@
+# Copyright (c) 2009 Raymond Hettinger
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+from UserDict import DictMixin
+
+class OrderedDict(dict, DictMixin):
+
+ def __init__(self, *args, **kwds):
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__end
+ except AttributeError:
+ self.clear()
+ self.update(*args, **kwds)
+
+ def clear(self):
+ self.__end = end = []
+ end += [None, end, end] # sentinel node for doubly linked list
+ self.__map = {} # key --> [key, prev, next]
+ dict.clear(self)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ end = self.__end
+ curr = end[1]
+ curr[2] = end[1] = self.__map[key] = [key, curr, end]
+ dict.__setitem__(self, key, value)
+
+ def __delitem__(self, key):
+ dict.__delitem__(self, key)
+ key, prev, next = self.__map.pop(key)
+ prev[2] = next
+ next[1] = prev
+
+ def __iter__(self):
+ end = self.__end
+ curr = end[2]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[2]
+
+ def __reversed__(self):
+ end = self.__end
+ curr = end[1]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[1]
+
+ def popitem(self, last=True):
+ if not self:
+ raise KeyError('dictionary is empty')
+ if last:
+ key = reversed(self).next()
+ else:
+ key = iter(self).next()
+ value = self.pop(key)
+ return key, value
+
+ def __reduce__(self):
+ items = [[k, self[k]] for k in self]
+ tmp = self.__map, self.__end
+ del self.__map, self.__end
+ inst_dict = vars(self).copy()
+ self.__map, self.__end = tmp
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def keys(self):
+ return list(self)
+
+ setdefault = DictMixin.setdefault
+ update = DictMixin.update
+ pop = DictMixin.pop
+ values = DictMixin.values
+ items = DictMixin.items
+ iterkeys = DictMixin.iterkeys
+ itervalues = DictMixin.itervalues
+ iteritems = DictMixin.iteritems
+
+ def __repr__(self):
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+
+ def copy(self):
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ if isinstance(other, OrderedDict):
+ if len(self) != len(other):
+ return False
+ for p, q in zip(self.items(), other.items()):
+ if p != q:
+ return False
+ return True
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
diff --git a/scripts/qapi-commands.py b/scripts/qapi-commands.py
new file mode 100644
index 00000000..ca22acc1
--- /dev/null
+++ b/scripts/qapi-commands.py
@@ -0,0 +1,376 @@
+#
+# QAPI command marshaller generator
+#
+# Copyright IBM, Corp. 2011
+# Copyright (C) 2014-2015 Red Hat, Inc.
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+# Michael Roth <mdroth@linux.vnet.ibm.com>
+# Markus Armbruster <armbru@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2.
+# See the COPYING file in the top-level directory.
+
+from ordereddict import OrderedDict
+from qapi import *
+import re
+
+def generate_command_decl(name, args, ret_type):
+ arglist=""
+ for argname, argtype, optional in parse_args(args):
+ argtype = c_type(argtype, is_param=True)
+ if optional:
+ arglist += "bool has_%s, " % c_name(argname)
+ arglist += "%s %s, " % (argtype, c_name(argname))
+ return mcgen('''
+%(ret_type)s qmp_%(name)s(%(args)sError **errp);
+''',
+ ret_type=c_type(ret_type), name=c_name(name),
+ args=arglist).strip()
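+
+# Illustrative output of generate_command_decl() for a hypothetical command
+# 'query-foo' returning 'FooInfo' with one optional bool argument 'verbose'
+# (modulo whitespace):
+#   FooInfo *qmp_query_foo(bool has_verbose, bool verbose, Error **errp);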
+
+def gen_err_check(errvar):
+ if errvar:
+ return mcgen('''
+if (local_err) {
+ goto out;
+}
+''')
+ return ''
+
+def gen_sync_call(name, args, ret_type, indent=0):
+ ret = ""
+ arglist=""
+ retval=""
+ if ret_type:
+ retval = "retval = "
+ for argname, argtype, optional in parse_args(args):
+ if optional:
+ arglist += "has_%s, " % c_name(argname)
+ arglist += "%s, " % (c_name(argname))
+ push_indent(indent)
+ ret = mcgen('''
+%(retval)sqmp_%(name)s(%(args)s&local_err);
+
+''',
+ name=c_name(name), args=arglist, retval=retval).rstrip()
+ if ret_type:
+ ret += "\n" + gen_err_check('local_err')
+        ret += "\n" + mcgen('''
+%(marshal_output_call)s
+''',
+ marshal_output_call=gen_marshal_output_call(name, ret_type)).rstrip()
+ pop_indent(indent)
+ return ret.rstrip()
+
+
+def gen_marshal_output_call(name, ret_type):
+ if not ret_type:
+ return ""
+ return "qmp_marshal_output_%s(retval, ret, &local_err);" % c_name(name)
+
+def gen_visitor_input_containers_decl(args, obj):
+ ret = ""
+
+ push_indent()
+ if len(args) > 0:
+ ret += mcgen('''
+QmpInputVisitor *mi = qmp_input_visitor_new_strict(%(obj)s);
+QapiDeallocVisitor *md;
+Visitor *v;
+''',
+ obj=obj)
+ pop_indent()
+
+ return ret.rstrip()
+
+def gen_visitor_input_vars_decl(args):
+ ret = ""
+ push_indent()
+ for argname, argtype, optional in parse_args(args):
+ if optional:
+ ret += mcgen('''
+bool has_%(argname)s = false;
+''',
+ argname=c_name(argname))
+ if is_c_ptr(argtype):
+ ret += mcgen('''
+%(argtype)s %(argname)s = NULL;
+''',
+ argname=c_name(argname), argtype=c_type(argtype))
+ else:
+ ret += mcgen('''
+%(argtype)s %(argname)s = {0};
+''',
+ argname=c_name(argname), argtype=c_type(argtype))
+
+ pop_indent()
+ return ret.rstrip()
+
+def gen_visitor_input_block(args, dealloc=False):
+ ret = ""
+ errparg = '&local_err'
+ errarg = 'local_err'
+
+ if len(args) == 0:
+ return ret
+
+ push_indent()
+
+ if dealloc:
+ errparg = 'NULL'
+        errarg = None
+ ret += mcgen('''
+qmp_input_visitor_cleanup(mi);
+md = qapi_dealloc_visitor_new();
+v = qapi_dealloc_get_visitor(md);
+''')
+ else:
+ ret += mcgen('''
+v = qmp_input_get_visitor(mi);
+''')
+
+ for argname, argtype, optional in parse_args(args):
+ if optional:
+ ret += mcgen('''
+visit_optional(v, &has_%(c_name)s, "%(name)s", %(errp)s);
+''',
+ c_name=c_name(argname), name=argname, errp=errparg)
+ ret += gen_err_check(errarg)
+ ret += mcgen('''
+if (has_%(c_name)s) {
+''',
+ c_name=c_name(argname))
+ push_indent()
+ ret += mcgen('''
+visit_type_%(visitor)s(v, &%(c_name)s, "%(name)s", %(errp)s);
+''',
+ c_name=c_name(argname), name=argname, argtype=argtype,
+ visitor=type_name(argtype), errp=errparg)
+ ret += gen_err_check(errarg)
+ if optional:
+ pop_indent()
+ ret += mcgen('''
+}
+''')
+
+ if dealloc:
+ ret += mcgen('''
+qapi_dealloc_visitor_cleanup(md);
+''')
+ pop_indent()
+ return ret.rstrip()
+
+def gen_marshal_output(name, args, ret_type, middle_mode):
+ if not ret_type:
+ return ""
+
+ ret = mcgen('''
+static void qmp_marshal_output_%(c_name)s(%(c_ret_type)s ret_in, QObject **ret_out, Error **errp)
+{
+ Error *local_err = NULL;
+ QmpOutputVisitor *mo = qmp_output_visitor_new();
+ QapiDeallocVisitor *md;
+ Visitor *v;
+
+ v = qmp_output_get_visitor(mo);
+ visit_type_%(visitor)s(v, &ret_in, "unused", &local_err);
+ if (local_err) {
+ goto out;
+ }
+ *ret_out = qmp_output_get_qobject(mo);
+
+out:
+ error_propagate(errp, local_err);
+ qmp_output_visitor_cleanup(mo);
+ md = qapi_dealloc_visitor_new();
+ v = qapi_dealloc_get_visitor(md);
+ visit_type_%(visitor)s(v, &ret_in, "unused", NULL);
+ qapi_dealloc_visitor_cleanup(md);
+}
+''',
+ c_ret_type=c_type(ret_type), c_name=c_name(name),
+ visitor=type_name(ret_type))
+
+ return ret
+
+def gen_marshal_input_decl(name, args, ret_type, middle_mode):
+ ret = 'void qmp_marshal_input_%s(QDict *args, QObject **ret, Error **errp)' % c_name(name)
+ if not middle_mode:
+ ret = "static " + ret
+ return ret
+
+def gen_marshal_input(name, args, ret_type, middle_mode):
+ hdr = gen_marshal_input_decl(name, args, ret_type, middle_mode)
+
+ ret = mcgen('''
+%(header)s
+{
+ Error *local_err = NULL;
+''',
+ header=hdr)
+
+ if ret_type:
+ if is_c_ptr(ret_type):
+ retval = " %s retval = NULL;" % c_type(ret_type)
+ else:
+ retval = " %s retval;" % c_type(ret_type)
+ ret += mcgen('''
+%(retval)s
+''',
+ retval=retval)
+
+ if len(args) > 0:
+ ret += mcgen('''
+%(visitor_input_containers_decl)s
+%(visitor_input_vars_decl)s
+
+%(visitor_input_block)s
+
+''',
+ visitor_input_containers_decl=gen_visitor_input_containers_decl(args, "QOBJECT(args)"),
+ visitor_input_vars_decl=gen_visitor_input_vars_decl(args),
+ visitor_input_block=gen_visitor_input_block(args))
+ else:
+ ret += mcgen('''
+
+ (void)args;
+''')
+
+ ret += mcgen('''
+%(sync_call)s
+''',
+ sync_call=gen_sync_call(name, args, ret_type, indent=4))
+ if re.search('^ *goto out\\;', ret, re.MULTILINE):
+ ret += mcgen('''
+
+out:
+''')
+ ret += mcgen('''
+ error_propagate(errp, local_err);
+%(visitor_input_block_cleanup)s
+}
+''',
+ visitor_input_block_cleanup=gen_visitor_input_block(args,
+ dealloc=True))
+ return ret
+
+def gen_registry(commands):
+ registry=""
+ push_indent()
+ for cmd in commands:
+ options = 'QCO_NO_OPTIONS'
+ if not cmd.get('success-response', True):
+ options = 'QCO_NO_SUCCESS_RESP'
+
+ registry += mcgen('''
+qmp_register_command("%(name)s", qmp_marshal_input_%(c_name)s, %(opts)s);
+''',
+ name=cmd['command'], c_name=c_name(cmd['command']),
+ opts=options)
+ pop_indent()
+ ret = mcgen('''
+static void qmp_init_marshal(void)
+{
+%(registry)s
+}
+
+qapi_init(qmp_init_marshal);
+''',
+ registry=registry.rstrip())
+ return ret
+
+middle_mode = False
+
+(input_file, output_dir, do_c, do_h, prefix, opts) = \
+ parse_command_line("m", ["middle"])
+
+for o, a in opts:
+ if o in ("-m", "--middle"):
+ middle_mode = True
+
+exprs = parse_schema(input_file)
+commands = filter(lambda expr: expr.has_key('command'), exprs)
+commands = filter(lambda expr: not expr.has_key('gen'), commands)
+
+c_comment = '''
+/*
+ * schema-defined QMP->QAPI command dispatch
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+'''
+h_comment = '''
+/*
+ * schema-defined QAPI function prototypes
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+'''
+
+(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
+ 'qmp-marshal.c', 'qmp-commands.h',
+ c_comment, h_comment)
+
+fdef.write(mcgen('''
+#include "qemu-common.h"
+#include "qemu/module.h"
+#include "qapi/qmp/types.h"
+#include "qapi/qmp/dispatch.h"
+#include "qapi/visitor.h"
+#include "qapi/qmp-output-visitor.h"
+#include "qapi/qmp-input-visitor.h"
+#include "qapi/dealloc-visitor.h"
+#include "%(prefix)sqapi-types.h"
+#include "%(prefix)sqapi-visit.h"
+#include "%(prefix)sqmp-commands.h"
+
+''',
+ prefix=prefix))
+
+fdecl.write(mcgen('''
+#include "%(prefix)sqapi-types.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/error.h"
+
+''',
+ prefix=prefix))
+
+for cmd in commands:
+ arglist = []
+ ret_type = None
+ if cmd.has_key('data'):
+ arglist = cmd['data']
+ if cmd.has_key('returns'):
+ ret_type = cmd['returns']
+ ret = generate_command_decl(cmd['command'], arglist, ret_type) + "\n"
+ fdecl.write(ret)
+ if ret_type:
+ ret = gen_marshal_output(cmd['command'], arglist, ret_type, middle_mode) + "\n"
+ fdef.write(ret)
+
+ if middle_mode:
+ fdecl.write('%s;\n' % gen_marshal_input_decl(cmd['command'], arglist, ret_type, middle_mode))
+
+ ret = gen_marshal_input(cmd['command'], arglist, ret_type, middle_mode) + "\n"
+ fdef.write(ret)
+
+if not middle_mode:
+ ret = gen_registry(commands)
+ fdef.write(ret)
+
+close_output(fdef, fdecl)
diff --git a/scripts/qapi-event.py b/scripts/qapi-event.py
new file mode 100644
index 00000000..56bc602a
--- /dev/null
+++ b/scripts/qapi-event.py
@@ -0,0 +1,299 @@
+#
+# QAPI event generator
+#
+# Copyright (c) 2014 Wenchao Xia
+#
+# Authors:
+# Wenchao Xia <wenchaoqemu@gmail.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2.
+# See the COPYING file in the top-level directory.
+
+from ordereddict import OrderedDict
+from qapi import *
+
+def _generate_event_api_name(event_name, params):
+    api_name = "void qapi_event_send_%s(" % c_name(event_name).lower()
+ l = len(api_name)
+
+ if params:
+ for argname, argentry, optional in parse_args(params):
+ if optional:
+ api_name += "bool has_%s,\n" % c_name(argname)
+ api_name += "".ljust(l)
+
+ api_name += "%s %s,\n" % (c_type(argentry, is_param=True),
+ c_name(argname))
+ api_name += "".ljust(l)
+
+ api_name += "Error **errp)"
+    return api_name
+
+
+# Following are the core functions that generate C APIs to emit event.
+
+def generate_event_declaration(api_name):
+ return mcgen('''
+
+%(api_name)s;
+''',
+ api_name = api_name)
+
+def generate_event_implement(api_name, event_name, params):
+ # step 1: declare any variables
+ ret = mcgen("""
+
+%(api_name)s
+{
+ QDict *qmp;
+ Error *local_err = NULL;
+ QMPEventFuncEmit emit;
+""",
+ api_name = api_name)
+
+ if params:
+ ret += mcgen("""
+ QmpOutputVisitor *qov;
+ Visitor *v;
+ QObject *obj;
+
+""")
+
+ # step 2: check emit function, create a dict
+ ret += mcgen("""
+ emit = qmp_event_get_func_emit();
+ if (!emit) {
+ return;
+ }
+
+ qmp = qmp_event_build_dict("%(event_name)s");
+
+""",
+ event_name = event_name)
+
+ # step 3: visit the params if params != None
+ if params:
+ ret += mcgen("""
+ qov = qmp_output_visitor_new();
+ g_assert(qov);
+
+ v = qmp_output_get_visitor(qov);
+ g_assert(v);
+
+ /* Fake visit, as if all members are under a structure */
+ visit_start_struct(v, NULL, "", "%(event_name)s", 0, &local_err);
+ if (local_err) {
+ goto clean;
+ }
+
+""",
+ event_name = event_name)
+
+ for argname, argentry, optional in parse_args(params):
+ if optional:
+ ret += mcgen("""
+ if (has_%(var)s) {
+""",
+ var = c_name(argname))
+ push_indent()
+
+ if argentry == "str":
+ var_type = "(char **)"
+ else:
+ var_type = ""
+
+ ret += mcgen("""
+ visit_type_%(type)s(v, %(var_type)s&%(var)s, "%(name)s", &local_err);
+ if (local_err) {
+ goto clean;
+ }
+""",
+ var_type = var_type,
+ var = c_name(argname),
+ type = type_name(argentry),
+ name = argname)
+
+ if optional:
+ pop_indent()
+ ret += mcgen("""
+ }
+""")
+
+ ret += mcgen("""
+
+ visit_end_struct(v, &local_err);
+ if (local_err) {
+ goto clean;
+ }
+
+ obj = qmp_output_get_qobject(qov);
+ g_assert(obj != NULL);
+
+ qdict_put_obj(qmp, "data", obj);
+""")
+
+ # step 4: call qmp event api
+ ret += mcgen("""
+ emit(%(event_enum_value)s, qmp, &local_err);
+
+""",
+ event_enum_value = event_enum_value)
+
+ # step 5: clean up
+ if params:
+ ret += mcgen("""
+ clean:
+ qmp_output_visitor_cleanup(qov);
+""")
+ ret += mcgen("""
+ error_propagate(errp, local_err);
+ QDECREF(qmp);
+}
+""")
+
+ return ret
+
+
+# Following are the functions that generate an enum type for all defined
+# events, similar to qapi-types.py. Here we already have enum name and
+# values which were generated before and recorded in event_enum_*. It also
+# works around the issue that "import qapi-types" can't work.
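+#
+# Illustrative shape of the generated enum for two events FOO and BAR with an
+# empty prefix (the exact constant spelling comes from c_enum_const()):
+#
+#   typedef enum QAPIEvent {
+#       QAPI_EVENT_FOO = 0,
+#       QAPI_EVENT_BAR = 1,
+#       QAPI_EVENT_MAX = 2,
+#   } QAPIEvent;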
+
+def generate_event_enum_decl(event_enum_name, event_enum_values):
+ lookup_decl = mcgen('''
+
+extern const char *%(event_enum_name)s_lookup[];
+''',
+ event_enum_name = event_enum_name)
+
+ enum_decl = mcgen('''
+typedef enum %(event_enum_name)s
+{
+''',
+ event_enum_name = event_enum_name)
+
+ # append automatically generated _MAX value
+ enum_max_value = c_enum_const(event_enum_name, "MAX")
+ enum_values = event_enum_values + [ enum_max_value ]
+
+ i = 0
+ for value in enum_values:
+ enum_decl += mcgen('''
+ %(value)s = %(i)d,
+''',
+ value = value,
+ i = i)
+ i += 1
+
+ enum_decl += mcgen('''
+} %(event_enum_name)s;
+''',
+ event_enum_name = event_enum_name)
+
+ return lookup_decl + enum_decl
+
+def generate_event_enum_lookup(event_enum_name, event_enum_strings):
+ ret = mcgen('''
+
+const char *%(event_enum_name)s_lookup[] = {
+''',
+ event_enum_name = event_enum_name)
+
+ i = 0
+ for string in event_enum_strings:
+ ret += mcgen('''
+ "%(string)s",
+''',
+ string = string)
+
+ ret += mcgen('''
+ NULL,
+};
+''')
+ return ret
+
+(input_file, output_dir, do_c, do_h, prefix, dummy) = parse_command_line()
+
+c_comment = '''
+/*
+ * schema-defined QAPI event functions
+ *
+ * Copyright (c) 2014 Wenchao Xia
+ *
+ * Authors:
+ * Wenchao Xia <wenchaoqemu@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+'''
+h_comment = '''
+/*
+ * schema-defined QAPI event functions
+ *
+ * Copyright (c) 2014 Wenchao Xia
+ *
+ * Authors:
+ * Wenchao Xia <wenchaoqemu@gmail.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+'''
+
+(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
+ 'qapi-event.c', 'qapi-event.h',
+ c_comment, h_comment)
+
+fdef.write(mcgen('''
+#include "qemu-common.h"
+#include "%(prefix)sqapi-event.h"
+#include "%(prefix)sqapi-visit.h"
+#include "qapi/qmp-output-visitor.h"
+#include "qapi/qmp-event.h"
+
+''',
+ prefix=prefix))
+
+fdecl.write(mcgen('''
+#include "qapi/error.h"
+#include "qapi/qmp/qdict.h"
+#include "%(prefix)sqapi-types.h"
+
+''',
+ prefix=prefix))
+
+exprs = parse_schema(input_file)
+
+event_enum_name = prefix.upper().replace('-', '_') + "QAPIEvent"
+event_enum_values = []
+event_enum_strings = []
+
+for expr in exprs:
+ if expr.has_key('event'):
+ event_name = expr['event']
+ params = expr.get('data')
+ if params and len(params) == 0:
+ params = None
+
+ api_name = _generate_event_api_name(event_name, params)
+ ret = generate_event_declaration(api_name)
+ fdecl.write(ret)
+
+ # We need an enum value per event
+ event_enum_value = c_enum_const(event_enum_name, event_name)
+ ret = generate_event_implement(api_name, event_name, params)
+ fdef.write(ret)
+
+ # Record it, and generate enum later
+ event_enum_values.append(event_enum_value)
+ event_enum_strings.append(event_name)
+
+ret = generate_event_enum_decl(event_enum_name, event_enum_values)
+fdecl.write(ret)
+ret = generate_event_enum_lookup(event_enum_name, event_enum_strings)
+fdef.write(ret)
+
+close_output(fdef, fdecl)
diff --git a/scripts/qapi-types.py b/scripts/qapi-types.py
new file mode 100644
index 00000000..e6eb4b61
--- /dev/null
+++ b/scripts/qapi-types.py
@@ -0,0 +1,402 @@
+#
+# QAPI types generator
+#
+# Copyright IBM, Corp. 2011
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2.
+# See the COPYING file in the top-level directory.
+
+from ordereddict import OrderedDict
+from qapi import *
+
+def generate_fwd_builtin(name):
+ return mcgen('''
+
+typedef struct %(name)sList
+{
+ union {
+ %(type)s value;
+ uint64_t padding;
+ };
+ struct %(name)sList *next;
+} %(name)sList;
+''',
+ type=c_type(name),
+ name=name)
+
+def generate_fwd_struct(name):
+ return mcgen('''
+
+typedef struct %(name)s %(name)s;
+
+typedef struct %(name)sList
+{
+ union {
+ %(name)s *value;
+ uint64_t padding;
+ };
+ struct %(name)sList *next;
+} %(name)sList;
+''',
+ name=c_name(name))
+
+def generate_fwd_enum_struct(name):
+ return mcgen('''
+typedef struct %(name)sList
+{
+ union {
+ %(name)s value;
+ uint64_t padding;
+ };
+ struct %(name)sList *next;
+} %(name)sList;
+''',
+ name=c_name(name))
+
+def generate_struct_fields(members):
+ ret = ''
+
+ for argname, argentry, optional in parse_args(members):
+ if optional:
+ ret += mcgen('''
+ bool has_%(c_name)s;
+''',
+ c_name=c_name(argname))
+ ret += mcgen('''
+ %(c_type)s %(c_name)s;
+''',
+ c_type=c_type(argentry), c_name=c_name(argname))
+
+ return ret
+
+def generate_struct(expr):
+
+ structname = expr.get('struct', "")
+ members = expr['data']
+ base = expr.get('base')
+
+ ret = mcgen('''
+struct %(name)s
+{
+''',
+ name=c_name(structname))
+
+ if base:
+ ret += generate_struct_fields({'base': base})
+
+ ret += generate_struct_fields(members)
+
+ # Make sure that all structs have at least one field; this avoids
+ # potential issues with attempting to malloc space for zero-length structs
+ # in C, and also incompatibility with C++ (where an empty struct is size 1).
+ if not base and not members:
+ ret += mcgen('''
+ char qapi_dummy_field_for_empty_struct;
+''')
+
+ ret += mcgen('''
+};
+''')
+
+ return ret
+
+def generate_enum_lookup(name, values):
+ ret = mcgen('''
+const char * const %(name)s_lookup[] = {
+''',
+ name=c_name(name))
+ i = 0
+ for value in values:
+ index = c_enum_const(name, value)
+ ret += mcgen('''
+ [%(index)s] = "%(value)s",
+''',
+ index = index, value = value)
+
+ max_index = c_enum_const(name, 'MAX')
+ ret += mcgen('''
+ [%(max_index)s] = NULL,
+};
+
+''',
+ max_index=max_index)
+ return ret
+
+def generate_enum(name, values):
+ name = c_name(name)
+ lookup_decl = mcgen('''
+extern const char * const %(name)s_lookup[];
+''',
+ name=name)
+
+ enum_decl = mcgen('''
+typedef enum %(name)s
+{
+''',
+ name=name)
+
+ # append automatically generated _MAX value
+ enum_values = values + [ 'MAX' ]
+
+ i = 0
+ for value in enum_values:
+ enum_full_value = c_enum_const(name, value)
+ enum_decl += mcgen('''
+ %(enum_full_value)s = %(i)d,
+''',
+ enum_full_value = enum_full_value,
+ i=i)
+ i += 1
+
+ enum_decl += mcgen('''
+} %(name)s;
+''',
+ name=name)
+
+ return lookup_decl + enum_decl
+
+def generate_alternate_qtypes(expr):
+
+ name = expr['alternate']
+ members = expr['data']
+
+ ret = mcgen('''
+const int %(name)s_qtypes[QTYPE_MAX] = {
+''',
+ name=c_name(name))
+
+ for key in members:
+ qtype = find_alternate_member_qtype(members[key])
+ assert qtype, "Invalid alternate member"
+
+ ret += mcgen('''
+ [%(qtype)s] = %(enum_const)s,
+''',
+ qtype = qtype,
+ enum_const = c_enum_const(name + 'Kind', key))
+
+ ret += mcgen('''
+};
+''')
+ return ret
+
+
+def generate_union(expr, meta):
+
+ name = c_name(expr[meta])
+ typeinfo = expr['data']
+
+ base = expr.get('base')
+ discriminator = expr.get('discriminator')
+
+ enum_define = discriminator_find_enum_define(expr)
+ if enum_define:
+ discriminator_type_name = enum_define['enum_name']
+ else:
+ discriminator_type_name = '%sKind' % (name)
+
+ ret = mcgen('''
+struct %(name)s
+{
+ %(discriminator_type_name)s kind;
+ union {
+ void *data;
+''',
+ name=name,
+ discriminator_type_name=c_name(discriminator_type_name))
+
+ for key in typeinfo:
+ ret += mcgen('''
+ %(c_type)s %(c_name)s;
+''',
+ c_type=c_type(typeinfo[key]),
+ c_name=c_name(key))
+
+ ret += mcgen('''
+ };
+''')
+
+ if base:
+ assert discriminator
+ base_fields = find_struct(base)['data'].copy()
+ del base_fields[discriminator]
+ ret += generate_struct_fields(base_fields)
+ else:
+ assert not discriminator
+
+ ret += mcgen('''
+};
+''')
+ if meta == 'alternate':
+ ret += mcgen('''
+extern const int %(name)s_qtypes[];
+''',
+ name=name)
+
+
+ return ret
+
+def generate_type_cleanup_decl(name):
+ ret = mcgen('''
+void qapi_free_%(name)s(%(c_type)s obj);
+''',
+ c_type=c_type(name), name=c_name(name))
+ return ret
+
+def generate_type_cleanup(name):
+ ret = mcgen('''
+
+void qapi_free_%(name)s(%(c_type)s obj)
+{
+ QapiDeallocVisitor *md;
+ Visitor *v;
+
+ if (!obj) {
+ return;
+ }
+
+ md = qapi_dealloc_visitor_new();
+ v = qapi_dealloc_get_visitor(md);
+ visit_type_%(name)s(v, &obj, NULL, NULL);
+ qapi_dealloc_visitor_cleanup(md);
+}
+''',
+ c_type=c_type(name), name=c_name(name))
+ return ret
+
+do_builtins = False
+
+(input_file, output_dir, do_c, do_h, prefix, opts) = \
+ parse_command_line("b", ["builtins"])
+
+for o, a in opts:
+ if o in ("-b", "--builtins"):
+ do_builtins = True
+
+c_comment = '''
+/*
+ * deallocation functions for schema-defined QAPI types
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Michael Roth <mdroth@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+'''
+h_comment = '''
+/*
+ * schema-defined QAPI types
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+'''
+
+(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
+ 'qapi-types.c', 'qapi-types.h',
+ c_comment, h_comment)
+
+fdef.write(mcgen('''
+#include "qapi/dealloc-visitor.h"
+#include "%(prefix)sqapi-types.h"
+#include "%(prefix)sqapi-visit.h"
+
+''',
+ prefix=prefix))
+
+fdecl.write(mcgen('''
+#include <stdbool.h>
+#include <stdint.h>
+
+'''))
+
+exprs = parse_schema(input_file)
+
+fdecl.write(guardstart("QAPI_TYPES_BUILTIN_STRUCT_DECL"))
+for typename in builtin_types.keys():
+ fdecl.write(generate_fwd_builtin(typename))
+fdecl.write(guardend("QAPI_TYPES_BUILTIN_STRUCT_DECL"))
+
+for expr in exprs:
+ ret = "\n"
+ if expr.has_key('struct'):
+ ret += generate_fwd_struct(expr['struct'])
+ elif expr.has_key('enum'):
+ ret += generate_enum(expr['enum'], expr['data']) + "\n"
+ ret += generate_fwd_enum_struct(expr['enum'])
+ fdef.write(generate_enum_lookup(expr['enum'], expr['data']))
+ elif expr.has_key('union'):
+ ret += generate_fwd_struct(expr['union']) + "\n"
+ enum_define = discriminator_find_enum_define(expr)
+ if not enum_define:
+ ret += generate_enum('%sKind' % expr['union'], expr['data'].keys())
+ fdef.write(generate_enum_lookup('%sKind' % expr['union'],
+ expr['data'].keys()))
+ elif expr.has_key('alternate'):
+ ret += generate_fwd_struct(expr['alternate']) + "\n"
+ ret += generate_enum('%sKind' % expr['alternate'], expr['data'].keys())
+ fdef.write(generate_enum_lookup('%sKind' % expr['alternate'],
+ expr['data'].keys()))
+ fdef.write(generate_alternate_qtypes(expr))
+ else:
+ continue
+ fdecl.write(ret)
+
+# to avoid header dependency hell, we always generate declarations
+# for built-in types in our header files and simply guard them
+fdecl.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DECL"))
+for typename in builtin_types.keys():
+ fdecl.write(generate_type_cleanup_decl(typename + "List"))
+fdecl.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DECL"))
+
+# ...this doesn't work for cases where we link in multiple objects that
+# have the functions defined, so we use -b option to provide control
+# over these cases
+if do_builtins:
+ fdef.write(guardstart("QAPI_TYPES_BUILTIN_CLEANUP_DEF"))
+ for typename in builtin_types.keys():
+ fdef.write(generate_type_cleanup(typename + "List"))
+ fdef.write(guardend("QAPI_TYPES_BUILTIN_CLEANUP_DEF"))
+
+for expr in exprs:
+ ret = "\n"
+ if expr.has_key('struct'):
+ ret += generate_struct(expr) + "\n"
+ ret += generate_type_cleanup_decl(expr['struct'] + "List")
+ fdef.write(generate_type_cleanup(expr['struct'] + "List") + "\n")
+ ret += generate_type_cleanup_decl(expr['struct'])
+ fdef.write(generate_type_cleanup(expr['struct']) + "\n")
+ elif expr.has_key('union'):
+ ret += generate_union(expr, 'union')
+ ret += generate_type_cleanup_decl(expr['union'] + "List")
+ fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n")
+ ret += generate_type_cleanup_decl(expr['union'])
+ fdef.write(generate_type_cleanup(expr['union']) + "\n")
+ elif expr.has_key('alternate'):
+ ret += generate_union(expr, 'alternate')
+ ret += generate_type_cleanup_decl(expr['alternate'] + "List")
+ fdef.write(generate_type_cleanup(expr['alternate'] + "List") + "\n")
+ ret += generate_type_cleanup_decl(expr['alternate'])
+ fdef.write(generate_type_cleanup(expr['alternate']) + "\n")
+ elif expr.has_key('enum'):
+ ret += generate_type_cleanup_decl(expr['enum'] + "List")
+ fdef.write(generate_type_cleanup(expr['enum'] + "List") + "\n")
+ else:
+ continue
+ fdecl.write(ret)
+
+close_output(fdef, fdecl)
diff --git a/scripts/qapi-visit.py b/scripts/qapi-visit.py
new file mode 100644
index 00000000..5b993364
--- /dev/null
+++ b/scripts/qapi-visit.py
@@ -0,0 +1,484 @@
+#
+# QAPI visitor generator
+#
+# Copyright IBM, Corp. 2011
+# Copyright (C) 2014-2015 Red Hat, Inc.
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+# Michael Roth <mdroth@linux.vnet.ibm.com>
+# Markus Armbruster <armbru@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2.
+# See the COPYING file in the top-level directory.
+
+from ordereddict import OrderedDict
+from qapi import *
+import re
+
+implicit_structs = []
+
+def generate_visit_implicit_struct(type):
+ global implicit_structs
+ if type in implicit_structs:
+ return ''
+ implicit_structs.append(type)
+ return mcgen('''
+
+static void visit_type_implicit_%(c_type)s(Visitor *m, %(c_type)s **obj, Error **errp)
+{
+ Error *err = NULL;
+
+ visit_start_implicit_struct(m, (void **)obj, sizeof(%(c_type)s), &err);
+ if (!err) {
+ visit_type_%(c_type)s_fields(m, obj, errp);
+ visit_end_implicit_struct(m, &err);
+ }
+ error_propagate(errp, err);
+}
+''',
+ c_type=type_name(type))
+
+def generate_visit_struct_fields(name, members, base = None):
+ substructs = []
+ ret = ''
+
+ if base:
+ ret += generate_visit_implicit_struct(base)
+
+ ret += mcgen('''
+
+static void visit_type_%(name)s_fields(Visitor *m, %(name)s **obj, Error **errp)
+{
+ Error *err = NULL;
+''',
+ name=c_name(name))
+ push_indent()
+
+ if base:
+ ret += mcgen('''
+visit_type_implicit_%(type)s(m, &(*obj)->%(c_name)s, &err);
+if (err) {
+ goto out;
+}
+''',
+ type=type_name(base), c_name=c_name('base'))
+
+ for argname, argentry, optional in parse_args(members):
+ if optional:
+ ret += mcgen('''
+visit_optional(m, &(*obj)->has_%(c_name)s, "%(name)s", &err);
+if (!err && (*obj)->has_%(c_name)s) {
+''',
+ c_name=c_name(argname), name=argname)
+ push_indent()
+
+ ret += mcgen('''
+visit_type_%(type)s(m, &(*obj)->%(c_name)s, "%(name)s", &err);
+''',
+ type=type_name(argentry), c_name=c_name(argname),
+ name=argname)
+
+ if optional:
+ pop_indent()
+ ret += mcgen('''
+}
+''')
+ ret += mcgen('''
+if (err) {
+ goto out;
+}
+''')
+
+ pop_indent()
+ if re.search('^ *goto out\\;', ret, re.MULTILINE):
+ ret += mcgen('''
+
+out:
+''')
+ ret += mcgen('''
+ error_propagate(errp, err);
+}
+''')
+ return ret
+
+
+def generate_visit_struct_body(name, members):
+ ret = mcgen('''
+ Error *err = NULL;
+
+ visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(c_name)s), &err);
+ if (!err) {
+ if (*obj) {
+ visit_type_%(c_name)s_fields(m, obj, errp);
+ }
+ visit_end_struct(m, &err);
+ }
+ error_propagate(errp, err);
+''',
+ name=name, c_name=c_name(name))
+
+ return ret
+
+def generate_visit_struct(expr):
+
+ name = expr['struct']
+ members = expr['data']
+ base = expr.get('base')
+
+ ret = generate_visit_struct_fields(name, members, base)
+
+ ret += mcgen('''
+
+void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp)
+{
+''',
+ name=c_name(name))
+
+ ret += generate_visit_struct_body(name, members)
+
+ ret += mcgen('''
+}
+''')
+ return ret
+
+def generate_visit_list(name, members):
+ return mcgen('''
+
+void visit_type_%(name)sList(Visitor *m, %(name)sList **obj, const char *name, Error **errp)
+{
+ Error *err = NULL;
+ GenericList *i, **prev;
+
+ visit_start_list(m, name, &err);
+ if (err) {
+ goto out;
+ }
+
+ for (prev = (GenericList **)obj;
+ !err && (i = visit_next_list(m, prev, &err)) != NULL;
+ prev = &i) {
+ %(name)sList *native_i = (%(name)sList *)i;
+ visit_type_%(name)s(m, &native_i->value, NULL, &err);
+ }
+
+ error_propagate(errp, err);
+ err = NULL;
+ visit_end_list(m, &err);
+out:
+ error_propagate(errp, err);
+}
+''',
+ name=type_name(name))
+
+def generate_visit_enum(name, members):
+ return mcgen('''
+
+void visit_type_%(name)s(Visitor *m, %(name)s *obj, const char *name, Error **errp)
+{
+ visit_type_enum(m, (int *)obj, %(name)s_lookup, "%(name)s", name, errp);
+}
+''',
+ name=c_name(name))
+
+def generate_visit_alternate(name, members):
+ ret = mcgen('''
+
+void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp)
+{
+ Error *err = NULL;
+
+ visit_start_implicit_struct(m, (void**) obj, sizeof(%(name)s), &err);
+ if (err) {
+ goto out;
+ }
+ visit_get_next_type(m, (int*) &(*obj)->kind, %(name)s_qtypes, name, &err);
+ if (err) {
+ goto out_end;
+ }
+ switch ((*obj)->kind) {
+''',
+ name=c_name(name))
+
+ # For alternate, always use the default enum type automatically generated
+ # as name + 'Kind'
+ disc_type = c_name(name) + 'Kind'
+
+ for key in members:
+ assert (members[key] in builtin_types.keys()
+ or find_struct(members[key])
+ or find_union(members[key])
+ or find_enum(members[key])), "Invalid alternate member"
+
+ enum_full_value = c_enum_const(disc_type, key)
+ ret += mcgen('''
+ case %(enum_full_value)s:
+ visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, name, &err);
+ break;
+''',
+ enum_full_value = enum_full_value,
+ c_type = type_name(members[key]),
+ c_name = c_name(key))
+
+ ret += mcgen('''
+ default:
+ abort();
+ }
+out_end:
+ error_propagate(errp, err);
+ err = NULL;
+ visit_end_implicit_struct(m, &err);
+out:
+ error_propagate(errp, err);
+}
+''')
+
+ return ret
+
+
+def generate_visit_union(expr):
+
+ name = expr['union']
+ members = expr['data']
+
+ base = expr.get('base')
+ discriminator = expr.get('discriminator')
+
+ enum_define = discriminator_find_enum_define(expr)
+ if enum_define:
+ # Use the enum type as discriminator
+ ret = ""
+ disc_type = c_name(enum_define['enum_name'])
+ else:
+ # There will always be a discriminator in the C switch code, by default
+ # it is an enum type generated silently
+ ret = generate_visit_enum(name + 'Kind', members.keys())
+ disc_type = c_name(name) + 'Kind'
+
+ if base:
+ assert discriminator
+ base_fields = find_struct(base)['data'].copy()
+ del base_fields[discriminator]
+ ret += generate_visit_struct_fields(name, base_fields)
+
+ if discriminator:
+ for key in members:
+ ret += generate_visit_implicit_struct(members[key])
+
+ ret += mcgen('''
+
+void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp)
+{
+ Error *err = NULL;
+
+ visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err);
+ if (err) {
+ goto out;
+ }
+ if (*obj) {
+''',
+ name=c_name(name))
+
+ if base:
+ ret += mcgen('''
+ visit_type_%(name)s_fields(m, obj, &err);
+ if (err) {
+ goto out_obj;
+ }
+''',
+ name=c_name(name))
+
+ if not discriminator:
+ disc_key = "type"
+ else:
+ disc_key = discriminator
+ ret += mcgen('''
+ visit_type_%(disc_type)s(m, &(*obj)->kind, "%(disc_key)s", &err);
+ if (err) {
+ goto out_obj;
+ }
+ if (!visit_start_union(m, !!(*obj)->data, &err) || err) {
+ goto out_obj;
+ }
+ switch ((*obj)->kind) {
+''',
+ disc_type = disc_type,
+ disc_key = disc_key)
+
+ for key in members:
+ if not discriminator:
+ fmt = 'visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", &err);'
+ else:
+ fmt = 'visit_type_implicit_%(c_type)s(m, &(*obj)->%(c_name)s, &err);'
+
+ enum_full_value = c_enum_const(disc_type, key)
+ ret += mcgen('''
+ case %(enum_full_value)s:
+ ''' + fmt + '''
+ break;
+''',
+ enum_full_value = enum_full_value,
+ c_type=type_name(members[key]),
+ c_name=c_name(key))
+
+ ret += mcgen('''
+ default:
+ abort();
+ }
+out_obj:
+ error_propagate(errp, err);
+ err = NULL;
+ visit_end_union(m, !!(*obj)->data, &err);
+ error_propagate(errp, err);
+ err = NULL;
+ }
+ visit_end_struct(m, &err);
+out:
+ error_propagate(errp, err);
+}
+''')
+
+ return ret
+
+def generate_declaration(name, members, builtin_type=False):
+ ret = ""
+ if not builtin_type:
+ name = c_name(name)
+ ret += mcgen('''
+
+void visit_type_%(name)s(Visitor *m, %(name)s **obj, const char *name, Error **errp);
+''',
+ name=name)
+
+ ret += mcgen('''
+void visit_type_%(name)sList(Visitor *m, %(name)sList **obj, const char *name, Error **errp);
+''',
+ name=name)
+
+ return ret
+
+def generate_enum_declaration(name, members):
+ ret = mcgen('''
+void visit_type_%(name)sList(Visitor *m, %(name)sList **obj, const char *name, Error **errp);
+''',
+ name=c_name(name))
+
+ return ret
+
+def generate_decl_enum(name, members):
+ return mcgen('''
+
+void visit_type_%(name)s(Visitor *m, %(name)s *obj, const char *name, Error **errp);
+''',
+ name=c_name(name))
+
+do_builtins = False
+
+(input_file, output_dir, do_c, do_h, prefix, opts) = \
+ parse_command_line("b", ["builtins"])
+
+for o, a in opts:
+ if o in ("-b", "--builtins"):
+ do_builtins = True
+
+c_comment = '''
+/*
+ * schema-defined QAPI visitor functions
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+'''
+h_comment = '''
+/*
+ * schema-defined QAPI visitor functions
+ *
+ * Copyright IBM, Corp. 2011
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+'''
+
+(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
+ 'qapi-visit.c', 'qapi-visit.h',
+ c_comment, h_comment)
+
+fdef.write(mcgen('''
+#include "qemu-common.h"
+#include "%(prefix)sqapi-visit.h"
+''',
+ prefix = prefix))
+
+fdecl.write(mcgen('''
+#include "qapi/visitor.h"
+#include "%(prefix)sqapi-types.h"
+
+''',
+ prefix=prefix))
+
+exprs = parse_schema(input_file)
+
+# to avoid header dependency hell, we always generate declarations
+# for built-in types in our header files and simply guard them
+fdecl.write(guardstart("QAPI_VISIT_BUILTIN_VISITOR_DECL"))
+for typename in builtin_types.keys():
+ fdecl.write(generate_declaration(typename, None, builtin_type=True))
+fdecl.write(guardend("QAPI_VISIT_BUILTIN_VISITOR_DECL"))
+
+# ...this doesn't work for cases where we link in multiple objects that
+# have the functions defined, so we use -b option to provide control
+# over these cases
+if do_builtins:
+ for typename in builtin_types.keys():
+ fdef.write(generate_visit_list(typename, None))
+
+for expr in exprs:
+ if expr.has_key('struct'):
+ ret = generate_visit_struct(expr)
+ ret += generate_visit_list(expr['struct'], expr['data'])
+ fdef.write(ret)
+
+ ret = generate_declaration(expr['struct'], expr['data'])
+ fdecl.write(ret)
+ elif expr.has_key('union'):
+ ret = generate_visit_union(expr)
+ ret += generate_visit_list(expr['union'], expr['data'])
+ fdef.write(ret)
+
+ enum_define = discriminator_find_enum_define(expr)
+ ret = ""
+ if not enum_define:
+ ret = generate_decl_enum('%sKind' % expr['union'],
+ expr['data'].keys())
+ ret += generate_declaration(expr['union'], expr['data'])
+ fdecl.write(ret)
+ elif expr.has_key('alternate'):
+ ret = generate_visit_alternate(expr['alternate'], expr['data'])
+ ret += generate_visit_list(expr['alternate'], expr['data'])
+ fdef.write(ret)
+
+ ret = generate_decl_enum('%sKind' % expr['alternate'],
+ expr['data'].keys())
+ ret += generate_declaration(expr['alternate'], expr['data'])
+ fdecl.write(ret)
+ elif expr.has_key('enum'):
+ ret = generate_visit_list(expr['enum'], expr['data'])
+ ret += generate_visit_enum(expr['enum'], expr['data'])
+ fdef.write(ret)
+
+ ret = generate_decl_enum(expr['enum'], expr['data'])
+ ret += generate_enum_declaration(expr['enum'], expr['data'])
+ fdecl.write(ret)
+
+close_output(fdef, fdecl)
diff --git a/scripts/qapi.py b/scripts/qapi.py
new file mode 100644
index 00000000..06d7fc28
--- /dev/null
+++ b/scripts/qapi.py
@@ -0,0 +1,1074 @@
+#
+# QAPI helper library
+#
+# Copyright IBM, Corp. 2011
+# Copyright (c) 2013-2015 Red Hat Inc.
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+# Markus Armbruster <armbru@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2.
+# See the COPYING file in the top-level directory.
+
+import re
+from ordereddict import OrderedDict
+import errno
+import getopt
+import os
+import sys
+import string
+
+builtin_types = {
+ 'str': 'QTYPE_QSTRING',
+ 'int': 'QTYPE_QINT',
+ 'number': 'QTYPE_QFLOAT',
+ 'bool': 'QTYPE_QBOOL',
+ 'int8': 'QTYPE_QINT',
+ 'int16': 'QTYPE_QINT',
+ 'int32': 'QTYPE_QINT',
+ 'int64': 'QTYPE_QINT',
+ 'uint8': 'QTYPE_QINT',
+ 'uint16': 'QTYPE_QINT',
+ 'uint32': 'QTYPE_QINT',
+ 'uint64': 'QTYPE_QINT',
+ 'size': 'QTYPE_QINT',
+}
+
+# Whitelist of commands allowed to return a non-dictionary
+returns_whitelist = [
+ # From QMP:
+ 'human-monitor-command',
+ 'query-migrate-cache-size',
+ 'query-tpm-models',
+ 'query-tpm-types',
+ 'ringbuf-read',
+
+ # From QGA:
+ 'guest-file-open',
+ 'guest-fsfreeze-freeze',
+ 'guest-fsfreeze-freeze-list',
+ 'guest-fsfreeze-status',
+ 'guest-fsfreeze-thaw',
+ 'guest-get-time',
+ 'guest-set-vcpus',
+ 'guest-sync',
+ 'guest-sync-delimited',
+
+ # From qapi-schema-test:
+ 'user_def_cmd3',
+]
+
+enum_types = []
+struct_types = []
+union_types = []
+events = []
+all_names = {}
+
+#
+# Parsing the schema into expressions
+#
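+#
+# The schema uses a JSON-like syntax with single-quoted strings and '#'
+# comments; an illustrative top-level expression (names are hypothetical):
+#   { 'command': 'query-foo', 'returns': 'FooInfo' }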
+
+def error_path(parent):
+ res = ""
+ while parent:
+ res = ("In file included from %s:%d:\n" % (parent['file'],
+ parent['line'])) + res
+ parent = parent['parent']
+ return res
+
+class QAPISchemaError(Exception):
+ def __init__(self, schema, msg):
+ self.fname = schema.fname
+ self.msg = msg
+ self.col = 1
+ self.line = schema.line
+ for ch in schema.src[schema.line_pos:schema.pos]:
+ if ch == '\t':
+ self.col = (self.col + 7) % 8 + 1
+ else:
+ self.col += 1
+ self.info = schema.incl_info
+
+ def __str__(self):
+ return error_path(self.info) + \
+ "%s:%d:%d: %s" % (self.fname, self.line, self.col, self.msg)
+
+class QAPIExprError(Exception):
+ def __init__(self, expr_info, msg):
+ self.info = expr_info
+ self.msg = msg
+
+ def __str__(self):
+ return error_path(self.info['parent']) + \
+ "%s:%d: %s" % (self.info['file'], self.info['line'], self.msg)
+
+class QAPISchema:
+
+ def __init__(self, fp, previously_included = [], incl_info = None):
+ abs_fname = os.path.abspath(fp.name)
+ fname = fp.name
+ self.fname = fname
+ previously_included.append(abs_fname)
+ self.incl_info = incl_info
+ self.src = fp.read()
+ if self.src == '' or self.src[-1] != '\n':
+ self.src += '\n'
+ self.cursor = 0
+ self.line = 1
+ self.line_pos = 0
+ self.exprs = []
+ self.accept()
+
+ while self.tok != None:
+ expr_info = {'file': fname, 'line': self.line,
+ 'parent': self.incl_info}
+ expr = self.get_expr(False)
+ if isinstance(expr, dict) and "include" in expr:
+ if len(expr) != 1:
+ raise QAPIExprError(expr_info, "Invalid 'include' directive")
+ include = expr["include"]
+ if not isinstance(include, str):
+ raise QAPIExprError(expr_info,
+ 'Expected a file name (string), got: %s'
+ % include)
+ incl_abs_fname = os.path.join(os.path.dirname(abs_fname),
+ include)
+ # catch inclusion cycle
+ inf = expr_info
+ while inf:
+ if incl_abs_fname == os.path.abspath(inf['file']):
+ raise QAPIExprError(expr_info, "Inclusion loop for %s"
+ % include)
+ inf = inf['parent']
+ # skip multiple include of the same file
+ if incl_abs_fname in previously_included:
+ continue
+ try:
+ fobj = open(incl_abs_fname, 'r')
+ except IOError, e:
+ raise QAPIExprError(expr_info,
+ '%s: %s' % (e.strerror, include))
+ exprs_include = QAPISchema(fobj, previously_included,
+ expr_info)
+ self.exprs.extend(exprs_include.exprs)
+ else:
+ expr_elem = {'expr': expr,
+ 'info': expr_info}
+ self.exprs.append(expr_elem)
+
+ def accept(self):
+ while True:
+ self.tok = self.src[self.cursor]
+ self.pos = self.cursor
+ self.cursor += 1
+ self.val = None
+
+ if self.tok == '#':
+ self.cursor = self.src.find('\n', self.cursor)
+ elif self.tok in ['{', '}', ':', ',', '[', ']']:
+ return
+ elif self.tok == "'":
+ string = ''
+ esc = False
+ while True:
+ ch = self.src[self.cursor]
+ self.cursor += 1
+ if ch == '\n':
+ raise QAPISchemaError(self,
+ 'Missing terminating "\'"')
+ if esc:
+ if ch == 'b':
+ string += '\b'
+ elif ch == 'f':
+ string += '\f'
+ elif ch == 'n':
+ string += '\n'
+ elif ch == 'r':
+ string += '\r'
+ elif ch == 't':
+ string += '\t'
+ elif ch == 'u':
+ value = 0
+ for x in range(0, 4):
+ ch = self.src[self.cursor]
+ self.cursor += 1
+ if ch not in "0123456789abcdefABCDEF":
+ raise QAPISchemaError(self,
+ '\\u escape needs 4 '
+ 'hex digits')
+ value = (value << 4) + int(ch, 16)
+ # If Python 2 and 3 didn't disagree so much on
+ # how to handle Unicode, then we could allow
+ # Unicode string defaults. But most of QAPI is
+ # ASCII-only, so we aren't losing much for now.
+ if not value or value > 0x7f:
+ raise QAPISchemaError(self,
+ 'For now, \\u escape '
+ 'only supports non-zero '
+ 'values up to \\u007f')
+ string += chr(value)
+ elif ch in "\\/'\"":
+ string += ch
+ else:
+ raise QAPISchemaError(self,
+ "Unknown escape \\%s" %ch)
+ esc = False
+ elif ch == "\\":
+ esc = True
+ elif ch == "'":
+ self.val = string
+ return
+ else:
+ string += ch
+ elif self.src.startswith("true", self.pos):
+ self.val = True
+ self.cursor += 3
+ return
+ elif self.src.startswith("false", self.pos):
+ self.val = False
+ self.cursor += 4
+ return
+ elif self.src.startswith("null", self.pos):
+ self.val = None
+ self.cursor += 3
+ return
+ elif self.tok == '\n':
+ if self.cursor == len(self.src):
+ self.tok = None
+ return
+ self.line += 1
+ self.line_pos = self.cursor
+ elif not self.tok.isspace():
+ raise QAPISchemaError(self, 'Stray "%s"' % self.tok)
+
+ def get_members(self):
+ expr = OrderedDict()
+ if self.tok == '}':
+ self.accept()
+ return expr
+ if self.tok != "'":
+ raise QAPISchemaError(self, 'Expected string or "}"')
+ while True:
+ key = self.val
+ self.accept()
+ if self.tok != ':':
+ raise QAPISchemaError(self, 'Expected ":"')
+ self.accept()
+ if key in expr:
+ raise QAPISchemaError(self, 'Duplicate key "%s"' % key)
+ expr[key] = self.get_expr(True)
+ if self.tok == '}':
+ self.accept()
+ return expr
+ if self.tok != ',':
+ raise QAPISchemaError(self, 'Expected "," or "}"')
+ self.accept()
+ if self.tok != "'":
+ raise QAPISchemaError(self, 'Expected string')
+
+ def get_values(self):
+ expr = []
+ if self.tok == ']':
+ self.accept()
+ return expr
+ if not self.tok in "{['tfn":
+ raise QAPISchemaError(self, 'Expected "{", "[", "]", string, '
+ 'boolean or "null"')
+ while True:
+ expr.append(self.get_expr(True))
+ if self.tok == ']':
+ self.accept()
+ return expr
+ if self.tok != ',':
+ raise QAPISchemaError(self, 'Expected "," or "]"')
+ self.accept()
+
+ def get_expr(self, nested):
+ if self.tok != '{' and not nested:
+ raise QAPISchemaError(self, 'Expected "{"')
+ if self.tok == '{':
+ self.accept()
+ expr = self.get_members()
+ elif self.tok == '[':
+ self.accept()
+ expr = self.get_values()
+ elif self.tok in "'tfn":
+ expr = self.val
+ self.accept()
+ else:
+ raise QAPISchemaError(self, 'Expected "{", "[" or string')
+ return expr
+
+#
+# Semantic analysis of schema expressions
+#
+
+def find_base_fields(base):
+ base_struct_define = find_struct(base)
+ if not base_struct_define:
+ return None
+ return base_struct_define['data']
+
+# Return the qtype of an alternate branch, or None on error.
+def find_alternate_member_qtype(qapi_type):
+ if builtin_types.has_key(qapi_type):
+ return builtin_types[qapi_type]
+ elif find_struct(qapi_type):
+ return "QTYPE_QDICT"
+ elif find_enum(qapi_type):
+ return "QTYPE_QSTRING"
+ elif find_union(qapi_type):
+ return "QTYPE_QDICT"
+ return None
+
+# Return the discriminator enum define if discriminator is specified as an
+# enum type, otherwise return None.
+def discriminator_find_enum_define(expr):
+ base = expr.get('base')
+ discriminator = expr.get('discriminator')
+
+ if not (discriminator and base):
+ return None
+
+ base_fields = find_base_fields(base)
+ if not base_fields:
+ return None
+
+ discriminator_type = base_fields.get(discriminator)
+ if not discriminator_type:
+ return None
+
+ return find_enum(discriminator_type)
+
+valid_name = re.compile('^[a-zA-Z_][a-zA-Z0-9_.-]*$')
+def check_name(expr_info, source, name, allow_optional = False,
+ enum_member = False):
+ global valid_name
+ membername = name
+
+ if not isinstance(name, str):
+ raise QAPIExprError(expr_info,
+ "%s requires a string name" % source)
+ if name.startswith('*'):
+ membername = name[1:]
+ if not allow_optional:
+ raise QAPIExprError(expr_info,
+ "%s does not allow optional name '%s'"
+ % (source, name))
+ # Enum members can start with a digit, because the generated C
+ # code always prefixes it with the enum name
+ if enum_member:
+ membername = '_' + membername
+ if not valid_name.match(membername):
+ raise QAPIExprError(expr_info,
+ "%s uses invalid name '%s'" % (source, name))
+
+def add_name(name, info, meta, implicit = False):
+ global all_names
+ check_name(info, "'%s'" % meta, name)
+ if name in all_names:
+ raise QAPIExprError(info,
+ "%s '%s' is already defined"
+ % (all_names[name], name))
+ if not implicit and name[-4:] == 'Kind':
+ raise QAPIExprError(info,
+ "%s '%s' should not end in 'Kind'"
+ % (meta, name))
+ all_names[name] = meta
+
+def add_struct(definition, info):
+ global struct_types
+ name = definition['struct']
+ add_name(name, info, 'struct')
+ struct_types.append(definition)
+
+def find_struct(name):
+ global struct_types
+ for struct in struct_types:
+ if struct['struct'] == name:
+ return struct
+ return None
+
+def add_union(definition, info):
+ global union_types
+ name = definition['union']
+ add_name(name, info, 'union')
+ union_types.append(definition)
+
+def find_union(name):
+ global union_types
+ for union in union_types:
+ if union['union'] == name:
+ return union
+ return None
+
+def add_enum(name, info, enum_values = None, implicit = False):
+ global enum_types
+ add_name(name, info, 'enum', implicit)
+ enum_types.append({"enum_name": name, "enum_values": enum_values})
+
+def find_enum(name):
+ global enum_types
+ for enum in enum_types:
+ if enum['enum_name'] == name:
+ return enum
+ return None
+
+def is_enum(name):
+ return find_enum(name) != None
+
+def check_type(expr_info, source, value, allow_array = False,
+ allow_dict = False, allow_optional = False,
+ allow_star = False, allow_metas = []):
+ global all_names
+ orig_value = value
+
+ if value is None:
+ return
+
+ if allow_star and value == '**':
+ return
+
+ # Check if array type for value is okay
+ if isinstance(value, list):
+ if not allow_array:
+ raise QAPIExprError(expr_info,
+ "%s cannot be an array" % source)
+ if len(value) != 1 or not isinstance(value[0], str):
+ raise QAPIExprError(expr_info,
+ "%s: array type must contain single type name"
+ % source)
+ value = value[0]
+ orig_value = "array of %s" %value
+
+ # Check if type name for value is okay
+ if isinstance(value, str):
+ if value == '**':
+ raise QAPIExprError(expr_info,
+ "%s uses '**' but did not request 'gen':false"
+ % source)
+ if not value in all_names:
+ raise QAPIExprError(expr_info,
+ "%s uses unknown type '%s'"
+ % (source, orig_value))
+ if not all_names[value] in allow_metas:
+ raise QAPIExprError(expr_info,
+ "%s cannot use %s type '%s'"
+ % (source, all_names[value], orig_value))
+ return
+
+ # value is a dictionary, check that each member is okay
+ if not isinstance(value, OrderedDict):
+ raise QAPIExprError(expr_info,
+ "%s should be a dictionary" % source)
+ if not allow_dict:
+ raise QAPIExprError(expr_info,
+ "%s should be a type name" % source)
+ for (key, arg) in value.items():
+ check_name(expr_info, "Member of %s" % source, key,
+ allow_optional=allow_optional)
+ # Todo: allow dictionaries to represent default values of
+ # an optional argument.
+ check_type(expr_info, "Member '%s' of %s" % (key, source), arg,
+ allow_array=True, allow_star=allow_star,
+ allow_metas=['built-in', 'union', 'alternate', 'struct',
+ 'enum'])
+
+def check_member_clash(expr_info, base_name, data, source = ""):
+ base = find_struct(base_name)
+ assert base
+ base_members = base['data']
+ for key in data.keys():
+ if key.startswith('*'):
+ key = key[1:]
+ if key in base_members or "*" + key in base_members:
+ raise QAPIExprError(expr_info,
+ "Member name '%s'%s clashes with base '%s'"
+ % (key, source, base_name))
+ if base.get('base'):
+ check_member_clash(expr_info, base['base'], data, source)
+
+def check_command(expr, expr_info):
+ name = expr['command']
+ allow_star = expr.has_key('gen')
+
+ check_type(expr_info, "'data' for command '%s'" % name,
+ expr.get('data'), allow_dict=True, allow_optional=True,
+ allow_metas=['union', 'struct'], allow_star=allow_star)
+ returns_meta = ['union', 'struct']
+ if name in returns_whitelist:
+ returns_meta += ['built-in', 'alternate', 'enum']
+ check_type(expr_info, "'returns' for command '%s'" % name,
+ expr.get('returns'), allow_array=True, allow_dict=True,
+ allow_optional=True, allow_metas=returns_meta,
+ allow_star=allow_star)
+
+def check_event(expr, expr_info):
+ global events
+ name = expr['event']
+ params = expr.get('data')
+
+ if name.upper() == 'MAX':
+ raise QAPIExprError(expr_info, "Event name 'MAX' cannot be created")
+ events.append(name)
+ check_type(expr_info, "'data' for event '%s'" % name,
+ expr.get('data'), allow_dict=True, allow_optional=True,
+ allow_metas=['union', 'struct'])
+
+def check_union(expr, expr_info):
+ name = expr['union']
+ base = expr.get('base')
+ discriminator = expr.get('discriminator')
+ members = expr['data']
+ values = { 'MAX': '(automatic)' }
+
+ # If the object has a member 'base', its value must name a struct,
+ # and there must be a discriminator.
+ if base is not None:
+ if discriminator is None:
+ raise QAPIExprError(expr_info,
+ "Union '%s' requires a discriminator to go "
+ "along with base" %name)
+
+ # Two types of unions, determined by discriminator.
+
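+    # For illustration, hypothetical schema snippets (not from this tree):
+    #   simple union: { 'union': 'Sample', 'data': { 'a': 'int', 'b': 'str' } }
+    #   flat union:   { 'union': 'Sample', 'base': 'SampleBase',
+    #                   'discriminator': 'type', 'data': { 'one': 'One' } }
+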
+ # With no discriminator it is a simple union.
+ if discriminator is None:
+ enum_define = None
+ allow_metas=['built-in', 'union', 'alternate', 'struct', 'enum']
+ if base is not None:
+ raise QAPIExprError(expr_info,
+ "Simple union '%s' must not have a base"
+ % name)
+
+ # Else, it's a flat union.
+ else:
+ # The object must have a string member 'base'.
+ if not isinstance(base, str):
+ raise QAPIExprError(expr_info,
+ "Flat union '%s' must have a string base field"
+ % name)
+ base_fields = find_base_fields(base)
+ if not base_fields:
+ raise QAPIExprError(expr_info,
+ "Base '%s' is not a valid struct"
+ % base)
+
+ # The value of member 'discriminator' must name a non-optional
+ # member of the base struct.
+ check_name(expr_info, "Discriminator of flat union '%s'" % name,
+ discriminator)
+ discriminator_type = base_fields.get(discriminator)
+ if not discriminator_type:
+ raise QAPIExprError(expr_info,
+ "Discriminator '%s' is not a member of base "
+ "struct '%s'"
+ % (discriminator, base))
+ enum_define = find_enum(discriminator_type)
+ allow_metas=['struct']
+ # Do not allow string discriminator
+ if not enum_define:
+ raise QAPIExprError(expr_info,
+ "Discriminator '%s' must be of enumeration "
+ "type" % discriminator)
+
+ # Check every branch
+ for (key, value) in members.items():
+ check_name(expr_info, "Member of union '%s'" % name, key)
+
+ # Each value must name a known type; furthermore, in flat unions,
+ # branches must be a struct with no overlapping member names
+ check_type(expr_info, "Member '%s' of union '%s'" % (key, name),
+ value, allow_array=not base, allow_metas=allow_metas)
+ if base:
+ branch_struct = find_struct(value)
+ assert branch_struct
+ check_member_clash(expr_info, base, branch_struct['data'],
+ " of branch '%s'" % key)
+
+ # If the discriminator names an enum type, then all members
+ # of 'data' must also be members of the enum type.
+ if enum_define:
+ if not key in enum_define['enum_values']:
+ raise QAPIExprError(expr_info,
+ "Discriminator value '%s' is not found in "
+ "enum '%s'" %
+ (key, enum_define["enum_name"]))
+
+ # Otherwise, check for conflicts in the generated enum
+ else:
+ c_key = camel_to_upper(key)
+ if c_key in values:
+ raise QAPIExprError(expr_info,
+ "Union '%s' member '%s' clashes with '%s'"
+ % (name, key, values[c_key]))
+ values[c_key] = key
+
+def check_alternate(expr, expr_info):
+ name = expr['alternate']
+ members = expr['data']
+ values = { 'MAX': '(automatic)' }
+ types_seen = {}
+
+ # Check every branch
+ for (key, value) in members.items():
+ check_name(expr_info, "Member of alternate '%s'" % name, key)
+
+ # Check for conflicts in the generated enum
+ c_key = camel_to_upper(key)
+ if c_key in values:
+ raise QAPIExprError(expr_info,
+ "Alternate '%s' member '%s' clashes with '%s'"
+ % (name, key, values[c_key]))
+ values[c_key] = key
+
+ # Ensure alternates have no type conflicts.
+ check_type(expr_info, "Member '%s' of alternate '%s'" % (key, name),
+ value,
+ allow_metas=['built-in', 'union', 'struct', 'enum'])
+ qtype = find_alternate_member_qtype(value)
+ assert qtype
+ if qtype in types_seen:
+ raise QAPIExprError(expr_info,
+ "Alternate '%s' member '%s' can't "
+ "be distinguished from member '%s'"
+ % (name, key, types_seen[qtype]))
+ types_seen[qtype] = key
+
+def check_enum(expr, expr_info):
+ name = expr['enum']
+ members = expr.get('data')
+ values = { 'MAX': '(automatic)' }
+
+ if not isinstance(members, list):
+ raise QAPIExprError(expr_info,
+ "Enum '%s' requires an array for 'data'" % name)
+ for member in members:
+ check_name(expr_info, "Member of enum '%s'" %name, member,
+ enum_member=True)
+ key = camel_to_upper(member)
+ if key in values:
+ raise QAPIExprError(expr_info,
+ "Enum '%s' member '%s' clashes with '%s'"
+ % (name, member, values[key]))
+ values[key] = member
+
+def check_struct(expr, expr_info):
+ name = expr['struct']
+ members = expr['data']
+
+ check_type(expr_info, "'data' for struct '%s'" % name, members,
+ allow_dict=True, allow_optional=True)
+ check_type(expr_info, "'base' for struct '%s'" % name, expr.get('base'),
+ allow_metas=['struct'])
+ if expr.get('base'):
+ check_member_clash(expr_info, expr['base'], expr['data'])
+
+def check_keys(expr_elem, meta, required, optional=[]):
+ expr = expr_elem['expr']
+ info = expr_elem['info']
+ name = expr[meta]
+ if not isinstance(name, str):
+ raise QAPIExprError(info,
+ "'%s' key must have a string value" % meta)
+ required = required + [ meta ]
+ for (key, value) in expr.items():
+ if not key in required and not key in optional:
+ raise QAPIExprError(info,
+ "Unknown key '%s' in %s '%s'"
+ % (key, meta, name))
+ if (key == 'gen' or key == 'success-response') and value != False:
+ raise QAPIExprError(info,
+ "'%s' of %s '%s' should only use false value"
+ % (key, meta, name))
+ for key in required:
+ if not expr.has_key(key):
+ raise QAPIExprError(info,
+ "Key '%s' is missing from %s '%s'"
+ % (key, meta, name))
+
+def check_exprs(exprs):
+ global all_names
+
+ # Learn the types and check for valid expression keys
+ for builtin in builtin_types.keys():
+ all_names[builtin] = 'built-in'
+ for expr_elem in exprs:
+ expr = expr_elem['expr']
+ info = expr_elem['info']
+ if expr.has_key('enum'):
+ check_keys(expr_elem, 'enum', ['data'])
+ add_enum(expr['enum'], info, expr['data'])
+ elif expr.has_key('union'):
+ check_keys(expr_elem, 'union', ['data'],
+ ['base', 'discriminator'])
+ add_union(expr, info)
+ elif expr.has_key('alternate'):
+ check_keys(expr_elem, 'alternate', ['data'])
+ add_name(expr['alternate'], info, 'alternate')
+ elif expr.has_key('struct'):
+ check_keys(expr_elem, 'struct', ['data'], ['base'])
+ add_struct(expr, info)
+ elif expr.has_key('command'):
+ check_keys(expr_elem, 'command', [],
+ ['data', 'returns', 'gen', 'success-response'])
+ add_name(expr['command'], info, 'command')
+ elif expr.has_key('event'):
+ check_keys(expr_elem, 'event', [], ['data'])
+ add_name(expr['event'], info, 'event')
+ else:
+ raise QAPIExprError(expr_elem['info'],
+ "Expression is missing metatype")
+
+ # Try again for hidden UnionKind enum
+ for expr_elem in exprs:
+ expr = expr_elem['expr']
+ if expr.has_key('union'):
+ if not discriminator_find_enum_define(expr):
+ add_enum('%sKind' % expr['union'], expr_elem['info'],
+ implicit=True)
+ elif expr.has_key('alternate'):
+ add_enum('%sKind' % expr['alternate'], expr_elem['info'],
+ implicit=True)
+
+ # Validate that exprs make sense
+ for expr_elem in exprs:
+ expr = expr_elem['expr']
+ info = expr_elem['info']
+
+ if expr.has_key('enum'):
+ check_enum(expr, info)
+ elif expr.has_key('union'):
+ check_union(expr, info)
+ elif expr.has_key('alternate'):
+ check_alternate(expr, info)
+ elif expr.has_key('struct'):
+ check_struct(expr, info)
+ elif expr.has_key('command'):
+ check_command(expr, info)
+ elif expr.has_key('event'):
+ check_event(expr, info)
+ else:
+ assert False, 'unexpected meta type'
+
+ return map(lambda expr_elem: expr_elem['expr'], exprs)
+
+def parse_schema(fname):
+ try:
+ schema = QAPISchema(open(fname, "r"))
+ return check_exprs(schema.exprs)
+ except (QAPISchemaError, QAPIExprError), e:
+ print >>sys.stderr, e
+ exit(1)
+
+#
+# Code generation helpers
+#
+
+def parse_args(typeinfo):
+ if isinstance(typeinfo, str):
+ struct = find_struct(typeinfo)
+ assert struct != None
+ typeinfo = struct['data']
+
+ for member in typeinfo:
+ argname = member
+ argentry = typeinfo[member]
+ optional = False
+ if member.startswith('*'):
+ argname = member[1:]
+ optional = True
+ # Todo: allow argentry to be OrderedDict, for providing the
+ # value of an optional argument.
+ yield (argname, argentry, optional)
+
+def camel_case(name):
+ new_name = ''
+ first = True
+ for ch in name:
+ if ch in ['_', '-']:
+ first = True
+ elif first:
+ new_name += ch.upper()
+ first = False
+ else:
+ new_name += ch.lower()
+ return new_name
+
+# ENUMName -> ENUM_NAME, EnumName1 -> ENUM_NAME1
+# ENUM_NAME -> ENUM_NAME, ENUM_NAME1 -> ENUM_NAME1, ENUM_Name2 -> ENUM_NAME2
+# ENUM24_Name -> ENUM24_NAME
+def camel_to_upper(value):
+ c_fun_str = c_name(value, False)
+ if value.isupper():
+ return c_fun_str
+
+ new_name = ''
+ l = len(c_fun_str)
+ for i in range(l):
+ c = c_fun_str[i]
+ # When c is upper and no "_" appears before, do more checks
+ if c.isupper() and (i > 0) and c_fun_str[i - 1] != "_":
+ # Case 1: next string is lower
+ # Case 2: previous string is digit
+ if (i < (l - 1) and c_fun_str[i + 1].islower()) or \
+ c_fun_str[i - 1].isdigit():
+ new_name += '_'
+ new_name += c
+ return new_name.lstrip('_').upper()
+
+def c_enum_const(type_name, const_name):
+ return camel_to_upper(type_name + '_' + const_name)
+
+c_name_trans = string.maketrans('.-', '__')
+
+# Map @name to a valid C identifier.
+# If @protect, avoid returning certain ticklish identifiers (like
+# C keywords) by prepending "q_".
+#
+# Used for converting 'name' from a 'name':'type' qapi definition
+# into a generated struct member, as well as converting type names
+# into substrings of a generated C function name.
+# '__a.b_c' -> '__a_b_c', 'x-foo' -> 'x_foo'
+# protect=True: 'int' -> 'q_int'; protect=False: 'int' -> 'int'
+def c_name(name, protect=True):
+ # ANSI X3J11/88-090, 3.1.1
+ c89_words = set(['auto', 'break', 'case', 'char', 'const', 'continue',
+ 'default', 'do', 'double', 'else', 'enum', 'extern', 'float',
+ 'for', 'goto', 'if', 'int', 'long', 'register', 'return',
+ 'short', 'signed', 'sizeof', 'static', 'struct', 'switch',
+ 'typedef', 'union', 'unsigned', 'void', 'volatile', 'while'])
+ # ISO/IEC 9899:1999, 6.4.1
+ c99_words = set(['inline', 'restrict', '_Bool', '_Complex', '_Imaginary'])
+ # ISO/IEC 9899:2011, 6.4.1
+ c11_words = set(['_Alignas', '_Alignof', '_Atomic', '_Generic', '_Noreturn',
+ '_Static_assert', '_Thread_local'])
+ # GCC http://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/C-Extensions.html
+ # excluding _.*
+ gcc_words = set(['asm', 'typeof'])
+ # C++ ISO/IEC 14882:2003 2.11
+ cpp_words = set(['bool', 'catch', 'class', 'const_cast', 'delete',
+ 'dynamic_cast', 'explicit', 'false', 'friend', 'mutable',
+ 'namespace', 'new', 'operator', 'private', 'protected',
+ 'public', 'reinterpret_cast', 'static_cast', 'template',
+ 'this', 'throw', 'true', 'try', 'typeid', 'typename',
+ 'using', 'virtual', 'wchar_t',
+ # alternative representations
+ 'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not',
+ 'not_eq', 'or', 'or_eq', 'xor', 'xor_eq'])
+ # namespace pollution:
+ polluted_words = set(['unix', 'errno'])
+ if protect and (name in c89_words | c99_words | c11_words | gcc_words | cpp_words | polluted_words):
+ return "q_" + name
+ return name.translate(c_name_trans)
+
+# Map type @name to the C typedef name for the list form.
+#
+# ['Name'] -> 'NameList', ['x-Foo'] -> 'x_FooList', ['int'] -> 'intList'
+def c_list_type(name):
+ return type_name(name) + 'List'
+
+# Map type @value to the C typedef form.
+#
+# Used for converting 'type' from a 'member':'type' qapi definition
+# into the alphanumeric portion of the type for a generated C parameter,
+# as well as generated C function names. See c_type() for the rest of
+# the conversion such as adding '*' on pointer types.
+# 'int' -> 'int', '[x-Foo]' -> 'x_FooList', '__a.b_c' -> '__a_b_c'
+def type_name(value):
+ if type(value) == list:
+ return c_list_type(value[0])
+ if value in builtin_types.keys():
+ return value
+ return c_name(value)
+
+eatspace = '\033EATSPACE.'
+pointer_suffix = ' *' + eatspace
+
+# Map type @name to its C type expression.
+# If @is_param, const-qualify the string type.
+#
+# This function is used for computing the full C type of 'member':'name'.
+# A special suffix is added in c_type() for pointer types, and it's
+# stripped again in mcgen(); keep this in mind when using the return
+# value of c_type() outside mcgen().
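+# For example (illustrative): c_type('str') returns 'char' + pointer_suffix,
+# i.e. 'char *' followed by the eatspace marker; mcgen() later strips the
+# marker together with any spaces after it, so 'char *<eatspace> name' in a
+# template comes out as 'char *name'.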
+def c_type(value, is_param=False):
+ if value == 'str':
+ if is_param:
+ return 'const char' + pointer_suffix
+ return 'char' + pointer_suffix
+
+ elif value == 'int':
+ return 'int64_t'
+ elif (value == 'int8' or value == 'int16' or value == 'int32' or
+ value == 'int64' or value == 'uint8' or value == 'uint16' or
+ value == 'uint32' or value == 'uint64'):
+ return value + '_t'
+ elif value == 'size':
+ return 'uint64_t'
+ elif value == 'bool':
+ return 'bool'
+ elif value == 'number':
+ return 'double'
+ elif type(value) == list:
+ return c_list_type(value[0]) + pointer_suffix
+ elif is_enum(value):
+ return c_name(value)
+ elif value == None:
+ return 'void'
+ elif value in events:
+ return camel_case(value) + 'Event' + pointer_suffix
+ else:
+ # complex type name
+ assert isinstance(value, str) and value != ""
+ return c_name(value) + pointer_suffix
+
+def is_c_ptr(value):
+ return c_type(value).endswith(pointer_suffix)
+
+def genindent(count):
+ ret = ""
+ for i in range(count):
+ ret += " "
+ return ret
+
+indent_level = 0
+
+def push_indent(indent_amount=4):
+ global indent_level
+ indent_level += indent_amount
+
+def pop_indent(indent_amount=4):
+ global indent_level
+ indent_level -= indent_amount
+
+def cgen(code, **kwds):
+ indent = genindent(indent_level)
+ lines = code.split('\n')
+ lines = map(lambda x: indent + x, lines)
+ return '\n'.join(lines) % kwds + '\n'
+
+def mcgen(code, **kwds):
+ raw = cgen('\n'.join(code.split('\n')[1:-1]), **kwds)
+ return re.sub(re.escape(eatspace) + ' *', '', raw)
+
+def basename(filename):
+ return filename.split("/")[-1]
+
+def guardname(filename):
+ guard = basename(filename).rsplit(".", 1)[0]
+ for substr in [".", " ", "-"]:
+ guard = guard.replace(substr, "_")
+ return guard.upper() + '_H'
+
+def guardstart(name):
+ return mcgen('''
+
+#ifndef %(name)s
+#define %(name)s
+
+''',
+ name=guardname(name))
+
+def guardend(name):
+ return mcgen('''
+
+#endif /* %(name)s */
+
+''',
+ name=guardname(name))
+
+#
+# Common command line parsing
+#
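+# The generators built on this helper accept -c/--source, -h/--header,
+# -p/--prefix and -o/--output-dir, plus generator-specific extra options,
+# and exactly one positional argument naming the schema file, e.g.
+# (illustrative invocation):
+#   python scripts/qapi-types.py -o out -p "example-" schema.json
+#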
+
+def parse_command_line(extra_options = "", extra_long_options = []):
+
+ try:
+ opts, args = getopt.gnu_getopt(sys.argv[1:],
+ "chp:o:" + extra_options,
+ ["source", "header", "prefix=",
+ "output-dir="] + extra_long_options)
+ except getopt.GetoptError, err:
+ print >>sys.stderr, "%s: %s" % (sys.argv[0], str(err))
+ sys.exit(1)
+
+ output_dir = ""
+ prefix = ""
+ do_c = False
+ do_h = False
+ extra_opts = []
+
+ for oa in opts:
+ o, a = oa
+ if o in ("-p", "--prefix"):
+ prefix = a
+ elif o in ("-o", "--output-dir"):
+ output_dir = a + "/"
+ elif o in ("-c", "--source"):
+ do_c = True
+ elif o in ("-h", "--header"):
+ do_h = True
+ else:
+ extra_opts.append(oa)
+
+ if not do_c and not do_h:
+ do_c = True
+ do_h = True
+
+ if len(args) != 1:
+ print >>sys.stderr, "%s: need exactly one argument" % sys.argv[0]
+ sys.exit(1)
+ fname = args[0]
+
+ return (fname, output_dir, do_c, do_h, prefix, extra_opts)
+
+#
+# Generate output files with boilerplate
+#
+
+def open_output(output_dir, do_c, do_h, prefix, c_file, h_file,
+ c_comment, h_comment):
+ c_file = output_dir + prefix + c_file
+ h_file = output_dir + prefix + h_file
+
+ try:
+ os.makedirs(output_dir)
+ except os.error, e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ def maybe_open(really, name, opt):
+ if really:
+ return open(name, opt)
+ else:
+ import StringIO
+ return StringIO.StringIO()
+
+ fdef = maybe_open(do_c, c_file, 'w')
+ fdecl = maybe_open(do_h, h_file, 'w')
+
+ fdef.write(mcgen('''
+/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
+%(comment)s
+''',
+ comment = c_comment))
+
+ fdecl.write(mcgen('''
+/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
+%(comment)s
+#ifndef %(guard)s
+#define %(guard)s
+
+''',
+ comment = h_comment, guard = guardname(h_file)))
+
+ return (fdef, fdecl)
+
+def close_output(fdef, fdecl):
+ fdecl.write('''
+#endif
+''')
+ fdecl.close()
+ fdef.close()
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
new file mode 100644
index 00000000..289b1a39
--- /dev/null
+++ b/scripts/qemu-binfmt-conf.sh
@@ -0,0 +1,72 @@
+#!/bin/sh
+# enable automatic i386/ARM/M68K/MIPS/SPARC/PPC/s390 program execution by the kernel
+
+# load the binfmt_misc module
+if [ ! -d /proc/sys/fs/binfmt_misc ]; then
+ /sbin/modprobe binfmt_misc
+fi
+if [ ! -f /proc/sys/fs/binfmt_misc/register ]; then
+ mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc
+fi
+
+# probe cpu type
+cpu=`uname -m`
+case "$cpu" in
+ i386|i486|i586|i686|i86pc|BePC|x86_64)
+ cpu="i386"
+ ;;
+ m68k)
+ cpu="m68k"
+ ;;
+ mips*)
+ cpu="mips"
+ ;;
+ "Power Macintosh"|ppc|ppc64)
+ cpu="ppc"
+ ;;
+ armv[4-9]*)
+ cpu="arm"
+ ;;
+esac
+
+# register the interpreter for each cpu except for the native one
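+# (each record written to /proc/sys/fs/binfmt_misc/register has the form
+#  :name:type:offset:magic:mask:interpreter:flags, where the magic/mask pair
+#  matches the ELF header of the foreign architecture; see the kernel's
+#  binfmt_misc documentation)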
+if [ $cpu != "i386" ] ; then
+ echo ':i386:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x03\x00:\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-i386:' > /proc/sys/fs/binfmt_misc/register
+ echo ':i486:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x06\x00:\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-i386:' > /proc/sys/fs/binfmt_misc/register
+fi
+if [ $cpu != "alpha" ] ; then
+ echo ':alpha:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x26\x90:\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-alpha:' > /proc/sys/fs/binfmt_misc/register
+fi
+if [ $cpu != "arm" ] ; then
+ echo ':arm:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-arm:' > /proc/sys/fs/binfmt_misc/register
+ echo ':armeb:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-armeb:' > /proc/sys/fs/binfmt_misc/register
+fi
+if [ $cpu != "aarch64" ] ; then
+ echo ':aarch64:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-aarch64:' > /proc/sys/fs/binfmt_misc/register
+fi
+if [ $cpu != "sparc" ] ; then
+ echo ':sparc:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-sparc:' > /proc/sys/fs/binfmt_misc/register
+fi
+if [ $cpu != "ppc" ] ; then
+ echo ':ppc:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x14:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-ppc:' > /proc/sys/fs/binfmt_misc/register
+fi
+if [ $cpu != "m68k" ] ; then
+ echo 'Please check cpu value and header information for m68k!'
+ echo ':m68k:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x04:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-m68k:' > /proc/sys/fs/binfmt_misc/register
+fi
+if [ $cpu != "mips" ] ; then
+ # FIXME: We could use the other endianness on a MIPS host.
+ echo ':mips:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-mips:' > /proc/sys/fs/binfmt_misc/register
+ echo ':mipsel:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-mipsel:' > /proc/sys/fs/binfmt_misc/register
+ echo ':mipsn32:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-mipsn32:' > /proc/sys/fs/binfmt_misc/register
+ echo ':mipsn32el:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-mipsn32el:' > /proc/sys/fs/binfmt_misc/register
+ echo ':mips64:M::\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-mips64:' > /proc/sys/fs/binfmt_misc/register
+ echo ':mips64el:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-mips64el:' > /proc/sys/fs/binfmt_misc/register
+fi
+if [ $cpu != "sh" ] ; then
+ echo ':sh4:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a\x00:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-sh4:' > /proc/sys/fs/binfmt_misc/register
+ echo ':sh4eb:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-sh4eb:' > /proc/sys/fs/binfmt_misc/register
+fi
+if [ $cpu != "s390x" ] ; then
+ echo ':s390x:M::\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x16:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-s390x:' > /proc/sys/fs/binfmt_misc/register
+fi
diff --git a/scripts/qemu-gdb.py b/scripts/qemu-gdb.py
new file mode 100644
index 00000000..6c7f4fbe
--- /dev/null
+++ b/scripts/qemu-gdb.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+
+# GDB debugging support
+#
+# Copyright 2012 Red Hat, Inc. and/or its affiliates
+#
+# Authors:
+# Avi Kivity <avi@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+#
+# Contributions after 2012-01-13 are licensed under the terms of the
+# GNU GPL, version 2 or (at your option) any later version.
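+#
+# Usage (illustrative), from a gdb session debugging a QEMU process:
+#   (gdb) source scripts/qemu-gdb.py
+#   (gdb) qemu mtree
+#   (gdb) qemu coroutine <coroutine-pointer>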
+
+
+import gdb
+
+def isnull(ptr):
+ return ptr == gdb.Value(0).cast(ptr.type)
+
+def int128(p):
+ return long(p['lo']) + (long(p['hi']) << 64)
+
+def get_fs_base():
+ '''Fetch %fs base value using arch_prctl(ARCH_GET_FS)'''
+    # %rsp - 120 is scratch space according to the System V ABI
+ old = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
+ gdb.execute('call arch_prctl(0x1003, $rsp - 120)', False, True)
+ fs_base = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
+ gdb.execute('set *(uint64_t*)($rsp - 120) = %s' % old, False, True)
+ return fs_base
+
+def get_glibc_pointer_guard():
+ '''Fetch glibc pointer guard value'''
+ fs_base = get_fs_base()
+ return gdb.parse_and_eval('*(uint64_t*)((uint64_t)%s + 0x30)' % fs_base)
+
+def glibc_ptr_demangle(val, pointer_guard):
+ '''Undo effect of glibc's PTR_MANGLE()'''
+ return gdb.parse_and_eval('(((uint64_t)%s >> 0x11) | ((uint64_t)%s << (64 - 0x11))) ^ (uint64_t)%s' % (val, val, pointer_guard))
+
+def bt_jmpbuf(jmpbuf):
+ '''Backtrace a jmpbuf'''
+ JB_RBX = 0
+ JB_RBP = 1
+ JB_R12 = 2
+ JB_R13 = 3
+ JB_R14 = 4
+ JB_R15 = 5
+ JB_RSP = 6
+ JB_PC = 7
+
+ old_rbx = gdb.parse_and_eval('(uint64_t)$rbx')
+ old_rbp = gdb.parse_and_eval('(uint64_t)$rbp')
+ old_rsp = gdb.parse_and_eval('(uint64_t)$rsp')
+ old_r12 = gdb.parse_and_eval('(uint64_t)$r12')
+ old_r13 = gdb.parse_and_eval('(uint64_t)$r13')
+ old_r14 = gdb.parse_and_eval('(uint64_t)$r14')
+ old_r15 = gdb.parse_and_eval('(uint64_t)$r15')
+ old_rip = gdb.parse_and_eval('(uint64_t)$rip')
+
+ pointer_guard = get_glibc_pointer_guard()
+ gdb.execute('set $rbx = %s' % jmpbuf[JB_RBX])
+ gdb.execute('set $rbp = %s' % glibc_ptr_demangle(jmpbuf[JB_RBP], pointer_guard))
+ gdb.execute('set $rsp = %s' % glibc_ptr_demangle(jmpbuf[JB_RSP], pointer_guard))
+ gdb.execute('set $r12 = %s' % jmpbuf[JB_R12])
+ gdb.execute('set $r13 = %s' % jmpbuf[JB_R13])
+ gdb.execute('set $r14 = %s' % jmpbuf[JB_R14])
+ gdb.execute('set $r15 = %s' % jmpbuf[JB_R15])
+ gdb.execute('set $rip = %s' % glibc_ptr_demangle(jmpbuf[JB_PC], pointer_guard))
+
+ gdb.execute('bt')
+
+ gdb.execute('set $rbx = %s' % old_rbx)
+ gdb.execute('set $rbp = %s' % old_rbp)
+ gdb.execute('set $rsp = %s' % old_rsp)
+ gdb.execute('set $r12 = %s' % old_r12)
+ gdb.execute('set $r13 = %s' % old_r13)
+ gdb.execute('set $r14 = %s' % old_r14)
+ gdb.execute('set $r15 = %s' % old_r15)
+ gdb.execute('set $rip = %s' % old_rip)
+
+class QemuCommand(gdb.Command):
+ '''Prefix for QEMU debug support commands'''
+ def __init__(self):
+ gdb.Command.__init__(self, 'qemu', gdb.COMMAND_DATA,
+ gdb.COMPLETE_NONE, True)
+
+class CoroutineCommand(gdb.Command):
+ '''Display coroutine backtrace'''
+ def __init__(self):
+ gdb.Command.__init__(self, 'qemu coroutine', gdb.COMMAND_DATA,
+ gdb.COMPLETE_NONE)
+
+ def invoke(self, arg, from_tty):
+ argv = gdb.string_to_argv(arg)
+ if len(argv) != 1:
+ gdb.write('usage: qemu coroutine <coroutine-pointer>\n')
+ return
+
+ coroutine_pointer = gdb.parse_and_eval(argv[0]).cast(gdb.lookup_type('CoroutineUContext').pointer())
+ bt_jmpbuf(coroutine_pointer['env']['__jmpbuf'])
+
+class MtreeCommand(gdb.Command):
+ '''Display the memory tree hierarchy'''
+ def __init__(self):
+ gdb.Command.__init__(self, 'qemu mtree', gdb.COMMAND_DATA,
+ gdb.COMPLETE_NONE)
+ self.queue = []
+ def invoke(self, arg, from_tty):
+ self.seen = set()
+ self.queue_root('address_space_memory')
+ self.queue_root('address_space_io')
+ self.process_queue()
+ def queue_root(self, varname):
+ ptr = gdb.parse_and_eval(varname)['root']
+ self.queue.append(ptr)
+ def process_queue(self):
+ while self.queue:
+ ptr = self.queue.pop(0)
+ if long(ptr) in self.seen:
+ continue
+ self.print_item(ptr)
+ def print_item(self, ptr, offset = gdb.Value(0), level = 0):
+ self.seen.add(long(ptr))
+ addr = ptr['addr']
+ addr += offset
+ size = int128(ptr['size'])
+ alias = ptr['alias']
+ klass = ''
+ if not isnull(alias):
+ klass = ' (alias)'
+ elif not isnull(ptr['ops']):
+ klass = ' (I/O)'
+ elif bool(ptr['ram']):
+ klass = ' (RAM)'
+ gdb.write('%s%016x-%016x %s%s (@ %s)\n'
+ % (' ' * level,
+ long(addr),
+ long(addr + (size - 1)),
+ ptr['name'].string(),
+ klass,
+ ptr,
+ ),
+ gdb.STDOUT)
+ if not isnull(alias):
+ gdb.write('%s alias: %s@%016x (@ %s)\n' %
+ (' ' * level,
+ alias['name'].string(),
+ ptr['alias_offset'],
+ alias,
+ ),
+ gdb.STDOUT)
+ self.queue.append(alias)
+ subregion = ptr['subregions']['tqh_first']
+ level += 1
+ while not isnull(subregion):
+ self.print_item(subregion, addr, level)
+ subregion = subregion['subregions_link']['tqe_next']
+
+QemuCommand()
+CoroutineCommand()
+MtreeCommand()
diff --git a/scripts/qemu-guest-agent/fsfreeze-hook b/scripts/qemu-guest-agent/fsfreeze-hook
new file mode 100755
index 00000000..c27b29f2
--- /dev/null
+++ b/scripts/qemu-guest-agent/fsfreeze-hook
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+# This script is executed when the guest agent receives the fsfreeze-freeze
+# or fsfreeze-thaw command, provided it is specified with the --fsfreeze-hook
+# (-F) option of qemu-ga or placed in the default path
+# (/etc/qemu/fsfreeze-hook). When the agent receives an fsfreeze-freeze
+# request, this script is invoked with the "freeze" argument before the
+# filesystem is frozen; for an fsfreeze-thaw request, it is invoked with the
+# "thaw" argument after the filesystem is thawed.
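+#
+# A script dropped into fsfreeze-hook.d receives the same argument; a minimal
+# sketch (hypothetical paths and commands):
+#   #!/bin/sh
+#   case "$1" in
+#       freeze) /usr/local/bin/myapp quiesce ;;
+#       thaw)   /usr/local/bin/myapp resume  ;;
+#   esac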
+
+LOGFILE=/var/log/qga-fsfreeze-hook.log
+FSFREEZE_D=$(dirname -- "$0")/fsfreeze-hook.d
+
+# Check whether file $1 is a backup or rpm-generated file and should be ignored
+is_ignored_file() {
+ case "$1" in
+ *~ | *.bak | *.orig | *.rpmnew | *.rpmorig | *.rpmsave | *.sample)
+ return 0 ;;
+ esac
+ return 1
+}
+
+# Iterate executables in directory "fsfreeze-hook.d" with the specified args
+[ ! -d "$FSFREEZE_D" ] && exit 0
+for file in "$FSFREEZE_D"/* ; do
+ is_ignored_file "$file" && continue
+ [ -x "$file" ] || continue
+ printf "$(date): execute $file $@\n" >>$LOGFILE
+ "$file" "$@" >>$LOGFILE 2>&1
+ STATUS=$?
+ printf "$(date): $file finished with status=$STATUS\n" >>$LOGFILE
+done
+
+exit 0
diff --git a/scripts/qemu-guest-agent/fsfreeze-hook.d/mysql-flush.sh.sample b/scripts/qemu-guest-agent/fsfreeze-hook.d/mysql-flush.sh.sample
new file mode 100755
index 00000000..2b4fa3ae
--- /dev/null
+++ b/scripts/qemu-guest-agent/fsfreeze-hook.d/mysql-flush.sh.sample
@@ -0,0 +1,56 @@
+#!/bin/sh
+
+# Flush MySQL tables to disk before the filesystem is frozen.
+# At the same time, this keeps a read lock in order to avoid write accesses
+# from other clients until the filesystem is thawed.
+
+MYSQL="/usr/bin/mysql"
+MYSQL_OPTS="-uroot" #"-prootpassword"
+FIFO=/var/run/mysql-flush.fifo
+
+# Check that mysql is installed and that the server is running
+[ -x "$MYSQL" ] && "$MYSQL" $MYSQL_OPTS < /dev/null || exit 0
+
+flush_and_wait() {
+ printf "FLUSH TABLES WITH READ LOCK \\G\n"
+ trap 'printf "$(date): $0 is killed\n">&2' HUP INT QUIT ALRM TERM
+ read < $FIFO
+ printf "UNLOCK TABLES \\G\n"
+ rm -f $FIFO
+}
+
+case "$1" in
+ freeze)
+ mkfifo $FIFO || exit 1
+ flush_and_wait | "$MYSQL" $MYSQL_OPTS &
+ # wait until every block is flushed
+ while [ "$(echo 'SHOW STATUS LIKE "Key_blocks_not_flushed"' |\
+ "$MYSQL" $MYSQL_OPTS | tail -1 | cut -f 2)" -gt 0 ]; do
+ sleep 1
+ done
+ # for InnoDB, wait until every log is flushed
+ INNODB_STATUS=$(mktemp /tmp/mysql-flush.XXXXXX)
+ [ $? -ne 0 ] && exit 2
+ trap "rm -f $INNODB_STATUS; exit 1" HUP INT QUIT ALRM TERM
+ while :; do
+ printf "SHOW ENGINE INNODB STATUS \\G" |\
+ "$MYSQL" $MYSQL_OPTS > $INNODB_STATUS
+ LOG_CURRENT=$(grep 'Log sequence number' $INNODB_STATUS |\
+ tr -s ' ' | cut -d' ' -f4)
+ LOG_FLUSHED=$(grep 'Log flushed up to' $INNODB_STATUS |\
+ tr -s ' ' | cut -d' ' -f5)
+ [ "$LOG_CURRENT" = "$LOG_FLUSHED" ] && break
+ sleep 1
+ done
+ rm -f $INNODB_STATUS
+ ;;
+
+ thaw)
+ [ ! -p $FIFO ] && exit 1
+ echo > $FIFO
+ ;;
+
+ *)
+ exit 1
+ ;;
+esac
diff --git a/scripts/qmp/qemu-ga-client b/scripts/qmp/qemu-ga-client
new file mode 100755
index 00000000..9908f210
--- /dev/null
+++ b/scripts/qmp/qemu-ga-client
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+
+# QEMU Guest Agent Client
+#
+# Copyright (C) 2012 Ryota Ozaki <ozaki.ryota@gmail.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+#
+# Usage:
+#
+# Start QEMU with:
+#
+# # qemu [...] -chardev socket,path=/tmp/qga.sock,server,nowait,id=qga0 \
+# -device virtio-serial -device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0
+#
+# Run the script:
+#
+# $ qemu-ga-client --address=/tmp/qga.sock <command> [args...]
+#
+# or
+#
+# $ export QGA_CLIENT_ADDRESS=/tmp/qga.sock
+# $ qemu-ga-client <command> [args...]
+#
+# For example:
+#
+# $ qemu-ga-client cat /etc/resolv.conf
+# # Generated by NetworkManager
+# nameserver 10.0.2.3
+# $ qemu-ga-client fsfreeze status
+# thawed
+# $ qemu-ga-client fsfreeze freeze
+# 2 filesystems frozen
+#
+# See also: http://wiki.qemu-project.org/Features/QAPI/GuestAgent
+#
+
+import base64
+import random
+
+import qmp
+
+
+class QemuGuestAgent(qmp.QEMUMonitorProtocol):
+ def __getattr__(self, name):
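+        # Turn attribute access into guest agent commands, e.g. (illustrative)
+        # qga.fsfreeze_status() issues 'guest-fsfreeze-status' and
+        # qga.file_open(path=...) issues 'guest-file-open'.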
+ def wrapper(**kwds):
+ return self.command('guest-' + name.replace('_', '-'), **kwds)
+ return wrapper
+
+
+class QemuGuestAgentClient:
+ error = QemuGuestAgent.error
+
+ def __init__(self, address):
+ self.qga = QemuGuestAgent(address)
+ self.qga.connect(negotiate=False)
+
+ def sync(self, timeout=3):
+ # Avoid being blocked forever
+ if not self.ping(timeout):
+            raise EnvironmentError('Agent does not seem to be alive')
+ uid = random.randint(0, (1 << 32) - 1)
+ while True:
+ ret = self.qga.sync(id=uid)
+ if isinstance(ret, int) and int(ret) == uid:
+ break
+
+ def __file_read_all(self, handle):
+ eof = False
+ data = ''
+ while not eof:
+ ret = self.qga.file_read(handle=handle, count=1024)
+ _data = base64.b64decode(ret['buf-b64'])
+ data += _data
+ eof = ret['eof']
+ return data
+
+ def read(self, path):
+ handle = self.qga.file_open(path=path)
+ try:
+ data = self.__file_read_all(handle)
+ finally:
+ self.qga.file_close(handle=handle)
+ return data
+
+ def info(self):
+ info = self.qga.info()
+
+ msgs = []
+ msgs.append('version: ' + info['version'])
+ msgs.append('supported_commands:')
+ enabled = [c['name'] for c in info['supported_commands'] if c['enabled']]
+ msgs.append('\tenabled: ' + ', '.join(enabled))
+ disabled = [c['name'] for c in info['supported_commands'] if not c['enabled']]
+ msgs.append('\tdisabled: ' + ', '.join(disabled))
+
+ return '\n'.join(msgs)
+
+ def __gen_ipv4_netmask(self, prefixlen):
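+        # Convert a prefix length to dotted-quad form, e.g. 24 -> '255.255.255.0'.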
+ mask = int('1' * prefixlen + '0' * (32 - prefixlen), 2)
+ return '.'.join([str(mask >> 24),
+ str((mask >> 16) & 0xff),
+ str((mask >> 8) & 0xff),
+ str(mask & 0xff)])
+
+ def ifconfig(self):
+ nifs = self.qga.network_get_interfaces()
+
+ msgs = []
+ for nif in nifs:
+ msgs.append(nif['name'] + ':')
+ if 'ip-addresses' in nif:
+ for ipaddr in nif['ip-addresses']:
+ if ipaddr['ip-address-type'] == 'ipv4':
+ addr = ipaddr['ip-address']
+ mask = self.__gen_ipv4_netmask(int(ipaddr['prefix']))
+ msgs.append("\tinet %s netmask %s" % (addr, mask))
+ elif ipaddr['ip-address-type'] == 'ipv6':
+ addr = ipaddr['ip-address']
+ prefix = ipaddr['prefix']
+ msgs.append("\tinet6 %s prefixlen %s" % (addr, prefix))
+ if nif['hardware-address'] != '00:00:00:00:00:00':
+ msgs.append("\tether " + nif['hardware-address'])
+
+ return '\n'.join(msgs)
+
+ def ping(self, timeout):
+ self.qga.settimeout(timeout)
+ try:
+ self.qga.ping()
+ except self.qga.timeout:
+ return False
+ return True
+
+ def fsfreeze(self, cmd):
+ if cmd not in ['status', 'freeze', 'thaw']:
+ raise StandardError('Invalid command: ' + cmd)
+
+ return getattr(self.qga, 'fsfreeze' + '_' + cmd)()
+
+ def fstrim(self, minimum=0):
+ return getattr(self.qga, 'fstrim')(minimum=minimum)
+
+ def suspend(self, mode):
+ if mode not in ['disk', 'ram', 'hybrid']:
+ raise StandardError('Invalid mode: ' + mode)
+
+ try:
+ getattr(self.qga, 'suspend' + '_' + mode)()
+            # On error an exception will be raised
+ except self.qga.timeout:
+            # On success the command will time out
+ return
+
+ def shutdown(self, mode='powerdown'):
+ if mode not in ['powerdown', 'halt', 'reboot']:
+ raise StandardError('Invalid mode: ' + mode)
+
+ try:
+ self.qga.shutdown(mode=mode)
+ except self.qga.timeout:
+ return
+
+
+def _cmd_cat(client, args):
+ if len(args) != 1:
+ print('Invalid argument')
+ print('Usage: cat <file>')
+ sys.exit(1)
+ print(client.read(args[0]))
+
+
+def _cmd_fsfreeze(client, args):
+ usage = 'Usage: fsfreeze status|freeze|thaw'
+ if len(args) != 1:
+ print('Invalid argument')
+ print(usage)
+ sys.exit(1)
+ if args[0] not in ['status', 'freeze', 'thaw']:
+ print('Invalid command: ' + args[0])
+ print(usage)
+ sys.exit(1)
+ cmd = args[0]
+ ret = client.fsfreeze(cmd)
+ if cmd == 'status':
+ print(ret)
+ elif cmd == 'freeze':
+ print("%d filesystems frozen" % ret)
+ else:
+ print("%d filesystems thawed" % ret)
+
+
+def _cmd_fstrim(client, args):
+ if len(args) == 0:
+ minimum = 0
+ else:
+ minimum = int(args[0])
+ print(client.fstrim(minimum))
+
+
+def _cmd_ifconfig(client, args):
+ print(client.ifconfig())
+
+
+def _cmd_info(client, args):
+ print(client.info())
+
+
+def _cmd_ping(client, args):
+ if len(args) == 0:
+ timeout = 3
+ else:
+ timeout = float(args[0])
+ alive = client.ping(timeout)
+ if not alive:
+        print("No response in %s sec" % timeout)
+ sys.exit(1)
+
+
+def _cmd_suspend(client, args):
+ usage = 'Usage: suspend disk|ram|hybrid'
+ if len(args) != 1:
+        print('Invalid number of arguments')
+ print(usage)
+ sys.exit(1)
+ if args[0] not in ['disk', 'ram', 'hybrid']:
+ print('Invalid command: ' + args[0])
+ print(usage)
+ sys.exit(1)
+ client.suspend(args[0])
+
+
+def _cmd_shutdown(client, args):
+ client.shutdown()
+_cmd_powerdown = _cmd_shutdown
+
+
+def _cmd_halt(client, args):
+ client.shutdown('halt')
+
+
+def _cmd_reboot(client, args):
+ client.shutdown('reboot')
+
+
+commands = [m.replace('_cmd_', '') for m in dir() if '_cmd_' in m]
+
+
+def main(address, cmd, args):
+ if not os.path.exists(address):
+ print('%s not found' % address)
+ sys.exit(1)
+
+ if cmd not in commands:
+ print('Invalid command: ' + cmd)
+ print('Available commands: ' + ', '.join(commands))
+ sys.exit(1)
+
+ try:
+ client = QemuGuestAgentClient(address)
+ except QemuGuestAgent.error, e:
+ import errno
+
+ print(e)
+ if e.errno == errno.ECONNREFUSED:
+ print('Hint: qemu is not running?')
+ sys.exit(1)
+
+ if cmd == 'fsfreeze' and args[0] == 'freeze':
+ client.sync(60)
+ elif cmd != 'ping':
+ client.sync()
+
+ globals()['_cmd_' + cmd](client, args)
+
+
+if __name__ == '__main__':
+ import sys
+ import os
+ import optparse
+
+ address = os.environ['QGA_CLIENT_ADDRESS'] if 'QGA_CLIENT_ADDRESS' in os.environ else None
+
+ usage = "%prog [--address=<unix_path>|<ipv4_address>] <command> [args...]\n"
+ usage += '<command>: ' + ', '.join(commands)
+ parser = optparse.OptionParser(usage=usage)
+ parser.add_option('--address', action='store', type='string',
+                      default=address, help='Specify an ip:port pair or a unix socket path')
+ options, args = parser.parse_args()
+
+ address = options.address
+ if address is None:
+ parser.error('address is not specified')
+ sys.exit(1)
+
+ if len(args) == 0:
+        parser.error('command is not specified')
+ sys.exit(1)
+
+ main(address, args[0], args[1:])
diff --git a/scripts/qmp/qmp b/scripts/qmp/qmp
new file mode 100755
index 00000000..1db3c7ff
--- /dev/null
+++ b/scripts/qmp/qmp
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+#
+# QMP command line tool
+#
+# Copyright IBM, Corp. 2011
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPLv2 or later.
+# See the COPYING file in the top-level directory.
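+#
+# Usage (illustrative; the socket path is only an example):
+#   $ qmp --path=/tmp/qmp-sock query-status
+#   $ QMP_PATH=/tmp/qmp-sock qmp query-block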
+
+import sys, os
+from qmp import QEMUMonitorProtocol
+
+def print_response(rsp, prefix=[]):
+ if type(rsp) == list:
+ i = 0
+ for item in rsp:
+ if prefix == []:
+ prefix = ['item']
+ print_response(item, prefix[:-1] + ['%s[%d]' % (prefix[-1], i)])
+ i += 1
+ elif type(rsp) == dict:
+ for key in rsp.keys():
+ print_response(rsp[key], prefix + [key])
+ else:
+ if len(prefix):
+ print '%s: %s' % ('.'.join(prefix), rsp)
+ else:
+ print '%s' % (rsp)
+
+def main(args):
+ path = None
+
+ # Use QMP_PATH if it's set
+ if os.environ.has_key('QMP_PATH'):
+ path = os.environ['QMP_PATH']
+
+ while len(args):
+ arg = args[0]
+
+ if arg.startswith('--'):
+ arg = arg[2:]
+ if arg.find('=') == -1:
+ value = True
+ else:
+ arg, value = arg.split('=', 1)
+
+ if arg in ['path']:
+ if type(value) == str:
+ path = value
+ elif arg in ['help']:
+ os.execlp('man', 'man', 'qmp')
+ else:
+ print 'Unknown argument "%s"' % arg
+
+ args = args[1:]
+ else:
+ break
+
+ if not path:
+ print "QMP path isn't set, use --path=qmp-monitor-address or set QMP_PATH"
+ return 1
+
+ if len(args):
+ command, args = args[0], args[1:]
+ else:
+ print 'No command found'
+ print 'Usage: "qmp [--path=qmp-monitor-address] qmp-cmd arguments"'
+ return 1
+
+ if command in ['help']:
+ os.execlp('man', 'man', 'qmp')
+
+ srv = QEMUMonitorProtocol(path)
+ srv.connect()
+
+ def do_command(srv, cmd, **kwds):
+ rsp = srv.cmd(cmd, kwds)
+ if rsp.has_key('error'):
+ raise Exception(rsp['error']['desc'])
+ return rsp['return']
+
+ commands = map(lambda x: x['name'], do_command(srv, 'query-commands'))
+
+ srv.close()
+
+ if command not in commands:
+ fullcmd = 'qmp-%s' % command
+ try:
+ os.environ['QMP_PATH'] = path
+ os.execvp(fullcmd, [fullcmd] + args)
+ except OSError, (errno, msg):
+ if errno == 2:
+ print 'Command "%s" not found.' % (fullcmd)
+ return 1
+ raise
+ return 0
+
+ srv = QEMUMonitorProtocol(path)
+ srv.connect()
+
+ arguments = {}
+ for arg in args:
+ if not arg.startswith('--'):
+ print 'Unknown argument "%s"' % arg
+ return 1
+
+ arg = arg[2:]
+ if arg.find('=') == -1:
+ value = True
+ else:
+ arg, value = arg.split('=', 1)
+
+ if arg in ['help']:
+ os.execlp('man', 'man', 'qmp-%s' % command)
+ return 1
+
+ arguments[arg] = value
+
+ rsp = do_command(srv, command, **arguments)
+ print_response(rsp)
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/scripts/qmp/qmp-shell b/scripts/qmp/qmp-shell
new file mode 100755
index 00000000..65280d29
--- /dev/null
+++ b/scripts/qmp/qmp-shell
@@ -0,0 +1,390 @@
+#!/usr/bin/python
+#
+# Low-level QEMU shell on top of QMP.
+#
+# Copyright (C) 2009, 2010 Red Hat Inc.
+#
+# Authors:
+# Luiz Capitulino <lcapitulino@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+#
+# Usage:
+#
+# Start QEMU with:
+#
+# # qemu [...] -qmp unix:./qmp-sock,server
+#
+# Run the shell:
+#
+# $ qmp-shell ./qmp-sock
+#
+# Commands have the following format:
+#
+# < command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ]
+#
+# For example:
+#
+# (QEMU) device_add driver=e1000 id=net1
+# {u'return': {}}
+# (QEMU)
+
+import qmp
+import json
+import ast
+import readline
+import sys
+import pprint
+
+class QMPCompleter(list):
+ def complete(self, text, state):
+ for cmd in self:
+ if cmd.startswith(text):
+ if not state:
+ return cmd
+ else:
+ state -= 1
+
+class QMPShellError(Exception):
+ pass
+
+class QMPShellBadPort(QMPShellError):
+ pass
+
+class FuzzyJSON(ast.NodeTransformer):
+ '''This extension of ast.NodeTransformer filters literal "true/false/null"
+ values in an AST and replaces them by proper "True/False/None" values that
+ Python can properly evaluate.'''
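+    # e.g. the argument value {'enabled': true, 'id': null} is not valid JSON,
+    # but after this transform ast.literal_eval() accepts it (illustrative).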
+ def visit_Name(self, node):
+ if node.id == 'true':
+ node.id = 'True'
+ if node.id == 'false':
+ node.id = 'False'
+ if node.id == 'null':
+ node.id = 'None'
+ return node
+
+# TODO: QMPShell's interface is a bit ugly (eg. _fill_completion() and
+# _execute_cmd()). Let's design a better one.
+class QMPShell(qmp.QEMUMonitorProtocol):
+ def __init__(self, address, pp=None):
+ qmp.QEMUMonitorProtocol.__init__(self, self.__get_address(address))
+ self._greeting = None
+ self._completer = None
+ self._pp = pp
+ self._transmode = False
+ self._actions = list()
+
+ def __get_address(self, arg):
+ """
+        Figure out if the argument is in the host:port form; if it's not, it's
+ probably a file path.
+ """
+ addr = arg.split(':')
+ if len(addr) == 2:
+ try:
+ port = int(addr[1])
+ except ValueError:
+ raise QMPShellBadPort
+ return ( addr[0], port )
+ # socket path
+ return arg
+
+ def _fill_completion(self):
+ for cmd in self.cmd('query-commands')['return']:
+ self._completer.append(cmd['name'])
+
+ def __completer_setup(self):
+ self._completer = QMPCompleter()
+ self._fill_completion()
+ readline.set_completer(self._completer.complete)
+ readline.parse_and_bind("tab: complete")
+ # XXX: default delimiters conflict with some command names (eg. query-),
+ # clearing everything as it doesn't seem to matter
+ readline.set_completer_delims('')
+
+ def __parse_value(self, val):
+ try:
+ return int(val)
+ except ValueError:
+ pass
+
+ if val.lower() == 'true':
+ return True
+ if val.lower() == 'false':
+ return False
+ if val.startswith(('{', '[')):
+ # Try first as pure JSON:
+ try:
+ return json.loads(val)
+ except ValueError:
+ pass
+ # Try once again as FuzzyJSON:
+ try:
+ st = ast.parse(val, mode='eval')
+ return ast.literal_eval(FuzzyJSON().visit(st))
+ except SyntaxError:
+ pass
+ except ValueError:
+ pass
+ return val
+
+ def __cli_expr(self, tokens, parent):
+ for arg in tokens:
+ (key, _, val) = arg.partition('=')
+ if not val:
+ raise QMPShellError("Expected a key=value pair, got '%s'" % arg)
+
+ value = self.__parse_value(val)
+ optpath = key.split('.')
+ curpath = []
+ for p in optpath[:-1]:
+ curpath.append(p)
+ d = parent.get(p, {})
+ if type(d) is not dict:
+ raise QMPShellError('Cannot use "%s" as both leaf and non-leaf key' % '.'.join(curpath))
+ parent[p] = d
+ parent = d
+ if optpath[-1] in parent:
+ if type(parent[optpath[-1]]) is dict:
+ raise QMPShellError('Cannot use "%s" as both leaf and non-leaf key' % '.'.join(curpath))
+ else:
+ raise QMPShellError('Cannot set "%s" multiple times' % key)
+ parent[optpath[-1]] = value
+
+ def __build_cmd(self, cmdline):
+ """
+ Build a QMP input object from a user provided command-line in the
+ following format:
+
+ < command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ]
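+
+        For example, "device_add driver=e1000 id=net1" (the command shown in
+        the header comment above) becomes:
+
+            {'execute': 'device_add',
+             'arguments': {'driver': 'e1000', 'id': 'net1'}}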
+ """
+ cmdargs = cmdline.split()
+
+ # Transactional CLI entry/exit:
+ if cmdargs[0] == 'transaction(':
+ self._transmode = True
+ cmdargs.pop(0)
+ elif cmdargs[0] == ')' and self._transmode:
+ self._transmode = False
+ if len(cmdargs) > 1:
+ raise QMPShellError("Unexpected input after close of Transaction sub-shell")
+ qmpcmd = { 'execute': 'transaction',
+ 'arguments': { 'actions': self._actions } }
+ self._actions = list()
+ return qmpcmd
+
+ # Nothing to process?
+ if not cmdargs:
+ return None
+
+ # Parse and then cache this Transactional Action
+ if self._transmode:
+ finalize = False
+ action = { 'type': cmdargs[0], 'data': {} }
+ if cmdargs[-1] == ')':
+ cmdargs.pop(-1)
+ finalize = True
+ self.__cli_expr(cmdargs[1:], action['data'])
+ self._actions.append(action)
+ return self.__build_cmd(')') if finalize else None
+
+ # Standard command: parse and return it to be executed.
+ qmpcmd = { 'execute': cmdargs[0], 'arguments': {} }
+ self.__cli_expr(cmdargs[1:], qmpcmd['arguments'])
+ return qmpcmd
+
+ def _print(self, qmp):
+ jsobj = json.dumps(qmp)
+ if self._pp is not None:
+ self._pp.pprint(jsobj)
+ else:
+ print str(jsobj)
+
+ def _execute_cmd(self, cmdline):
+ try:
+ qmpcmd = self.__build_cmd(cmdline)
+ except Exception, e:
+ print 'Error while parsing command line: %s' % e
+ print 'command format: <command-name> ',
+ print '[arg-name1=arg1] ... [arg-nameN=argN]'
+ return True
+ # For transaction mode, we may have just cached the action:
+ if qmpcmd is None:
+ return True
+ if self._verbose:
+ self._print(qmpcmd)
+ resp = self.cmd_obj(qmpcmd)
+ if resp is None:
+ print 'Disconnected'
+ return False
+ self._print(resp)
+ return True
+
+ def connect(self):
+ self._greeting = qmp.QEMUMonitorProtocol.connect(self)
+ self.__completer_setup()
+
+ def show_banner(self, msg='Welcome to the QMP low-level shell!'):
+ print msg
+ version = self._greeting['QMP']['version']['qemu']
+ print 'Connected to QEMU %d.%d.%d\n' % (version['major'],version['minor'],version['micro'])
+
+ def get_prompt(self):
+ if self._transmode:
+ return "TRANS> "
+ return "(QEMU) "
+
+ def read_exec_command(self, prompt):
+ """
+ Read and execute a command.
+
+ @return True if execution was ok, return False if disconnected.
+ """
+ try:
+ cmdline = raw_input(prompt)
+ except EOFError:
+ print
+ return False
+ if cmdline == '':
+ for ev in self.get_events():
+ print ev
+ self.clear_events()
+ return True
+ else:
+ return self._execute_cmd(cmdline)
+
+ def set_verbosity(self, verbose):
+ self._verbose = verbose
+
+class HMPShell(QMPShell):
+ def __init__(self, address):
+ QMPShell.__init__(self, address)
+ self.__cpu_index = 0
+
+ def __cmd_completion(self):
+ for cmd in self.__cmd_passthrough('help')['return'].split('\r\n'):
+ if cmd and cmd[0] != '[' and cmd[0] != '\t':
+ name = cmd.split()[0] # drop help text
+ if name == 'info':
+ continue
+ if name.find('|') != -1:
+ # Command in the form 'foobar|f' or 'f|foobar', take the
+ # full name
+ opt = name.split('|')
+ if len(opt[0]) == 1:
+ name = opt[1]
+ else:
+ name = opt[0]
+ self._completer.append(name)
+ self._completer.append('help ' + name) # help completion
+
+ def __info_completion(self):
+ for cmd in self.__cmd_passthrough('info')['return'].split('\r\n'):
+ if cmd:
+ self._completer.append('info ' + cmd.split()[1])
+
+ def __other_completion(self):
+ # special cases
+ self._completer.append('help info')
+
+ def _fill_completion(self):
+ self.__cmd_completion()
+ self.__info_completion()
+ self.__other_completion()
+
+ def __cmd_passthrough(self, cmdline, cpu_index = 0):
+ return self.cmd_obj({ 'execute': 'human-monitor-command', 'arguments':
+ { 'command-line': cmdline,
+ 'cpu-index': cpu_index } })
+
+ def _execute_cmd(self, cmdline):
+ if cmdline.split()[0] == "cpu":
+ # trap the cpu command, it requires special setting
+ try:
+ idx = int(cmdline.split()[1])
+ if not 'return' in self.__cmd_passthrough('info version', idx):
+ print 'bad CPU index'
+ return True
+ self.__cpu_index = idx
+ except ValueError:
+ print 'cpu command takes an integer argument'
+ return True
+ resp = self.__cmd_passthrough(cmdline, self.__cpu_index)
+ if resp is None:
+ print 'Disconnected'
+ return False
+ assert 'return' in resp or 'error' in resp
+ if 'return' in resp:
+ # Success
+ if len(resp['return']) > 0:
+ print resp['return'],
+ else:
+ # Error
+ print '%s: %s' % (resp['error']['class'], resp['error']['desc'])
+ return True
+
+ def show_banner(self):
+ QMPShell.show_banner(self, msg='Welcome to the HMP shell!')
+
+def die(msg):
+ sys.stderr.write('ERROR: %s\n' % msg)
+ sys.exit(1)
+
+def fail_cmdline(option=None):
+ if option:
+ sys.stderr.write('ERROR: bad command-line option \'%s\'\n' % option)
+    sys.stderr.write('qmp-shell [ -v ] [ -p ] [ -H ] <UNIX socket path> | <TCP address:port>\n')
+ sys.exit(1)
+
+def main():
+ addr = ''
+ qemu = None
+ hmp = False
+ pp = None
+ verbose = False
+
+ try:
+ for arg in sys.argv[1:]:
+ if arg == "-H":
+ if qemu is not None:
+ fail_cmdline(arg)
+ hmp = True
+ elif arg == "-p":
+ if pp is not None:
+ fail_cmdline(arg)
+ pp = pprint.PrettyPrinter(indent=4)
+ elif arg == "-v":
+ verbose = True
+ else:
+ if qemu is not None:
+ fail_cmdline(arg)
+ if hmp:
+ qemu = HMPShell(arg)
+ else:
+ qemu = QMPShell(arg, pp)
+ addr = arg
+
+ if qemu is None:
+ fail_cmdline()
+ except QMPShellBadPort:
+ die('bad port number in command-line')
+
+ try:
+ qemu.connect()
+ except qmp.QMPConnectError:
+ die('Didn\'t get QMP greeting message')
+ except qmp.QMPCapabilitiesError:
+ die('Could not negotiate capabilities')
+ except qemu.error:
+ die('Could not connect to %s' % addr)
+
+ qemu.show_banner()
+ qemu.set_verbosity(verbose)
+ while qemu.read_exec_command(qemu.get_prompt()):
+ pass
+ qemu.close()
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/qmp/qmp.py b/scripts/qmp/qmp.py
new file mode 100644
index 00000000..1d38e3e9
--- /dev/null
+++ b/scripts/qmp/qmp.py
@@ -0,0 +1,236 @@
+# QEMU Monitor Protocol Python class
+#
+# Copyright (C) 2009, 2010 Red Hat Inc.
+#
+# Authors:
+# Luiz Capitulino <lcapitulino@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+
+import json
+import errno
+import socket
+
+class QMPError(Exception):
+ pass
+
+class QMPConnectError(QMPError):
+ pass
+
+class QMPCapabilitiesError(QMPError):
+ pass
+
+class QMPTimeoutError(QMPError):
+ pass
+
+class QEMUMonitorProtocol:
+ def __init__(self, address, server=False):
+ """
+ Create a QEMUMonitorProtocol class.
+
+ @param address: QEMU address, can be either a unix socket path (string)
+ or a tuple in the form ( address, port ) for a TCP
+ connection
+ @param server: server mode listens on the socket (bool)
+ @raise socket.error on socket connection errors
+ @note No connection is established, this is done by the connect() or
+ accept() methods
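+
+        Typical use (illustrative; the socket path is only an example):
+            qmp = QEMUMonitorProtocol('/tmp/qmp-sock')
+            qmp.connect()                  # returns the QMP greeting dict
+            qmp.command('query-status')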
+ """
+ self.__events = []
+ self.__address = address
+ self.__sock = self.__get_sock()
+ if server:
+ self.__sock.bind(self.__address)
+ self.__sock.listen(1)
+
+ def __get_sock(self):
+ if isinstance(self.__address, tuple):
+ family = socket.AF_INET
+ else:
+ family = socket.AF_UNIX
+ return socket.socket(family, socket.SOCK_STREAM)
+
+ def __negotiate_capabilities(self):
+ greeting = self.__json_read()
+ if greeting is None or not greeting.has_key('QMP'):
+ raise QMPConnectError
+ # Greeting seems ok, negotiate capabilities
+ resp = self.cmd('qmp_capabilities')
+ if "return" in resp:
+ return greeting
+ raise QMPCapabilitiesError
+
+ def __json_read(self, only_event=False):
+ while True:
+ data = self.__sockfile.readline()
+ if not data:
+ return
+ resp = json.loads(data)
+ if 'event' in resp:
+ self.__events.append(resp)
+ if not only_event:
+ continue
+ return resp
+
+ error = socket.error
+
+ def __get_events(self, wait=False):
+ """
+ Check for new events in the stream and cache them in __events.
+
+ @param wait (bool): block until an event is available.
+ @param wait (float): If wait is a float, treat it as a timeout value.
+
+ @raise QMPTimeoutError: If a timeout float is provided and the timeout
+ period elapses.
+ @raise QMPConnectError: If wait is True but no events could be retrieved
+ or if some other error occurred.
+ """
+
+ # Check for new events regardless and pull them into the cache:
+ self.__sock.setblocking(0)
+ try:
+ self.__json_read()
+ except socket.error, err:
+ if err[0] == errno.EAGAIN:
+ # No data available
+ pass
+ self.__sock.setblocking(1)
+
+ # Wait for new events, if needed.
+ # if wait is 0.0, this means "no wait" and is also implicitly false.
+ if not self.__events and wait:
+ if isinstance(wait, float):
+ self.__sock.settimeout(wait)
+ try:
+ ret = self.__json_read(only_event=True)
+ except socket.timeout:
+ raise QMPTimeoutError("Timeout waiting for event")
+ except:
+ raise QMPConnectError("Error while reading from socket")
+ if ret is None:
+ raise QMPConnectError("Error while reading from socket")
+ self.__sock.settimeout(None)
+
+ def connect(self, negotiate=True):
+ """
+ Connect to the QMP Monitor and perform capabilities negotiation.
+
+ @return QMP greeting dict
+ @raise socket.error on socket connection errors
+ @raise QMPConnectError if the greeting is not received
+ @raise QMPCapabilitiesError if capability negotiation fails
+ """
+ self.__sock.connect(self.__address)
+ self.__sockfile = self.__sock.makefile()
+ if negotiate:
+ return self.__negotiate_capabilities()
+
+ def accept(self):
+ """
+ Await connection from QMP Monitor and perform capabilities negotiation.
+
+ @return QMP greeting dict
+ @raise socket.error on socket connection errors
+ @raise QMPConnectError if the greeting is not received
+ @raise QMPCapabilitiesError if capability negotiation fails
+ """
+ self.__sock, _ = self.__sock.accept()
+ self.__sockfile = self.__sock.makefile()
+ return self.__negotiate_capabilities()
+
+ def cmd_obj(self, qmp_cmd):
+ """
+ Send a QMP command to the QMP Monitor.
+
+ @param qmp_cmd: QMP command to be sent as a Python dict
+ @return QMP response as a Python dict or None if the connection has
+ been closed
+ """
+ try:
+ self.__sock.sendall(json.dumps(qmp_cmd))
+ except socket.error, err:
+ if err[0] == errno.EPIPE:
+ return
+ raise socket.error(err)
+ return self.__json_read()
+
+ def cmd(self, name, args=None, id=None):
+ """
+ Build a QMP command and send it to the QMP Monitor.
+
+ @param name: command name (string)
+ @param args: command arguments (dict)
+ @param id: command id (dict, list, string or int)
+ """
+ qmp_cmd = { 'execute': name }
+ if args:
+ qmp_cmd['arguments'] = args
+ if id:
+ qmp_cmd['id'] = id
+ return self.cmd_obj(qmp_cmd)
+
+ def command(self, cmd, **kwds):
+ ret = self.cmd(cmd, kwds)
+ if ret.has_key('error'):
+ raise Exception(ret['error']['desc'])
+ return ret['return']
+
+ def pull_event(self, wait=False):
+ """
+ Get and delete the first available QMP event.
+
+ @param wait (bool): block until an event is available.
+ @param wait (float): If wait is a float, treat it as a timeout value.
+
+ @raise QMPTimeoutError: If a timeout float is provided and the timeout
+ period elapses.
+ @raise QMPConnectError: If wait is True but no events could be retrieved
+ or if some other error occurred.
+
+ @return The first available QMP event, or None.
+ """
+ self.__get_events(wait)
+
+ if self.__events:
+ return self.__events.pop(0)
+ return None
+
+ def get_events(self, wait=False):
+ """
+ Get a list of available QMP events.
+
+ @param wait (bool): block until an event is available.
+ @param wait (float): If wait is a float, treat it as a timeout value.
+
+ @raise QMPTimeoutError: If a timeout float is provided and the timeout
+ period elapses.
+ @raise QMPConnectError: If wait is True but no events could be retrieved
+ or if some other error occurred.
+
+ @return The list of available QMP events.
+ """
+ self.__get_events(wait)
+ return self.__events
+
+ def clear_events(self):
+ """
+ Clear current list of pending events.
+ """
+ self.__events = []
+
+ def close(self):
+ self.__sock.close()
+ self.__sockfile.close()
+
+ timeout = socket.timeout
+
+ def settimeout(self, timeout):
+ self.__sock.settimeout(timeout)
+
+ def get_sock_fd(self):
+ return self.__sock.fileno()
+
+ def is_scm_available(self):
+ return self.__sock.family == socket.AF_UNIX
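
For reference, a minimal sketch of driving QEMUMonitorProtocol from another script. It assumes a QEMU instance is already serving QMP on the given UNIX socket (the path and the use of query-status are illustrative), and is written in Python 2 to match the module above.

    from qmp import QEMUMonitorProtocol

    mon = QEMUMonitorProtocol('/tmp/qmp.sock')
    greeting = mon.connect()              # negotiates capabilities
    print greeting['QMP']['version']

    # Low-level form: returns the full response dict ('return' or 'error').
    resp = mon.cmd('query-status')
    print resp['return']['status']

    # High-level form: raises on 'error', returns only the 'return' value.
    status = mon.command('query-status')
    print status['running']

    # Drain any queued asynchronous events without blocking.
    for ev in mon.get_events():
        print ev['event']
    mon.clear_events()

    mon.close()
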
diff --git a/scripts/qmp/qom-fuse b/scripts/qmp/qom-fuse
new file mode 100755
index 00000000..5c6754aa
--- /dev/null
+++ b/scripts/qmp/qom-fuse
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+##
+# QEMU Object Model test tools
+#
+# Copyright IBM, Corp. 2012
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later. See
+# the COPYING file in the top-level directory.
+##
+
+import fuse, stat
+from fuse import Fuse
+import os, posix
+from errno import *
+from qmp import QEMUMonitorProtocol
+
+fuse.fuse_python_api = (0, 2)
+
+class QOMFS(Fuse):
+ def __init__(self, qmp, *args, **kwds):
+ Fuse.__init__(self, *args, **kwds)
+ self.qmp = qmp
+ self.qmp.connect()
+ self.ino_map = {}
+ self.ino_count = 1
+
+ def get_ino(self, path):
+ if self.ino_map.has_key(path):
+ return self.ino_map[path]
+ self.ino_map[path] = self.ino_count
+ self.ino_count += 1
+ return self.ino_map[path]
+
+ def is_object(self, path):
+ try:
+ items = self.qmp.command('qom-list', path=path)
+ return True
+ except:
+ return False
+
+ def is_property(self, path):
+ try:
+ path, prop = path.rsplit('/', 1)
+ for item in self.qmp.command('qom-list', path=path):
+ if item['name'] == prop:
+ return True
+ return False
+ except:
+ return False
+
+ def is_link(self, path):
+ try:
+ path, prop = path.rsplit('/', 1)
+ for item in self.qmp.command('qom-list', path=path):
+ if item['name'] == prop:
+ if item['type'].startswith('link<'):
+ return True
+ return False
+ return False
+ except:
+ return False
+
+ def read(self, path, length, offset):
+ if not self.is_property(path):
+ return -ENOENT
+
+ path, prop = path.rsplit('/', 1)
+ try:
+ data = str(self.qmp.command('qom-get', path=path, property=prop))
+ data += '\n' # make values shell friendly
+ except:
+ return -EPERM
+
+ if offset > len(data):
+ return ''
+
+ return str(data[offset:][:length])
+
+ def readlink(self, path):
+ if not self.is_link(path):
+ return False
+ path, prop = path.rsplit('/', 1)
+ prefix = '/'.join(['..'] * (len(path.split('/')) - 1))
+ return prefix + str(self.qmp.command('qom-get', path=path,
+ property=prop))
+
+ def getattr(self, path):
+ if self.is_link(path):
+ value = posix.stat_result((0755 | stat.S_IFLNK,
+ self.get_ino(path),
+ 0,
+ 2,
+ 1000,
+ 1000,
+ 4096,
+ 0,
+ 0,
+ 0))
+ elif self.is_object(path):
+ value = posix.stat_result((0755 | stat.S_IFDIR,
+ self.get_ino(path),
+ 0,
+ 2,
+ 1000,
+ 1000,
+ 4096,
+ 0,
+ 0,
+ 0))
+ elif self.is_property(path):
+ value = posix.stat_result((0644 | stat.S_IFREG,
+ self.get_ino(path),
+ 0,
+ 1,
+ 1000,
+ 1000,
+ 4096,
+ 0,
+ 0,
+ 0))
+ else:
+ value = -ENOENT
+ return value
+
+ def readdir(self, path, offset):
+ yield fuse.Direntry('.')
+ yield fuse.Direntry('..')
+ for item in self.qmp.command('qom-list', path=path):
+ yield fuse.Direntry(str(item['name']))
+
+if __name__ == '__main__':
+ import sys, os
+
+ fs = QOMFS(QEMUMonitorProtocol(os.environ['QMP_SOCKET']))
+ fs.main(sys.argv)
diff --git a/scripts/qmp/qom-get b/scripts/qmp/qom-get
new file mode 100755
index 00000000..0172c694
--- /dev/null
+++ b/scripts/qmp/qom-get
@@ -0,0 +1,67 @@
+#!/usr/bin/python
+##
+# QEMU Object Model test tools
+#
+# Copyright IBM, Corp. 2011
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later. See
+# the COPYING file in the top-level directory.
+##
+
+import sys
+import os
+from qmp import QEMUMonitorProtocol
+
+cmd, args = sys.argv[0], sys.argv[1:]
+socket_path = None
+path = None
+prop = None
+
+def usage():
+ return '''environment variables:
+ QMP_SOCKET=<path | addr:port>
+usage:
+ %s [-h] [-s <QMP socket path | addr:port>] <path>.<property>
+''' % cmd
+
+def usage_error(error_msg = "unspecified error"):
+ sys.stderr.write('%s\nERROR: %s\n' % (usage(), error_msg))
+ exit(1)
+
+if len(args) > 0:
+ if args[0] == "-h":
+ print usage()
+ exit(0);
+ elif args[0] == "-s":
+ try:
+ socket_path = args[1]
+ except:
+ usage_error("missing argument: QMP socket path or address");
+ args = args[2:]
+
+if not socket_path:
+ if os.environ.has_key('QMP_SOCKET'):
+ socket_path = os.environ['QMP_SOCKET']
+ else:
+ usage_error("no QMP socket path or address given");
+
+if len(args) > 0:
+ try:
+ path, prop = args[0].rsplit('.', 1)
+ except:
+ usage_error("invalid format for path/property/value")
+else:
+ usage_error("not enough arguments")
+
+srv = QEMUMonitorProtocol(socket_path)
+srv.connect()
+
+rsp = srv.command('qom-get', path=path, property=prop)
+if type(rsp) == dict:
+ for i in rsp.keys():
+ print '%s: %s' % (i, rsp[i])
+else:
+ print rsp
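
The same query can also be issued programmatically through the qmp module; a hedged sketch follows (the socket path is an example, and '/machine' with its 'type' property is used only because every QOM object exposes 'type'):

    from qmp import QEMUMonitorProtocol

    srv = QEMUMonitorProtocol('/tmp/qmp.sock')
    srv.connect()
    # Equivalent of: QMP_SOCKET=/tmp/qmp.sock qom-get /machine.type
    print srv.command('qom-get', path='/machine', property='type')
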
diff --git a/scripts/qmp/qom-list b/scripts/qmp/qom-list
new file mode 100755
index 00000000..1e7cc6cb
--- /dev/null
+++ b/scripts/qmp/qom-list
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+##
+# QEMU Object Model test tools
+#
+# Copyright IBM, Corp. 2011
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later. See
+# the COPYING file in the top-level directory.
+##
+
+import sys
+import os
+from qmp import QEMUMonitorProtocol
+
+cmd, args = sys.argv[0], sys.argv[1:]
+socket_path = None
+path = None
+prop = None
+
+def usage():
+ return '''environment variables:
+ QMP_SOCKET=<path | addr:port>
+usage:
+ %s [-h] [-s <QMP socket path | addr:port>] [<path>]
+''' % cmd
+
+def usage_error(error_msg = "unspecified error"):
+ sys.stderr.write('%s\nERROR: %s\n' % (usage(), error_msg))
+ exit(1)
+
+if len(args) > 0:
+ if args[0] == "-h":
+ print usage()
+ exit(0);
+ elif args[0] == "-s":
+ try:
+ socket_path = args[1]
+ except:
+ usage_error("missing argument: QMP socket path or address");
+ args = args[2:]
+
+if not socket_path:
+ if os.environ.has_key('QMP_SOCKET'):
+ socket_path = os.environ['QMP_SOCKET']
+ else:
+ usage_error("no QMP socket path or address given");
+
+srv = QEMUMonitorProtocol(socket_path)
+srv.connect()
+
+if len(args) == 0:
+ print '/'
+ sys.exit(0)
+
+for item in srv.command('qom-list', path=args[0]):
+ if item['type'].startswith('child<'):
+ print '%s/' % item['name']
+ elif item['type'].startswith('link<'):
+ print '@%s/' % item['name']
+ else:
+ print '%s' % item['name']
diff --git a/scripts/qmp/qom-set b/scripts/qmp/qom-set
new file mode 100755
index 00000000..54ecfecc
--- /dev/null
+++ b/scripts/qmp/qom-set
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+##
+# QEMU Object Model test tools
+#
+# Copyright IBM, Corp. 2011
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later. See
+# the COPYING file in the top-level directory.
+##
+
+import sys
+import os
+from qmp import QEMUMonitorProtocol
+
+cmd, args = sys.argv[0], sys.argv[1:]
+socket_path = None
+path = None
+prop = None
+value = None
+
+def usage():
+ return '''environment variables:
+ QMP_SOCKET=<path | addr:port>
+usage:
+ %s [-h] [-s <QMP socket path | addr:port>] <path>.<property> <value>
+''' % cmd
+
+def usage_error(error_msg = "unspecified error"):
+ sys.stderr.write('%s\nERROR: %s\n' % (usage(), error_msg))
+ exit(1)
+
+if len(args) > 0:
+ if args[0] == "-h":
+ print usage()
+ exit(0);
+ elif args[0] == "-s":
+ try:
+ socket_path = args[1]
+ except:
+ usage_error("missing argument: QMP socket path or address");
+ args = args[2:]
+
+if not socket_path:
+ if os.environ.has_key('QMP_SOCKET'):
+ socket_path = os.environ['QMP_SOCKET']
+ else:
+ usage_error("no QMP socket path or address given");
+
+if len(args) > 1:
+ try:
+ path, prop = args[0].rsplit('.', 1)
+ except:
+ usage_error("invalid format for path/property/value")
+ value = args[1]
+else:
+ usage_error("not enough arguments")
+
+srv = QEMUMonitorProtocol(socket_path)
+srv.connect()
+
+print srv.command('qom-set', path=path, property=prop, value=value)
diff --git a/scripts/qmp/qom-tree b/scripts/qmp/qom-tree
new file mode 100755
index 00000000..906fcd26
--- /dev/null
+++ b/scripts/qmp/qom-tree
@@ -0,0 +1,75 @@
+#!/usr/bin/python
+##
+# QEMU Object Model test tools
+#
+# Copyright IBM, Corp. 2011
+# Copyright (c) 2013 SUSE LINUX Products GmbH
+#
+# Authors:
+# Anthony Liguori <aliguori@amazon.com>
+# Andreas Faerber <afaerber@suse.de>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later. See
+# the COPYING file in the top-level directory.
+##
+
+import sys
+import os
+from qmp import QEMUMonitorProtocol
+
+cmd, args = sys.argv[0], sys.argv[1:]
+socket_path = None
+path = None
+prop = None
+
+def usage():
+ return '''environment variables:
+ QMP_SOCKET=<path | addr:port>
+usage:
+ %s [-h] [-s <QMP socket path | addr:port>] [<path>]
+''' % cmd
+
+def usage_error(error_msg = "unspecified error"):
+ sys.stderr.write('%s\nERROR: %s\n' % (usage(), error_msg))
+ exit(1)
+
+if len(args) > 0:
+ if args[0] == "-h":
+ print usage()
+ exit(0);
+ elif args[0] == "-s":
+ try:
+ socket_path = args[1]
+ except:
+ usage_error("missing argument: QMP socket path or address");
+ args = args[2:]
+
+if not socket_path:
+ if os.environ.has_key('QMP_SOCKET'):
+ socket_path = os.environ['QMP_SOCKET']
+ else:
+ usage_error("no QMP socket path or address given");
+
+srv = QEMUMonitorProtocol(socket_path)
+srv.connect()
+
+def list_node(path):
+ print '%s' % path
+ items = srv.command('qom-list', path=path)
+ for item in items:
+ if not item['type'].startswith('child<'):
+ try:
+ print ' %s: %s (%s)' % (item['name'], srv.command('qom-get', path=path, property=item['name']), item['type'])
+ except:
+ print ' %s: <EXCEPTION> (%s)' % (item['name'], item['type'])
+ print ''
+ for item in items:
+ if item['type'].startswith('child<'):
+ list_node((path if (path != '/') else '') + '/' + item['name'])
+
+if len(args) == 0:
+ path = '/'
+else:
+ path = args[0]
+
+list_node(path)
diff --git a/scripts/qtest.py b/scripts/qtest.py
new file mode 100644
index 00000000..a9714453
--- /dev/null
+++ b/scripts/qtest.py
@@ -0,0 +1,71 @@
+# QEMU qtest library
+#
+# Copyright (C) 2015 Red Hat Inc.
+#
+# Authors:
+# Fam Zheng <famz@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+#
+# Based on qmp.py.
+#
+
+import errno
+import socket
+
+class QEMUQtestProtocol(object):
+ def __init__(self, address, server=False):
+ """
+ Create a QEMUQtestProtocol object.
+
+ @param address: QEMU address, can be either a unix socket path (string)
+ or a tuple in the form ( address, port ) for a TCP
+ connection
+ @param server: server mode, listens on the socket (bool)
+ @raise socket.error on socket connection errors
+ @note No connection is established; this is done by the connect() or
+ accept() methods
+ """
+ self._address = address
+ self._sock = self._get_sock()
+ if server:
+ self._sock.bind(self._address)
+ self._sock.listen(1)
+
+ def _get_sock(self):
+ if isinstance(self._address, tuple):
+ family = socket.AF_INET
+ else:
+ family = socket.AF_UNIX
+ return socket.socket(family, socket.SOCK_STREAM)
+
+ def connect(self):
+ """
+ Connect to the qtest socket.
+
+ @raise socket.error on socket connection errors
+ """
+ self._sock.connect(self._address)
+
+ def accept(self):
+ """
+ Await connection from QEMU.
+
+ @raise socket.error on socket connection errors
+ """
+ self._sock, _ = self._sock.accept()
+
+ def cmd(self, qtest_cmd):
+ """
+ Send a qtest command on the wire.
+
+ @param qtest_cmd: qtest command text to be sent
+ """
+ self._sock.sendall(qtest_cmd + "\n")
+
+ def close(self):
+ self._sock.close()
+
+ def settimeout(self, timeout):
+ self._sock.settimeout(timeout)
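
A minimal sketch of using QEMUQtestProtocol. It assumes QEMU was started with something like '-machine accel=qtest -qtest unix:/tmp/qtest.sock,server' (flags and paths illustrative). Note that cmd() only transmits the command text; this class does not read the qtest replies back.

    from qtest import QEMUQtestProtocol

    qt = QEMUQtestProtocol('/tmp/qtest.sock')
    qt.connect()
    qt.cmd('readb 0x0')           # qtest wire command, sent verbatim
    qt.cmd('outb 0x3f8 0x41')
    qt.close()
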
diff --git a/scripts/refresh-pxe-roms.sh b/scripts/refresh-pxe-roms.sh
new file mode 100755
index 00000000..90fc0b37
--- /dev/null
+++ b/scripts/refresh-pxe-roms.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# PXE ROM build script
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# Copyright (C) 2011 Red Hat, Inc.
+# Authors: Alex Williamson <alex.williamson@redhat.com>
+#
+# Usage: Run from root of qemu tree
+# ./scripts/refresh-pxe-roms.sh
+
+targets="pxerom"
+if test -x "$(which EfiRom 2>/dev/null)"; then
+ targets="$targets efirom"
+fi
+
+cd roms
+make -j4 $targets || exit 1
+make clean
diff --git a/scripts/shaderinclude.pl b/scripts/shaderinclude.pl
new file mode 100644
index 00000000..81b51463
--- /dev/null
+++ b/scripts/shaderinclude.pl
@@ -0,0 +1,16 @@
+#!/usr/bin/perl
+use strict;
+use warnings;
+
+my $file = shift;
+open FILE, "<", $file or die "open $file: $!";
+my $name = $file;
+$name =~ s|.*/||;
+$name =~ s/[-.]/_/g;
+print "static GLchar ${name}_src[] =\n";
+while (<FILE>) {
+ chomp;
+ printf " \"%s\\n\"\n", $_;
+}
+print " \"\\n\";\n";
+close FILE;
diff --git a/scripts/signrom.py b/scripts/signrom.py
new file mode 100644
index 00000000..f9c35ccf
--- /dev/null
+++ b/scripts/signrom.py
@@ -0,0 +1,40 @@
+#
+# Option ROM signing utility
+#
+# Authors:
+# Jan Kiszka <jan.kiszka@siemens.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+
+import sys
+import struct
+
+if len(sys.argv) < 3:
+ print('usage: signrom.py input output')
+ sys.exit(1)
+
+fin = open(sys.argv[1], 'rb')
+fout = open(sys.argv[2], 'wb')
+
+fin.seek(2)
+size = ord(fin.read(1)) * 512 - 1
+
+fin.seek(0)
+data = fin.read(size)
+fout.write(data)
+
+checksum = 0
+for b in data:
+ # catch Python 2 vs. 3 differences
+ if isinstance(b, int):
+ checksum += b
+ else:
+ checksum += ord(b)
+checksum = (256 - checksum) % 256
+
+# Python 3 no longer allows chr(checksum)
+fout.write(struct.pack('B', checksum))
+
+fin.close()
+fout.close()
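
The checksum byte is chosen so that all bytes of the signed ROM sum to zero modulo 256. A small sketch that verifies a file produced by this script (the file name is an example):

    # Verify an option ROM signed by signrom.py: bytes must sum to 0 mod 256.
    with open('signed.rom', 'rb') as f:
        rom = f.read()

    total = 0
    for b in rom:
        total += b if isinstance(b, int) else ord(b)   # Python 2 vs. 3

    assert total % 256 == 0, 'bad option ROM checksum'
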
diff --git a/scripts/simpletrace.py b/scripts/simpletrace.py
new file mode 100755
index 00000000..3916c6d1
--- /dev/null
+++ b/scripts/simpletrace.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+#
+# Pretty-printer for simple trace backend binary trace files
+#
+# Copyright IBM, Corp. 2010
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+#
+# For help see docs/tracing.txt
+
+import struct
+import re
+import inspect
+from tracetool import _read_events, Event
+from tracetool.backend.simple import is_string
+
+header_event_id = 0xffffffffffffffff
+header_magic = 0xf2b177cb0aa429b4
+dropped_event_id = 0xfffffffffffffffe
+
+log_header_fmt = '=QQQ'
+rec_header_fmt = '=QQII'
+
+def read_header(fobj, hfmt):
+ '''Read a trace record header'''
+ hlen = struct.calcsize(hfmt)
+ hdr = fobj.read(hlen)
+ if len(hdr) != hlen:
+ return None
+ return struct.unpack(hfmt, hdr)
+
+def get_record(edict, rechdr, fobj):
+ """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
+ if rechdr is None:
+ return None
+ rec = (rechdr[0], rechdr[1], rechdr[3])
+ if rechdr[0] != dropped_event_id:
+ event_id = rechdr[0]
+ event = edict[event_id]
+ for type, name in event.args:
+ if is_string(type):
+ l = fobj.read(4)
+ (len,) = struct.unpack('=L', l)
+ s = fobj.read(len)
+ rec = rec + (s,)
+ else:
+ (value,) = struct.unpack('=Q', fobj.read(8))
+ rec = rec + (value,)
+ else:
+ (value,) = struct.unpack('=Q', fobj.read(8))
+ rec = rec + (value,)
+ return rec
+
+
+def read_record(edict, fobj):
+ """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
+ rechdr = read_header(fobj, rec_header_fmt)
+ return get_record(edict, rechdr, fobj) # return tuple of record elements
+
+def read_trace_header(fobj):
+ """Read and verify trace file header"""
+ header = read_header(fobj, log_header_fmt)
+ if header is None or \
+ header[0] != header_event_id or \
+ header[1] != header_magic:
+ raise ValueError('Not a valid trace file!')
+
+ log_version = header[2]
+ if log_version not in [0, 2, 3]:
+ raise ValueError('Unknown version of tracelog format!')
+ if log_version != 3:
+ raise ValueError('Log format %d not supported with this QEMU release!'
+ % log_version)
+
+def read_trace_records(edict, fobj):
+ """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6)."""
+ while True:
+ rec = read_record(edict, fobj)
+ if rec is None:
+ break
+
+ yield rec
+
+class Analyzer(object):
+ """A trace file analyzer which processes trace records.
+
+ An analyzer can be passed to run() or process(). The begin() method is
+ invoked, then each trace record is processed, and finally the end() method
+ is invoked.
+
+ If a method matching a trace event name exists, it is invoked to process
+ that trace record. Otherwise the catchall() method is invoked."""
+
+ def begin(self):
+ """Called at the start of the trace."""
+ pass
+
+ def catchall(self, event, rec):
+ """Called if no specific method for processing a trace event has been found."""
+ pass
+
+ def end(self):
+ """Called at the end of the trace."""
+ pass
+
+def process(events, log, analyzer, read_header=True):
+ """Invoke an analyzer on each event in a log."""
+ if isinstance(events, str):
+ events = _read_events(open(events, 'r'))
+ if isinstance(log, str):
+ log = open(log, 'rb')
+
+ if read_header:
+ read_trace_header(log)
+
+ dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
+ edict = {dropped_event_id: dropped_event}
+
+ for num, event in enumerate(events):
+ edict[num] = event
+
+ def build_fn(analyzer, event):
+ if isinstance(event, str):
+ return analyzer.catchall
+
+ fn = getattr(analyzer, event.name, None)
+ if fn is None:
+ return analyzer.catchall
+
+ event_argcount = len(event.args)
+ fn_argcount = len(inspect.getargspec(fn)[0]) - 1
+ if fn_argcount == event_argcount + 1:
+ # Include timestamp as first argument
+ return lambda _, rec: fn(*((rec[1:2],) + rec[3:3 + event_argcount]))
+ elif fn_argcount == event_argcount + 2:
+ # Include timestamp and pid
+ return lambda _, rec: fn(*rec[1:3 + event_argcount])
+ else:
+ # Just arguments, no timestamp or pid
+ return lambda _, rec: fn(*rec[3:3 + event_argcount])
+
+ analyzer.begin()
+ fn_cache = {}
+ for rec in read_trace_records(edict, log):
+ event_num = rec[0]
+ event = edict[event_num]
+ if event_num not in fn_cache:
+ fn_cache[event_num] = build_fn(analyzer, event)
+ fn_cache[event_num](event, rec)
+ analyzer.end()
+
+def run(analyzer):
+ """Execute an analyzer on a trace file given on the command-line.
+
+ This function is useful as a driver for simple analysis scripts. More
+ advanced scripts will want to call process() instead."""
+ import sys
+
+ read_header = True
+ if len(sys.argv) == 4 and sys.argv[1] == '--no-header':
+ read_header = False
+ del sys.argv[1]
+ elif len(sys.argv) != 3:
+ sys.stderr.write('usage: %s [--no-header] <trace-events> ' \
+ '<trace-file>\n' % sys.argv[0])
+ sys.exit(1)
+
+ events = _read_events(open(sys.argv[1], 'r'))
+ process(events, sys.argv[2], analyzer, read_header=read_header)
+
+if __name__ == '__main__':
+ class Formatter(Analyzer):
+ def __init__(self):
+ self.last_timestamp = None
+
+ def catchall(self, event, rec):
+ timestamp = rec[1]
+ if self.last_timestamp is None:
+ self.last_timestamp = timestamp
+ delta_ns = timestamp - self.last_timestamp
+ self.last_timestamp = timestamp
+
+ fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
+ 'pid=%d' % rec[2]]
+ i = 3
+ for type, name in event.args:
+ if is_string(type):
+ fields.append('%s=%s' % (name, rec[i]))
+ else:
+ fields.append('%s=0x%x' % (name, rec[i]))
+ i += 1
+ print ' '.join(fields)
+
+ run(Formatter())
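
Analysis scripts built on this module subclass Analyzer and name a method after each trace event they care about. A hedged sketch (the event name and its argument list are only an example and must match an entry in the trace-events file given on the command line):

    #!/usr/bin/env python
    # Usage: ./count-readv.py trace-events trace-file
    import simpletrace

    class IOCounter(simpletrace.Analyzer):
        def begin(self):
            self.count = 0

        # Invoked once per matching record; arguments follow the
        # declaration order in the trace-events file.
        def bdrv_aio_readv(self, bs, sector_num, nb_sectors, opaque):
            self.count += 1

        def end(self):
            print 'bdrv_aio_readv seen %d times' % self.count

    if __name__ == '__main__':
        simpletrace.run(IOCounter())
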
diff --git a/scripts/switch-timer-api b/scripts/switch-timer-api
new file mode 100755
index 00000000..b0e230b9
--- /dev/null
+++ b/scripts/switch-timer-api
@@ -0,0 +1,178 @@
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use Getopt::Long;
+use FindBin;
+
+my @legacy = qw(qemu_clock_ptr qemu_get_clock_ns qemu_get_clock_ms qemu_register_clock_reset_notifier qemu_unregister_clock_reset_notifier qemu_new_timer qemu_free_timer qemu_del_timer qemu_mod_timer_ns qemu_mod_timer qemu_run_timers qemu_new_timer_ns qemu_new_timer_us qemu_new_timer_ms);
+my $legacyre = '\b('.join('|', @legacy).')\b';
+my $option_git;
+my $option_dryrun;
+my $option_quiet;
+my $option_rtc;
+my $suffix=".tmp.$$";
+my @files;
+my $getfiles = 'git grep -l -E \'\b((host|rt|vm|rtc)_clock\b|qemu_\w*timer)\' | egrep \'\.[ch]$\' | egrep -v \'qemu-timer\.c$|include/qemu/timer\.h$\'';
+
+sub Syntax
+{
+ print STDERR <<STOP;
+Usage: $FindBin::Script [options] FILE ...
+
+Translate each FILE to the new QEMU timer API. If no files
+are passed, a reasonable guess is taken.
+
+Options:
+ -q, --quiet Do not show warnings etc
+ -d, --dry-run Do a dry run
+ -g, --git Generate a git commit for each change
+ -r, --rtc Only fix up rtc usage
+ -h, --help Print this message
+
+STOP
+return;
+}
+
+sub ParseOptions
+{
+ if (!GetOptions (
+ "dry-run|d" => \$option_dryrun,
+ "git|g" => \$option_git,
+ "quiet|q" => \$option_quiet,
+ "rtc|r" => \$option_rtc,
+ "help|h" => sub { Syntax(); exit(0); }
+ ))
+ {
+ Syntax();
+ die "Bad options";
+ }
+
+ if ($#ARGV >=0)
+ {
+ @files = @ARGV;
+ }
+ else
+ {
+ @files = split(/\s+/, `$getfiles`);
+ }
+
+ foreach my $file (@files)
+ {
+ die "Cannot find $file" unless (-f $file && -r $file);
+ }
+}
+
+sub DoWarn
+{
+ my $text = shift @_;
+ my $line = shift @_;
+ return if ($option_quiet);
+ chomp ($line);
+ print STDERR "$text\n";
+ print STDERR "$line\n\n";
+}
+
+sub Process
+{
+ my $ifn = shift @_;
+ my $ofn = $ifn.$suffix;
+
+ my $intext;
+ my $outtext;
+ my $linenum = 0;
+
+ open my $input, "<", $ifn || die "Cannot open $ifn for read: $!";
+
+ while (<$input>)
+ {
+ my $line = $_;
+ $intext .= $line;
+ $linenum++;
+
+ # fix the specific uses
+ unless ($option_rtc)
+ {
+ $line =~ s/\bqemu_new_timer(_[num]s)\s*\((vm_|rt_|host_)clock\b/timer_new$1(XXX_$2clock/g;
+ $line =~ s/\bqemu_new_timer\s*\((vm_|rt_|host_)clock\b/timer_new(XXX_$1clock/g;
+ $line =~ s/\bqemu_get_clock(_[num]s)\s*\((vm_|rt_|host_)clock\b/qemu_clock_get$1(XXX_$2clock/g;
+ }
+
+ # rtc is different
+ $line =~ s/\bqemu_new_timer(_[num]s)\s*\(rtc_clock\b/timer_new$1(rtc_clock/g;
+ $line =~ s/\bqemu_new_timer\s*\(rtc_clock\b/timer_new(rtc_clock/g;
+ $line =~ s/\bqemu_get_clock(_[num]s)\s*\(rtc_clock\b/qemu_clock_get$1(rtc_clock/g;
+ $line =~ s/\bqemu_register_clock_reset_notifier\s*\(rtc_clock\b/qemu_register_clock_reset_notifier(qemu_clock_ptr(rtc_clock)/g;
+
+ unless ($option_rtc)
+ {
+ # fix up comments
+ $line =~ s/\b(vm_|rt_|host_)clock\b/XXX_$1clock/g if ($line =~ m,^[/ ]+\*,);
+
+ # spurious fprintf error reporting
+ $line =~ s/: qemu_new_timer_ns failed/: timer_new_ns failed/g;
+
+ # these have just changed name
+ $line =~ s/\bqemu_mod_timer\b/timer_mod/g;
+ $line =~ s/\bqemu_mod_timer_(ns|us|ms)\b/timer_mod_$1/g;
+ $line =~ s/\bqemu_free_timer\b/timer_free/g;
+ $line =~ s/\bqemu_del_timer\b/timer_del/g;
+ }
+
+ # fix up rtc_clock
+ $line =~ s/QEMUClock \*rtc_clock;/QEMUClockType rtc_clock;/g;
+ $line =~ s/\brtc_clock = (vm_|rt_|host_)clock\b/rtc_clock = XXX_$1clock/g;
+
+ unless ($option_rtc)
+ {
+ # replace any more general uses
+ $line =~ s/\b(vm_|rt_|host_)clock\b/qemu_clock_ptr(XXX_$1clock)/g;
+ }
+
+ # fix up the place holders
+ $line =~ s/\bXXX_vm_clock\b/QEMU_CLOCK_VIRTUAL/g;
+ $line =~ s/\bXXX_rt_clock\b/QEMU_CLOCK_REALTIME/g;
+ $line =~ s/\bXXX_host_clock\b/QEMU_CLOCK_HOST/g;
+
+ unless ($option_rtc)
+ {
+ DoWarn("$ifn:$linenum WARNING: timer $1 not fixed up", $line) if ($line =~ /\b((vm_|rt_|host_)clock)\b/);
+ DoWarn("$ifn:$linenum WARNING: function $1 not fixed up", $line) if ($line =~ /\b(qemu_new_timer\w+)\b/);
+ DoWarn("$ifn:$linenum WARNING: legacy function $1 remains", $line) if ($line =~ /$legacyre/o);
+ }
+
+ $outtext .= $line;
+ }
+
+ close $input;
+
+ if ($intext ne $outtext)
+ {
+ print STDERR "Patching $ifn\n" unless ($option_quiet);
+ unless ($option_dryrun)
+ {
+ open my $output, ">", $ofn || die "Cannot open $ofn for write: $!";
+ print $output $outtext;
+ close $output;
+ rename ($ofn, $ifn) || die "Cannot rename temp file to $ifn: $!";
+ return 1;
+ }
+ }
+ return 0;
+}
+
+sub DoCommit
+{
+ my $file = shift @_;
+ open (my $git, "| git commit -F - $file") || die "Cannot run git commit on $file: $!";
+ print $git "timers api: use new timer api in $file\n\nConvert $file to use new timer API.\nThis is an automated commit made by scripts/switch-timer-api\n";
+ close ($git);
+}
+
+ParseOptions;
+
+foreach my $file (@files)
+{
+ my $changed = Process ($file);
+ DoCommit($file) if ($changed && $option_git);
+}
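
Most of the conversion is a fixed set of renames; for readers of the substitutions above, the simple cases are restated here as a runnable Python sketch (illustrative only, the script itself applies them with the Perl regexes):

    import re

    # One-for-one function renames (the _us/_ms timer_mod variants behave alike).
    RENAMES = {
        'qemu_mod_timer':    'timer_mod',
        'qemu_mod_timer_ns': 'timer_mod_ns',
        'qemu_free_timer':   'timer_free',
        'qemu_del_timer':    'timer_del',
    }

    # Clock placeholders resolved in the final pass.
    CLOCKS = {
        'vm_clock':   'QEMU_CLOCK_VIRTUAL',
        'rt_clock':   'QEMU_CLOCK_REALTIME',
        'host_clock': 'QEMU_CLOCK_HOST',
    }

    line = 'qemu_mod_timer(ts, expire);'
    for old, new in RENAMES.items():
        line = re.sub(r'\b%s\b' % old, new, line)
    print line   # -> timer_mod(ts, expire);
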
diff --git a/scripts/texi2pod.pl b/scripts/texi2pod.pl
new file mode 100755
index 00000000..94097fb0
--- /dev/null
+++ b/scripts/texi2pod.pl
@@ -0,0 +1,486 @@
+#! /usr/bin/perl -w
+
+# Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
+
+# This file is part of GCC.
+
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING. If not,
+# see <http://www.gnu.org/licenses/>.
+
+# This does trivial (and I mean _trivial_) conversion of Texinfo
+# markup to Perl POD format. It's intended to be used to extract
+# something suitable for a manpage from a Texinfo document.
+
+$output = 0;
+$skipping = 0;
+%sects = ();
+$section = "";
+@icstack = ();
+@endwstack = ();
+@skstack = ();
+@instack = ();
+$shift = "";
+%defs = ();
+$fnno = 1;
+$inf = "";
+$ibase = "";
+@ipath = ();
+$encoding = undef;
+
+while ($_ = shift) {
+ if (/^-D(.*)$/) {
+ if ($1 ne "") {
+ $flag = $1;
+ } else {
+ $flag = shift;
+ }
+ $value = "";
+ ($flag, $value) = ($flag =~ /^([^=]+)(?:=(.+))?/);
+ die "no flag specified for -D\n"
+ unless $flag ne "";
+ die "flags may only contain letters, digits, hyphens, dashes and underscores\n"
+ unless $flag =~ /^[a-zA-Z0-9_-]+$/;
+ $defs{$flag} = $value;
+ } elsif (/^-I(.*)$/) {
+ if ($1 ne "") {
+ $flag = $1;
+ } else {
+ $flag = shift;
+ }
+ push (@ipath, $flag);
+ } elsif (/^-/) {
+ usage();
+ } else {
+ $in = $_, next unless defined $in;
+ $out = $_, next unless defined $out;
+ usage();
+ }
+}
+
+if (defined $in) {
+ $inf = gensym();
+ open($inf, "<$in") or die "opening \"$in\": $!\n";
+ $ibase = $1 if $in =~ m|^(.+)/[^/]+$|;
+} else {
+ $inf = \*STDIN;
+}
+
+if (defined $out) {
+ open(STDOUT, ">$out") or die "opening \"$out\": $!\n";
+}
+
+while(defined $inf) {
+while(<$inf>) {
+ # Certain commands are discarded without further processing.
+ /^\@(?:
+ [a-z]+index # @*index: useful only in complete manual
+ |need # @need: useful only in printed manual
+ |(?:end\s+)?group # @group .. @end group: ditto
+ |page # @page: ditto
+ |node # @node: useful only in .info file
+ |(?:end\s+)?ifnottex # @ifnottex .. @end ifnottex: use contents
+ )\b/x and next;
+
+ chomp;
+
+ # Look for filename and title markers.
+ /^\@setfilename\s+([^.]+)/ and $fn = $1, next;
+ /^\@settitle\s+([^.]+)/ and $tl = postprocess($1), next;
+
+ # Look for document encoding
+ /^\@documentencoding\s+([^.]+)/ and do {
+ $encoding = $1 unless defined $encoding;
+ next;
+ };
+
+ # Identify a man title but keep only the one we are interested in.
+ /^\@c\s+man\s+title\s+([A-Za-z0-9-]+)\s+(.+)/ and do {
+ if (exists $defs{$1}) {
+ $fn = $1;
+ $tl = postprocess($2);
+ }
+ next;
+ };
+
+ # Look for blocks surrounded by @c man begin SECTION ... @c man end.
+ # This really oughta be @ifman ... @end ifman and the like, but such
+ # would require rev'ing all other Texinfo translators.
+ /^\@c\s+man\s+begin\s+([A-Z]+)\s+([A-Za-z0-9-]+)/ and do {
+ $output = 1 if exists $defs{$2};
+ $sect = $1;
+ next;
+ };
+ /^\@c\s+man\s+begin\s+([A-Z]+)/ and $sect = $1, $output = 1, next;
+ /^\@c\s+man\s+end/ and do {
+ $sects{$sect} = "" unless exists $sects{$sect};
+ $sects{$sect} .= postprocess($section);
+ $section = "";
+ $output = 0;
+ next;
+ };
+
+ # handle variables
+ /^\@set\s+([a-zA-Z0-9_-]+)\s*(.*)$/ and do {
+ $defs{$1} = $2;
+ next;
+ };
+ /^\@clear\s+([a-zA-Z0-9_-]+)/ and do {
+ delete $defs{$1};
+ next;
+ };
+
+ next unless $output;
+
+ # Discard comments. (Can't do it above, because then we'd never see
+ # @c man lines.)
+ /^\@c\b/ and next;
+
+ # End-block handler goes up here because it needs to operate even
+ # if we are skipping.
+ /^\@end\s+([a-z]+)/ and do {
+ # Ignore @end foo, where foo is not an operation which may
+ # cause us to skip, if we are presently skipping.
+ my $ended = $1;
+ next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex|copying)$/;
+
+ die "\@end $ended without \@$ended at line $.\n" unless defined $endw;
+ die "\@$endw ended by \@end $ended at line $.\n" unless $ended eq $endw;
+
+ $endw = pop @endwstack;
+
+ if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex)$/) {
+ $skipping = pop @skstack;
+ next;
+ } elsif ($ended =~ /^(?:example|smallexample|display)$/) {
+ $shift = "";
+ $_ = ""; # need a paragraph break
+ } elsif ($ended =~ /^(?:itemize|enumerate|[fv]?table)$/) {
+ $_ = "\n=back\n";
+ $ic = pop @icstack;
+ } elsif ($ended eq "multitable") {
+ $_ = "\n=back\n";
+ } else {
+ die "unknown command \@end $ended at line $.\n";
+ }
+ };
+
+ # We must handle commands which can cause skipping even while we
+ # are skipping, otherwise we will not process nested conditionals
+ # correctly.
+ /^\@ifset\s+([a-zA-Z0-9_-]+)/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = "ifset";
+ $skipping = 1 unless exists $defs{$1};
+ next;
+ };
+
+ /^\@ifclear\s+([a-zA-Z0-9_-]+)/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = "ifclear";
+ $skipping = 1 if exists $defs{$1};
+ next;
+ };
+
+ /^\@(ignore|menu|iftex|copying)\b/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = $1;
+ $skipping = 1;
+ next;
+ };
+
+ next if $skipping;
+
+ # Character entities. First the ones that can be replaced by raw text
+ # or discarded outright:
+ s/\@copyright\{\}/(c)/g;
+ s/\@dots\{\}/.../g;
+ s/\@enddots\{\}/..../g;
+ s/\@([.!? ])/$1/g;
+ s/\@[:-]//g;
+ s/\@bullet(?:\{\})?/*/g;
+ s/\@TeX\{\}/TeX/g;
+ s/\@pounds\{\}/\#/g;
+ s/\@minus(?:\{\})?/-/g;
+ s/\\,/,/g;
+
+ # Now the ones that have to be replaced by special escapes
+ # (which will be turned back into text by unmunge())
+ s/&/&amp;/g;
+ s/\@\{/&lbrace;/g;
+ s/\@\}/&rbrace;/g;
+ s/\@\@/&at;/g;
+
+ # Inside a verbatim block, handle @var specially.
+ if ($shift ne "") {
+ s/\@var\{([^\}]*)\}/<$1>/g;
+ }
+
+ # POD doesn't interpret E<> inside a verbatim block.
+ if ($shift eq "") {
+ s/</&lt;/g;
+ s/>/&gt;/g;
+ } else {
+ s/</&LT;/g;
+ s/>/&GT;/g;
+ }
+
+ # Single line command handlers.
+
+ /^\@include\s+(.+)$/ and do {
+ push @instack, $inf;
+ $inf = gensym();
+ $file = postprocess($1);
+
+ # Try cwd and $ibase, then explicit -I paths.
+ $done = 0;
+ foreach $path ("", $ibase, @ipath) {
+ $mypath = $file;
+ $mypath = $path . "/" . $mypath if ($path ne "");
+ open($inf, "<" . $mypath) and ($done = 1, last);
+ }
+ die "cannot find $file" if !$done;
+ next;
+ };
+
+ /^\@(?:section|unnumbered|unnumberedsec|center)\s+(.+)$/
+ and $_ = "\n=head2 $1\n";
+ /^\@subsection\s+(.+)$/
+ and $_ = "\n=head3 $1\n";
+ /^\@subsubsection\s+(.+)$/
+ and $_ = "\n=head4 $1\n";
+
+ # Block command handlers:
+ /^\@itemize(?:\s+(\@[a-z]+|\*|-))?/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ if (defined $1) {
+ $ic = $1;
+ } else {
+ $ic = '*';
+ }
+ $_ = "\n=over 4\n";
+ $endw = "itemize";
+ };
+
+ /^\@enumerate(?:\s+([a-zA-Z0-9]+))?/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ if (defined $1) {
+ $ic = $1 . ".";
+ } else {
+ $ic = "1.";
+ }
+ $_ = "\n=over 4\n";
+ $endw = "enumerate";
+ };
+
+ /^\@multitable\s.*/ and do {
+ push @endwstack, $endw;
+ $endw = "multitable";
+ $_ = "\n=over 4\n";
+ };
+
+ /^\@([fv]?table)\s+(\@[a-z]+)/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ $endw = $1;
+ $ic = $2;
+ $ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env)/B/;
+ $ic =~ s/\@(?:code|kbd)/C/;
+ $ic =~ s/\@(?:dfn|var|emph|cite|i)/I/;
+ $ic =~ s/\@(?:file)/F/;
+ $_ = "\n=over 4\n";
+ };
+
+ /^\@((?:small)?example|display)/ and do {
+ push @endwstack, $endw;
+ $endw = $1;
+ $shift = "\t";
+ $_ = ""; # need a paragraph break
+ };
+
+ /^\@item\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
+ @columns = ();
+ for $column (split (/\s*\@tab\s*/, $1)) {
+ # @strong{...} is used as a @headitem work-alike
+ $column =~ s/^\@strong{(.*)}$/$1/;
+ push @columns, $column;
+ }
+ $_ = "\n=item ".join (" : ", @columns)."\n";
+ };
+
+ /^\@itemx?\s*(.+)?$/ and do {
+ if (defined $1) {
+ # Entity escapes prevent munging by the <> processing below.
+ $_ = "\n=item $ic\&LT;$1\&GT;\n";
+ } else {
+ $_ = "\n=item $ic\n";
+ $ic =~ y/A-Ya-y/B-Zb-z/;
+ $ic =~ s/(\d+)/$1 + 1/eg;
+ }
+ };
+
+ $section .= $shift.$_."\n";
+}
+# End of current file.
+close($inf);
+$inf = pop @instack;
+}
+
+die "No filename or title\n" unless defined $fn && defined $tl;
+
+print "=encoding $encoding\n\n" if defined $encoding;
+
+$sects{NAME} = "$fn \- $tl\n";
+$sects{FOOTNOTES} .= "=back\n" if exists $sects{FOOTNOTES};
+
+for $sect (qw(NAME SYNOPSIS DESCRIPTION OPTIONS ENVIRONMENT FILES
+ BUGS NOTES FOOTNOTES SEEALSO AUTHOR COPYRIGHT)) {
+ if(exists $sects{$sect}) {
+ $head = $sect;
+ $head =~ s/SEEALSO/SEE ALSO/;
+ print "=head1 $head\n\n";
+ print scalar unmunge ($sects{$sect});
+ print "\n";
+ }
+}
+
+sub usage
+{
+ die "usage: $0 [-D toggle...] [infile [outfile]]\n";
+}
+
+sub postprocess
+{
+ local $_ = $_[0];
+
+ # @value{foo} is replaced by whatever 'foo' is defined as.
+ while (m/(\@value\{([a-zA-Z0-9_-]+)\})/g) {
+ if (! exists $defs{$2}) {
+ print STDERR "Option $2 not defined\n";
+ s/\Q$1\E//;
+ } else {
+ $value = $defs{$2};
+ s/\Q$1\E/$value/;
+ }
+ }
+
+ # Formatting commands.
+ # Temporary escape for @r.
+ s/\@r\{([^\}]*)\}/R<$1>/g;
+ s/\@(?:dfn|var|emph|cite|i)\{([^\}]*)\}/I<$1>/g;
+ s/\@(?:code|kbd)\{([^\}]*)\}/C<$1>/g;
+ s/\@(?:gccoptlist|samp|strong|key|option|env|command|b)\{([^\}]*)\}/B<$1>/g;
+ s/\@sc\{([^\}]*)\}/\U$1/g;
+ s/\@file\{([^\}]*)\}/F<$1>/g;
+ s/\@w\{([^\}]*)\}/S<$1>/g;
+ s/\@(?:dmn|math)\{([^\}]*)\}/$1/g;
+
+ # keep references of the form @ref{...}, print them bold
+ s/\@(?:ref)\{([^\}]*)\}/B<$1>/g;
+
+ # Change double single quotes to double quotes.
+ s/''/"/g;
+ s/``/"/g;
+
+ # Cross references are thrown away, as are @noindent and @refill.
+ # (@noindent is impossible in .pod, and @refill is unnecessary.)
+ # @* is also impossible in .pod; we discard it and any newline that
+ # follows it. Similarly, our macro @gol must be discarded.
+
+ s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g;
+ s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g;
+ s/;\s+\@pxref\{(?:[^\}]*)\}//g;
+ s/\@noindent\s*//g;
+ s/\@refill//g;
+ s/\@gol//g;
+ s/\@\*\s*\n?//g;
+
+ # Anchors are thrown away
+ s/\@anchor\{(?:[^\}]*)\}//g;
+
+ # @uref can take one, two, or three arguments, with different
+ # semantics each time. @url and @email are just like @uref with
+ # one argument, for our purposes.
+ s/\@(?:uref|url|email)\{([^\},]*)\}/&lt;B<$1>&gt;/g;
+ s/\@uref\{([^\},]*),([^\},]*)\}/$2 (C<$1>)/g;
+ s/\@uref\{([^\},]*),([^\},]*),([^\},]*)\}/$3/g;
+
+ # Un-escape <> at this point.
+ s/&LT;/</g;
+ s/&GT;/>/g;
+
+ # Now un-nest all B<>, I<>, R<>. Theoretically we could have
+ # indefinitely deep nesting; in practice, one level suffices.
+ 1 while s/([BIR])<([^<>]*)([BIR])<([^<>]*)>/$1<$2>$3<$4>$1</g;
+
+ # Replace R<...> with bare ...; eliminate empty markup, B<>;
+ # shift white space at the ends of [BI]<...> expressions outside
+ # the expression.
+ s/R<([^<>]*)>/$1/g;
+ s/[BI]<>//g;
+ s/([BI])<(\s+)([^>]+)>/$2$1<$3>/g;
+ s/([BI])<([^>]+?)(\s+)>/$1<$2>$3/g;
+
+ # Extract footnotes. This has to be done after all other
+ # processing because otherwise the regexp will choke on formatting
+ # inside @footnote.
+ while (/\@footnote/g) {
+ s/\@footnote\{([^\}]+)\}/[$fnno]/;
+ add_footnote($1, $fnno);
+ $fnno++;
+ }
+
+ return $_;
+}
+
+sub unmunge
+{
+ # Replace escaped symbols with their equivalents.
+ local $_ = $_[0];
+
+ s/&lt;/E<lt>/g;
+ s/&gt;/E<gt>/g;
+ s/&lbrace;/\{/g;
+ s/&rbrace;/\}/g;
+ s/&at;/\@/g;
+ s/&amp;/&/g;
+ return $_;
+}
+
+sub add_footnote
+{
+ unless (exists $sects{FOOTNOTES}) {
+ $sects{FOOTNOTES} = "\n=over 4\n\n";
+ }
+
+ $sects{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++;
+ $sects{FOOTNOTES} .= $_[0];
+ $sects{FOOTNOTES} .= "\n\n";
+}
+
+# stolen from Symbol.pm
+{
+ my $genseq = 0;
+ sub gensym
+ {
+ my $name = "GEN" . $genseq++;
+ my $ref = \*{$name};
+ delete $::{$name};
+ return $ref;
+ }
+}
diff --git a/scripts/tracetool.py b/scripts/tracetool.py
new file mode 100755
index 00000000..83bde7bd
--- /dev/null
+++ b/scripts/tracetool.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Command-line wrapper for the tracetool machinery.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+import sys
+import getopt
+
+from tracetool import error_write, out
+import tracetool.backend
+import tracetool.format
+
+
+_SCRIPT = ""
+
+def error_opt(msg = None):
+ if msg is not None:
+ error_write("Error: " + msg + "\n")
+
+ backend_descr = "\n".join([ " %-15s %s" % (n, d)
+ for n,d in tracetool.backend.get_list() ])
+ format_descr = "\n".join([ " %-15s %s" % (n, d)
+ for n,d in tracetool.format.get_list() ])
+ error_write("""\
+Usage: %(script)s --format=<format> --backends=<backends> [<options>]
+
+Backends:
+%(backends)s
+
+Formats:
+%(formats)s
+
+Options:
+ --help This help message.
+ --list-backends Print list of available backends.
+ --check-backends Check if the given backend is valid.
+ --binary <path> Full path to QEMU binary.
+ --target-type <type> QEMU emulator target type ('system' or 'user').
+ --target-name <name> QEMU emulator target name.
+ --probe-prefix <prefix> Prefix for dtrace probe names
+ (default: qemu-<target-type>-<target-name>).\
+""" % {
+ "script" : _SCRIPT,
+ "backends" : backend_descr,
+ "formats" : format_descr,
+ })
+
+ if msg is None:
+ sys.exit(0)
+ else:
+ sys.exit(1)
+
+
+def main(args):
+ global _SCRIPT
+ _SCRIPT = args[0]
+
+ long_opts = ["backends=", "format=", "help", "list-backends",
+ "check-backends"]
+ long_opts += ["binary=", "target-type=", "target-name=", "probe-prefix="]
+
+ try:
+ opts, args = getopt.getopt(args[1:], "", long_opts)
+ except getopt.GetoptError, err:
+ error_opt(str(err))
+
+ check_backends = False
+ arg_backends = []
+ arg_format = ""
+ binary = None
+ target_type = None
+ target_name = None
+ probe_prefix = None
+ for opt, arg in opts:
+ if opt == "--help":
+ error_opt()
+
+ elif opt == "--backends":
+ arg_backends = arg.split(",")
+ elif opt == "--format":
+ arg_format = arg
+
+ elif opt == "--list-backends":
+ public_backends = tracetool.backend.get_list(only_public = True)
+ out(", ".join([ b for b,_ in public_backends ]))
+ sys.exit(0)
+ elif opt == "--check-backends":
+ check_backends = True
+
+ elif opt == "--binary":
+ binary = arg
+ elif opt == '--target-type':
+ target_type = arg
+ elif opt == '--target-name':
+ target_name = arg
+ elif opt == '--probe-prefix':
+ probe_prefix = arg
+
+ else:
+ error_opt("unhandled option: %s" % opt)
+
+ if len(arg_backends) == 0:
+ error_opt("no backends specified")
+
+ if check_backends:
+ for backend in arg_backends:
+ if not tracetool.backend.exists(backend):
+ sys.exit(1)
+ sys.exit(0)
+
+ if arg_format == "stap":
+ if binary is None:
+ error_opt("--binary is required for SystemTAP tapset generator")
+ if probe_prefix is None and target_type is None:
+ error_opt("--target-type is required for SystemTAP tapset generator")
+ if probe_prefix is None and target_name is None:
+ error_opt("--target-name is required for SystemTAP tapset generator")
+
+ if probe_prefix is None:
+ probe_prefix = ".".join(["qemu", target_type, target_name])
+
+ try:
+ tracetool.generate(sys.stdin, arg_format, arg_backends,
+ binary=binary, probe_prefix=probe_prefix)
+ except tracetool.TracetoolError, e:
+ error_opt(str(e))
+
+if __name__ == "__main__":
+ main(sys.argv)
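
The wrapper ultimately just feeds stdin to tracetool.generate(). A hedged sketch of calling that API directly (Python 2, as above; the event declaration is made up, and 'h' and 'nop' are assumed to be an available format and backend):

    import StringIO
    import tracetool

    events = StringIO.StringIO(
        'foo_event(int num, const char *name) "num %d name %s"\n')

    # Writes the generated header for the "nop" backend to stdout.
    tracetool.generate(events, 'h', ['nop'])
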
diff --git a/scripts/tracetool/__init__.py b/scripts/tracetool/__init__.py
new file mode 100644
index 00000000..181675f0
--- /dev/null
+++ b/scripts/tracetool/__init__.py
@@ -0,0 +1,366 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Machinery for generating tracing-related intermediate files.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+import re
+import sys
+import weakref
+
+import tracetool.format
+import tracetool.backend
+import tracetool.transform
+
+
+def error_write(*lines):
+ """Write a set of error lines."""
+ sys.stderr.writelines("\n".join(lines) + "\n")
+
+def error(*lines):
+ """Write a set of error lines and exit."""
+ error_write(*lines)
+ sys.exit(1)
+
+
+def out(*lines, **kwargs):
+ """Write a set of output lines.
+
+ You can use kwargs as a shorthand for mapping variables when formatting all
+ the strings in lines.
+ """
+ lines = [ l % kwargs for l in lines ]
+ sys.stdout.writelines("\n".join(lines) + "\n")
+
+
+class Arguments:
+ """Event arguments description."""
+
+ def __init__(self, args):
+ """
+ Parameters
+ ----------
+ args :
+ List of (type, name) tuples.
+ """
+ self._args = args
+
+ def copy(self):
+ """Create a new copy."""
+ return Arguments(list(self._args))
+
+ @staticmethod
+ def build(arg_str):
+ """Build and Arguments instance from an argument string.
+
+ Parameters
+ ----------
+ arg_str : str
+ String describing the event arguments.
+ """
+ res = []
+ for arg in arg_str.split(","):
+ arg = arg.strip()
+ if arg == 'void':
+ continue
+
+ if '*' in arg:
+ arg_type, identifier = arg.rsplit('*', 1)
+ arg_type += '*'
+ identifier = identifier.strip()
+ else:
+ arg_type, identifier = arg.rsplit(None, 1)
+
+ res.append((arg_type, identifier))
+ return Arguments(res)
+
+ def __iter__(self):
+ """Iterate over the (type, name) pairs."""
+ return iter(self._args)
+
+ def __len__(self):
+ """Number of arguments."""
+ return len(self._args)
+
+ def __str__(self):
+ """String suitable for declaring function arguments."""
+ if len(self._args) == 0:
+ return "void"
+ else:
+ return ", ".join([ " ".join([t, n]) for t,n in self._args ])
+
+ def __repr__(self):
+ """Evaluable string representation for this object."""
+ return "Arguments(\"%s\")" % str(self)
+
+ def names(self):
+ """List of argument names."""
+ return [ name for _, name in self._args ]
+
+ def types(self):
+ """List of argument types."""
+ return [ type_ for type_, _ in self._args ]
+
+ def transform(self, *trans):
+ """Return a new Arguments instance with transformed types.
+
+ The types in the resulting Arguments instance are transformed according
+ to tracetool.transform.transform_type.
+ """
+ res = []
+ for type_, name in self._args:
+ res.append((tracetool.transform.transform_type(type_, *trans),
+ name))
+ return Arguments(res)
+
+
+class Event(object):
+ """Event description.
+
+ Attributes
+ ----------
+ name : str
+ The event name.
+ fmt : str
+ The event format string.
+ properties : set(str)
+ Properties of the event.
+ args : Arguments
+ The event arguments.
+
+ """
+
+ _CRE = re.compile("((?P<props>[\w\s]+)\s+)?"
+ "(?P<name>\w+)"
+ "\((?P<args>[^)]*)\)"
+ "\s*"
+ "(?:(?:(?P<fmt_trans>\".+),)?\s*(?P<fmt>\".+))?"
+ "\s*")
+
+ _VALID_PROPS = set(["disable", "tcg", "tcg-trans", "tcg-exec"])
+
+ def __init__(self, name, props, fmt, args, orig=None):
+ """
+ Parameters
+ ----------
+ name : string
+ Event name.
+ props : list of str
+ Property names.
+ fmt : str, list of str
+ Event printing format (or formats).
+ args : Arguments
+ Event arguments.
+ orig : Event or None
+ Original Event before transformation.
+
+ """
+ self.name = name
+ self.properties = props
+ self.fmt = fmt
+ self.args = args
+
+ if orig is None:
+ self.original = weakref.ref(self)
+ else:
+ self.original = orig
+
+ unknown_props = set(self.properties) - self._VALID_PROPS
+ if len(unknown_props) > 0:
+ raise ValueError("Unknown properties: %s"
+ % ", ".join(unknown_props))
+ assert isinstance(self.fmt, str) or len(self.fmt) == 2
+
+ def copy(self):
+ """Create a new copy."""
+ return Event(self.name, list(self.properties), self.fmt,
+ self.args.copy(), self)
+
+ @staticmethod
+ def build(line_str):
+ """Build an Event instance from a string.
+
+ Parameters
+ ----------
+ line_str : str
+ Line describing the event.
+ """
+ m = Event._CRE.match(line_str)
+ assert m is not None
+ groups = m.groupdict('')
+
+ name = groups["name"]
+ props = groups["props"].split()
+ fmt = groups["fmt"]
+ fmt_trans = groups["fmt_trans"]
+ if len(fmt_trans) > 0:
+ fmt = [fmt_trans, fmt]
+ args = Arguments.build(groups["args"])
+
+ if "tcg-trans" in props:
+ raise ValueError("Invalid property 'tcg-trans'")
+ if "tcg-exec" in props:
+ raise ValueError("Invalid property 'tcg-exec'")
+ if "tcg" not in props and not isinstance(fmt, str):
+ raise ValueError("Only events with 'tcg' property can have two formats")
+ if "tcg" in props and isinstance(fmt, str):
+ raise ValueError("Events with 'tcg' property must have two formats")
+
+ return Event(name, props, fmt, args)
+
+ def __repr__(self):
+ """Evaluable string representation for this object."""
+ if isinstance(self.fmt, str):
+ fmt = self.fmt
+ else:
+ fmt = "%s, %s" % (self.fmt[0], self.fmt[1])
+ return "Event('%s %s(%s) %s')" % (" ".join(self.properties),
+ self.name,
+ self.args,
+ fmt)
+
+ _FMT = re.compile("(%[\d\.]*\w+|%.*PRI\S+)")
+
+ def formats(self):
+ """List of argument print formats."""
+ assert not isinstance(self.fmt, list)
+ return self._FMT.findall(self.fmt)
+
+ QEMU_TRACE = "trace_%(name)s"
+ QEMU_TRACE_TCG = QEMU_TRACE + "_tcg"
+
+ def api(self, fmt=None):
+ if fmt is None:
+ fmt = Event.QEMU_TRACE
+ return fmt % {"name": self.name}
+
+ def transform(self, *trans):
+ """Return a new Event with transformed Arguments."""
+ return Event(self.name,
+ list(self.properties),
+ self.fmt,
+ self.args.transform(*trans),
+ self)
+
+
+def _read_events(fobj):
+ events = []
+ for line in fobj:
+ if not line.strip():
+ continue
+ if line.lstrip().startswith('#'):
+ continue
+
+ event = Event.build(line)
+
+ # transform TCG-enabled events
+ if "tcg" not in event.properties:
+ events.append(event)
+ else:
+ event_trans = event.copy()
+ event_trans.name += "_trans"
+ event_trans.properties += ["tcg-trans"]
+ event_trans.fmt = event.fmt[0]
+ args_trans = []
+ for atrans, aorig in zip(
+ event_trans.transform(tracetool.transform.TCG_2_HOST).args,
+ event.args):
+ if atrans == aorig:
+ args_trans.append(atrans)
+ event_trans.args = Arguments(args_trans)
+ event_trans = event_trans.copy()
+
+ event_exec = event.copy()
+ event_exec.name += "_exec"
+ event_exec.properties += ["tcg-exec"]
+ event_exec.fmt = event.fmt[1]
+ event_exec = event_exec.transform(tracetool.transform.TCG_2_HOST)
+
+ new_event = [event_trans, event_exec]
+ event.event_trans, event.event_exec = new_event
+
+ events.extend(new_event)
+
+ return events
+
+
+class TracetoolError (Exception):
+ """Exception for calls to generate."""
+ pass
+
+
+def try_import(mod_name, attr_name=None, attr_default=None):
+ """Try to import a module and get an attribute from it.
+
+ Parameters
+ ----------
+ mod_name : str
+ Module name.
+ attr_name : str, optional
+ Name of an attribute in the module.
+ attr_default : optional
+ Default value if the attribute does not exist in the module.
+
+ Returns
+ -------
+ A pair indicating whether the module could be imported and the module or
+ object or attribute value.
+ """
+ try:
+ module = __import__(mod_name, globals(), locals(), ["__package__"])
+ if attr_name is None:
+ return True, module
+ return True, getattr(module, str(attr_name), attr_default)
+ except ImportError:
+ return False, None
+
+
+def generate(fevents, format, backends,
+ binary=None, probe_prefix=None):
+ """Generate the output for the given (format, backends) pair.
+
+ Parameters
+ ----------
+ fevents : file
+ Event description file.
+ format : str
+ Output format name.
+ backends : list
+ Output backend names.
+ binary : str or None
+ See tracetool.backend.dtrace.BINARY.
+ probe_prefix : str or None
+ See tracetool.backend.dtrace.PROBEPREFIX.
+ """
+ # fix strange python error (UnboundLocalError tracetool)
+ import tracetool
+
+ format = str(format)
+ if len(format) == 0:
+ raise TracetoolError("format not set")
+ if not tracetool.format.exists(format):
+ raise TracetoolError("unknown format: %s" % format)
+
+ if len(backends) == 0:
+ raise TracetoolError("no backends specified")
+ for backend in backends:
+ if not tracetool.backend.exists(backend):
+ raise TracetoolError("unknown backend: %s" % backend)
+ backend = tracetool.backend.Wrapper(backends, format)
+
+ import tracetool.backend.dtrace
+ tracetool.backend.dtrace.BINARY = binary
+ tracetool.backend.dtrace.PROBEPREFIX = probe_prefix
+
+ events = _read_events(fevents)
+
+ tracetool.format.generate(events, format, backend)
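
To illustrate how one trace-events line is decomposed by these classes, a small sketch (the declaration is invented; run from the scripts directory so the tracetool package is importable):

    from tracetool import Event

    e = Event.build('disable foo_read(uint64_t addr, int len) "addr 0x%x len %d"')

    print e.name           # foo_read
    print e.properties     # ['disable']
    print e.args.names()   # ['addr', 'len']
    print e.args.types()   # ['uint64_t', 'int']
    print str(e.args)      # uint64_t addr, int len
    print e.formats()      # ['%x', '%d']
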
diff --git a/scripts/tracetool/backend/__init__.py b/scripts/tracetool/backend/__init__.py
new file mode 100644
index 00000000..d4b6dab9
--- /dev/null
+++ b/scripts/tracetool/backend/__init__.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Backend management.
+
+
+Creating new backends
+---------------------
+
+A new backend named 'foo-bar' corresponds to Python module
+'tracetool/backend/foo_bar.py'.
+
+A backend module should provide a docstring, whose first non-empty line will be
+considered its short description.
+
+All backends must generate their contents through the 'tracetool.out' routine.
+
+
+Backend attributes
+------------------
+
+========= ====================================================================
+Attribute Description
+========= ====================================================================
+PUBLIC If exists and is set to 'True', the backend is considered "public".
+========= ====================================================================
+
+
+Backend functions
+-----------------
+
+All the following functions are optional, and no output will be generated if
+they do not exist.
+
+=============================== ==============================================
+Function Description
+=============================== ==============================================
+generate_<format>_begin(events) Generate backend- and format-specific file
+ header contents.
+generate_<format>_end(events) Generate backend- and format-specific file
+ footer contents.
+generate_<format>(event) Generate backend- and format-specific contents
+ for the given event.
+=============================== ==============================================
+
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+import os
+
+import tracetool
+
+
+def get_list(only_public = False):
+ """Get a list of (name, description) pairs."""
+ res = [("nop", "Tracing disabled.")]
+ modnames = []
+ for filename in os.listdir(tracetool.backend.__path__[0]):
+ if filename.endswith('.py') and filename != '__init__.py':
+ modnames.append(filename.rsplit('.', 1)[0])
+ for modname in sorted(modnames):
+ module = tracetool.try_import("tracetool.backend." + modname)
+
+ # just in case; should never fail unless non-module files are put there
+ if not module[0]:
+ continue
+ module = module[1]
+
+ public = getattr(module, "PUBLIC", False)
+ if only_public and not public:
+ continue
+
+ doc = module.__doc__
+ if doc is None:
+ doc = ""
+ doc = doc.strip().split("\n")[0]
+
+ name = modname.replace("_", "-")
+ res.append((name, doc))
+ return res
+
+
+def exists(name):
+ """Return whether the given backend exists."""
+ if len(name) == 0:
+ return False
+ if name == "nop":
+ return True
+ name = name.replace("-", "_")
+ return tracetool.try_import("tracetool.backend." + name)[1]
+
+
+class Wrapper:
+ def __init__(self, backends, format):
+ self._backends = [backend.replace("-", "_") for backend in backends]
+ self._format = format.replace("-", "_")
+ for backend in self._backends:
+ assert exists(backend)
+ assert tracetool.format.exists(self._format)
+
+ def _run_function(self, name, *args, **kwargs):
+ for backend in self._backends:
+ func = tracetool.try_import("tracetool.backend." + backend,
+ name % self._format, None)[1]
+ if func is not None:
+ func(*args, **kwargs)
+
+ def generate_begin(self, events):
+ self._run_function("generate_%s_begin", events)
+
+ def generate(self, event):
+ self._run_function("generate_%s", event)
+
+ def generate_end(self, events):
+ self._run_function("generate_%s_end", events)
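To make the discovery and dispatch conventions from the docstring concrete: get_list() scans the package directory for backend modules, exists() maps the dashed command-line name onto the underscored module name, and Wrapper fans a single call out to every selected backend by looking up generate_<format> hooks. A short sketch, assuming the scripts directory is importable:

    import tracetool.backend
    import tracetool.format   # Wrapper also checks that the format exists

    # Enumerate backends; the built-in "nop" entry always comes first.
    for name, desc in tracetool.backend.get_list(only_public=True):
        print("%-8s %s" % (name, desc))

    # "foo-bar" on the command line maps to tracetool/backend/foo_bar.py.
    assert tracetool.backend.exists("dtrace")

    # For format "h" with backends "simple" and "stderr", generate_begin()
    # calls generate_h_begin() in each backend module that defines it.
    w = tracetool.backend.Wrapper(["simple", "stderr"], "h")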
diff --git a/scripts/tracetool/backend/dtrace.py b/scripts/tracetool/backend/dtrace.py
new file mode 100644
index 00000000..fabfe998
--- /dev/null
+++ b/scripts/tracetool/backend/dtrace.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+DTrace/SystemTAP backend.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+
+
+PUBLIC = True
+
+
+PROBEPREFIX = None
+
+def probeprefix():
+ if PROBEPREFIX is None:
+ raise ValueError("you must set PROBEPREFIX")
+ return PROBEPREFIX
+
+
+BINARY = None
+
+def binary():
+ if BINARY is None:
+ raise ValueError("you must set BINARY")
+ return BINARY
+
+
+def generate_h_begin(events):
+ out('#include "trace/generated-tracers-dtrace.h"',
+ '')
+
+
+def generate_h(event):
+ out(' QEMU_%(uppername)s(%(argnames)s);',
+ uppername=event.name.upper(),
+ argnames=", ".join(event.args.names()))
diff --git a/scripts/tracetool/backend/ftrace.py b/scripts/tracetool/backend/ftrace.py
new file mode 100644
index 00000000..d798c713
--- /dev/null
+++ b/scripts/tracetool/backend/ftrace.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Ftrace built-in backend.
+"""
+
+__author__ = "Eiichi Tsukata <eiichi.tsukata.xh@hitachi.com>"
+__copyright__ = "Copyright (C) 2013 Hitachi, Ltd."
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@redhat.com"
+
+
+from tracetool import out
+
+
+PUBLIC = True
+
+
+def generate_h_begin(events):
+ out('#include "trace/ftrace.h"',
+ '#include "trace/control.h"',
+ '')
+
+
+def generate_h(event):
+ argnames = ", ".join(event.args.names())
+ if len(event.args) > 0:
+ argnames = ", " + argnames
+
+ out(' {',
+ ' char ftrace_buf[MAX_TRACE_STRLEN];',
+ ' int unused __attribute__ ((unused));',
+ ' int trlen;',
+ ' if (trace_event_get_state(%(event_id)s)) {',
+ ' trlen = snprintf(ftrace_buf, MAX_TRACE_STRLEN,',
+ ' "%(name)s " %(fmt)s "\\n" %(argnames)s);',
+ ' trlen = MIN(trlen, MAX_TRACE_STRLEN - 1);',
+ ' unused = write(trace_marker_fd, ftrace_buf, trlen);',
+ ' }',
+ ' }',
+ name=event.name,
+ args=event.args,
+ event_id="TRACE_" + event.name.upper(),
+ fmt=event.fmt.rstrip("\n"),
+ argnames=argnames)
diff --git a/scripts/tracetool/backend/simple.py b/scripts/tracetool/backend/simple.py
new file mode 100644
index 00000000..e8c2cd57
--- /dev/null
+++ b/scripts/tracetool/backend/simple.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Simple built-in backend.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+
+
+PUBLIC = True
+
+
+def is_string(arg):
+ strtype = ('const char*', 'char*', 'const char *', 'char *')
+ if arg.lstrip().startswith(strtype):
+ return True
+ else:
+ return False
+
+
+def generate_h_begin(events):
+ for event in events:
+ out('void _simple_%(api)s(%(args)s);',
+ api=event.api(),
+ args=event.args)
+ out('')
+
+
+def generate_h(event):
+ out(' _simple_%(api)s(%(args)s);',
+ api=event.api(),
+ args=", ".join(event.args.names()))
+
+
+def generate_c_begin(events):
+ out('#include "trace.h"',
+ '#include "trace/control.h"',
+ '#include "trace/simple.h"',
+ '')
+
+
+def generate_c(event):
+ out('void _simple_%(api)s(%(args)s)',
+ '{',
+ ' TraceBufferRecord rec;',
+ api=event.api(),
+ args=event.args)
+ sizes = []
+ for type_, name in event.args:
+ if is_string(type_):
+ out(' size_t arg%(name)s_len = %(name)s ? MIN(strlen(%(name)s), MAX_TRACE_STRLEN) : 0;',
+ name=name)
+ strsizeinfo = "4 + arg%s_len" % name
+ sizes.append(strsizeinfo)
+ else:
+ sizes.append("8")
+ sizestr = " + ".join(sizes)
+ if len(event.args) == 0:
+ sizestr = '0'
+
+
+ out('',
+ ' if (!trace_event_get_state(%(event_id)s)) {',
+ ' return;',
+ ' }',
+ '',
+ ' if (trace_record_start(&rec, %(event_id)s, %(size_str)s)) {',
+ ' return; /* Trace Buffer Full, Event Dropped ! */',
+ ' }',
+ event_id='TRACE_' + event.name.upper(),
+ size_str=sizestr)
+
+ if len(event.args) > 0:
+ for type_, name in event.args:
+ # string
+ if is_string(type_):
+ out(' trace_record_write_str(&rec, %(name)s, arg%(name)s_len);',
+ name=name)
+ # pointer var (not string)
+ elif type_.endswith('*'):
+ out(' trace_record_write_u64(&rec, (uintptr_t)(uint64_t *)%(name)s);',
+ name=name)
+ # primitive data type
+ else:
+ out(' trace_record_write_u64(&rec, (uint64_t)%(name)s);',
+ name=name)
+
+ out(' trace_record_finish(&rec);',
+ '}',
+ '')
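The least obvious part of generate_c() above is the record-size expression: every non-string argument is serialized as a fixed 8-byte value, while a string contributes a 4-byte length prefix plus its runtime length (capped at MAX_TRACE_STRLEN on the C side). A small sketch of that sizing rule using the module's own is_string() helper; the argument list is hypothetical:

    from tracetool.backend.simple import is_string

    # Hypothetical (type, name) pairs, in the shape of event.args.
    args = [("const char *", "path"), ("uint64_t", "offset"), ("int", "ret")]

    sizes = []
    for type_, name in args:
        if is_string(type_):
            sizes.append("4 + arg%s_len" % name)   # length prefix + strlen()
        else:
            sizes.append("8")                      # fixed 64-bit slot
    print(" + ".join(sizes) if sizes else "0")
    # -> 4 + argpath_len + 8 + 8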
diff --git a/scripts/tracetool/backend/stderr.py b/scripts/tracetool/backend/stderr.py
new file mode 100644
index 00000000..ca580546
--- /dev/null
+++ b/scripts/tracetool/backend/stderr.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Stderr built-in backend.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+
+
+PUBLIC = True
+
+
+def generate_h_begin(events):
+ out('#include <stdio.h>',
+ '#include <sys/time.h>',
+ '#include <sys/types.h>',
+ '#include <unistd.h>',
+ '#include "trace/control.h"',
+ '')
+
+
+def generate_h(event):
+ argnames = ", ".join(event.args.names())
+ if len(event.args) > 0:
+ argnames = ", " + argnames
+
+ out(' if (trace_event_get_state(%(event_id)s)) {',
+ ' struct timeval _now;',
+ ' gettimeofday(&_now, NULL);',
+ ' fprintf(stderr, "%%d@%%zd.%%06zd:%(name)s " %(fmt)s "\\n",',
+ ' getpid(),',
+ ' (size_t)_now.tv_sec, (size_t)_now.tv_usec',
+ ' %(argnames)s);',
+ ' }',
+ event_id="TRACE_" + event.name.upper(),
+ name=event.name,
+ fmt=event.fmt.rstrip("\n"),
+ argnames=argnames)
diff --git a/scripts/tracetool/backend/ust.py b/scripts/tracetool/backend/ust.py
new file mode 100644
index 00000000..2f8f44ab
--- /dev/null
+++ b/scripts/tracetool/backend/ust.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+LTTng User Space Tracing backend.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+
+
+PUBLIC = True
+
+
+def generate_h_begin(events):
+ out('#include <lttng/tracepoint.h>',
+ '#include "trace/generated-ust-provider.h"',
+ '')
+
+
+def generate_h(event):
+ argnames = ", ".join(event.args.names())
+ if len(event.args) > 0:
+ argnames = ", " + argnames
+
+ out(' tracepoint(qemu, %(name)s%(tp_args)s);',
+ name=event.name,
+ tp_args=argnames)
diff --git a/scripts/tracetool/format/__init__.py b/scripts/tracetool/format/__init__.py
new file mode 100644
index 00000000..812570ff
--- /dev/null
+++ b/scripts/tracetool/format/__init__.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Format management.
+
+
+Creating new formats
+--------------------
+
+A new format named 'foo-bar' corresponds to Python module
+'tracetool/format/foo_bar.py'.
+
+A format module should provide a docstring, whose first non-empty line will be
+considered its short description.
+
+All formats must generate their contents through the 'tracetool.out' routine.
+
+
+Format functions
+----------------
+
+======== ==================================================================
+Function Description
+======== ==================================================================
+generate Called to generate a format-specific file.
+======== ==================================================================
+
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+import os
+
+import tracetool
+
+
+def get_list():
+ """Get a list of (name, description) pairs."""
+ res = []
+ modnames = []
+ for filename in os.listdir(tracetool.format.__path__[0]):
+ if filename.endswith('.py') and filename != '__init__.py':
+ modnames.append(filename.rsplit('.', 1)[0])
+ for modname in sorted(modnames):
+ module = tracetool.try_import("tracetool.format." + modname)
+
+ # just in case; should never fail unless non-module files are put there
+ if not module[0]:
+ continue
+ module = module[1]
+
+ doc = module.__doc__
+ if doc is None:
+ doc = ""
+ doc = doc.strip().split("\n")[0]
+
+ name = modname.replace("_", "-")
+ res.append((name, doc))
+ return res
+
+
+def exists(name):
+ """Return whether the given format exists."""
+ if len(name) == 0:
+ return False
+ name = name.replace("-", "_")
+ return tracetool.try_import("tracetool.format." + name)[1]
+
+
+def generate(events, format, backend):
+ if not exists(format):
+ raise ValueError("unknown format: %s" % format)
+ format = format.replace("-", "_")
+ func = tracetool.try_import("tracetool.format." + format,
+ "generate")[1]
+ if func is None:
+ raise AttributeError("format has no 'generate': %s" % format)
+ func(events, backend)
diff --git a/scripts/tracetool/format/c.py b/scripts/tracetool/format/c.py
new file mode 100644
index 00000000..699598fb
--- /dev/null
+++ b/scripts/tracetool/format/c.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+trace/generated-tracers.c
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+
+
+def generate(events, backend):
+ events = [e for e in events
+ if "disable" not in e.properties]
+
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '')
+ backend.generate_begin(events)
+ for event in events:
+ backend.generate(event)
+ backend.generate_end(events)
diff --git a/scripts/tracetool/format/d.py b/scripts/tracetool/format/d.py
new file mode 100644
index 00000000..c77d5b7a
--- /dev/null
+++ b/scripts/tracetool/format/d.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+trace/generated-tracers.dtrace (DTrace only).
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+
+
+# Reserved keywords from
+# https://wikis.oracle.com/display/DTrace/Types,+Operators+and+Expressions
+RESERVED_WORDS = (
+ 'auto', 'goto', 'sizeof', 'break', 'if', 'static', 'case', 'import',
+ 'string', 'char', 'inline', 'stringof', 'const', 'int', 'struct',
+ 'continue', 'long', 'switch', 'counter', 'offsetof', 'this',
+ 'default', 'probe', 'translator', 'do', 'provider', 'typedef',
+ 'double', 'register', 'union', 'else', 'restrict', 'unsigned',
+ 'enum', 'return', 'void', 'extern', 'self', 'volatile', 'float',
+ 'short', 'while', 'for', 'signed', 'xlate',
+)
+
+
+def generate(events, backend):
+ events = [e for e in events
+ if "disable" not in e.properties]
+
+    out('/* This file is autogenerated by tracetool, do not edit. */',
+ '',
+ 'provider qemu {')
+
+ for e in events:
+ args = []
+ for type_, name in e.args:
+ if name in RESERVED_WORDS:
+ name += '_'
+ args.append(type_ + ' ' + name)
+
+ # Define prototype for probe arguments
+ out('',
+ 'probe %(name)s(%(args)s);',
+ name=e.name,
+ args=','.join(args))
+
+ out('',
+ '};')
diff --git a/scripts/tracetool/format/events_c.py b/scripts/tracetool/format/events_c.py
new file mode 100644
index 00000000..2d97fa31
--- /dev/null
+++ b/scripts/tracetool/format/events_c.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+trace/generated-events.c
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+
+
+def generate(events, backend):
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '',
+ '#include "trace.h"',
+ '#include "trace/generated-events.h"',
+ '#include "trace/control.h"',
+ '')
+
+ out('TraceEvent trace_events[TRACE_EVENT_COUNT] = {')
+
+ for e in events:
+ out(' { .id = %(id)s, .name = \"%(name)s\", .sstate = %(sstate)s, .dstate = 0 },',
+ id = "TRACE_" + e.name.upper(),
+ name = e.name,
+ sstate = "TRACE_%s_ENABLED" % e.name.upper())
+
+ out('};',
+ '')
diff --git a/scripts/tracetool/format/events_h.py b/scripts/tracetool/format/events_h.py
new file mode 100644
index 00000000..9f114a34
--- /dev/null
+++ b/scripts/tracetool/format/events_h.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+trace/generated-events.h
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+
+
+def generate(events, backend):
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '',
+ '#ifndef TRACE__GENERATED_EVENTS_H',
+ '#define TRACE__GENERATED_EVENTS_H',
+ '',
+ '#include <stdbool.h>',
+ '')
+
+ # event identifiers
+ out('typedef enum {')
+
+ for e in events:
+ out(' TRACE_%s,' % e.name.upper())
+
+ out(' TRACE_EVENT_COUNT',
+ '} TraceEventID;')
+
+ # static state
+ for e in events:
+ if 'disable' in e.properties:
+ enabled = 0
+ else:
+ enabled = 1
+ if "tcg-trans" in e.properties:
+ # a single define for the two "sub-events"
+ out('#define TRACE_%(name)s_ENABLED %(enabled)d',
+ name=e.original.original.name.upper(),
+ enabled=enabled)
+ out('#define TRACE_%s_ENABLED %d' % (e.name.upper(), enabled))
+
+ out('#include "trace/event-internal.h"',
+ '',
+ '#endif /* TRACE__GENERATED_EVENTS_H */')
diff --git a/scripts/tracetool/format/h.py b/scripts/tracetool/format/h.py
new file mode 100644
index 00000000..9b394300
--- /dev/null
+++ b/scripts/tracetool/format/h.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+trace/generated-tracers.h
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+
+
+def generate(events, backend):
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '',
+ '#ifndef TRACE__GENERATED_TRACERS_H',
+ '#define TRACE__GENERATED_TRACERS_H',
+ '',
+ '#include "qemu-common.h"',
+ '')
+
+ backend.generate_begin(events)
+
+ for e in events:
+ out('',
+ 'static inline void %(api)s(%(args)s)',
+ '{',
+ api=e.api(),
+ args=e.args)
+
+ if "disable" not in e.properties:
+ backend.generate(e)
+
+ out('}')
+
+ backend.generate_end(events)
+
+ out('#endif /* TRACE__GENERATED_TRACERS_H */')
diff --git a/scripts/tracetool/format/simpletrace_stap.py b/scripts/tracetool/format/simpletrace_stap.py
new file mode 100644
index 00000000..7e44bc18
--- /dev/null
+++ b/scripts/tracetool/format/simpletrace_stap.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Generate .stp file that outputs simpletrace binary traces (DTrace with SystemTAP only).
+"""
+
+__author__ = "Stefan Hajnoczi <stefanha@redhat.com>"
+__copyright__ = "Copyright (C) 2014, Red Hat, Inc."
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@redhat.com"
+
+
+from tracetool import out
+from tracetool.backend.dtrace import binary, probeprefix
+from tracetool.backend.simple import is_string
+from tracetool.format.stap import stap_escape
+
+
+def generate(events, backend):
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '')
+
+ for event_id, e in enumerate(events):
+ if 'disable' in e.properties:
+ continue
+
+ out('probe %(probeprefix)s.simpletrace.%(name)s = %(probeprefix)s.%(name)s ?',
+ '{',
+ probeprefix=probeprefix(),
+ name=e.name)
+
+ # Calculate record size
+ sizes = ['24'] # sizeof(TraceRecord)
+ for type_, name in e.args:
+ name = stap_escape(name)
+ if is_string(type_):
+ out(' try {',
+ ' arg%(name)s_str = %(name)s ? user_string_n(%(name)s, 512) : "<null>"',
+ ' } catch {}',
+ ' arg%(name)s_len = strlen(arg%(name)s_str)',
+ name=name)
+ sizes.append('4 + arg%s_len' % name)
+ else:
+ sizes.append('8')
+ sizestr = ' + '.join(sizes)
+
+ # Generate format string and value pairs for record header and arguments
+ fields = [('8b', str(event_id)),
+ ('8b', 'gettimeofday_ns()'),
+ ('4b', sizestr),
+ ('4b', 'pid()')]
+ for type_, name in e.args:
+ name = stap_escape(name)
+ if is_string(type_):
+ fields.extend([('4b', 'arg%s_len' % name),
+ ('.*s', 'arg%s_len, arg%s_str' % (name, name))])
+ else:
+ fields.append(('8b', name))
+
+ # Emit the entire record in a single SystemTap printf()
+ fmt_str = '%'.join(fmt for fmt, _ in fields)
+ arg_str = ', '.join(arg for _, arg in fields)
+ out(' printf("%%%(fmt_str)s", %(arg_str)s)',
+ fmt_str=fmt_str, arg_str=arg_str)
+
+ out('}')
+
+ out()
diff --git a/scripts/tracetool/format/stap.py b/scripts/tracetool/format/stap.py
new file mode 100644
index 00000000..9e780f1b
--- /dev/null
+++ b/scripts/tracetool/format/stap.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Generate .stp file (DTrace with SystemTAP only).
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+from tracetool.backend.dtrace import binary, probeprefix
+
+
+# Technically 'self' is not used by systemtap yet, but
+# they recommended we keep it in the reserved list anyway
+RESERVED_WORDS = (
+ 'break', 'catch', 'continue', 'delete', 'else', 'for',
+ 'foreach', 'function', 'global', 'if', 'in', 'limit',
+ 'long', 'next', 'probe', 'return', 'self', 'string',
+ 'try', 'while'
+ )
+
+
+def stap_escape(identifier):
+ # Append underscore to reserved keywords
+ if identifier in RESERVED_WORDS:
+ return identifier + '_'
+ return identifier
+
+
+def generate(events, backend):
+ events = [e for e in events
+ if "disable" not in e.properties]
+
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '')
+
+ for e in events:
+ # Define prototype for probe arguments
+ out('probe %(probeprefix)s.%(name)s = process("%(binary)s").mark("%(name)s")',
+ '{',
+ probeprefix=probeprefix(),
+ name=e.name,
+ binary=binary())
+
+ i = 1
+ if len(e.args) > 0:
+ for name in e.args.names():
+ name = stap_escape(name)
+ out(' %s = $arg%d;' % (name, i))
+ i += 1
+
+ out('}')
+
+ out()
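stap_escape() above exists because SystemTap rejects its reserved words as local variable names, so an event argument that happens to be called, say, 'string' or 'next' must be renamed in the emitted probe body (this is also why simpletrace_stap.py imports it). A tiny illustration; the argument names are made up:

    from tracetool.format.stap import stap_escape

    for name in ("addr", "string", "next", "limit"):
        print("%s -> %s" % (name, stap_escape(name)))
    # addr -> addr
    # string -> string_
    # next -> next_
    # limit -> limit_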
diff --git a/scripts/tracetool/format/tcg_h.py b/scripts/tracetool/format/tcg_h.py
new file mode 100644
index 00000000..f676b666
--- /dev/null
+++ b/scripts/tracetool/format/tcg_h.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Generate .h file for TCG code generation.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+
+
+def generate(events, backend):
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '/* You must include this file after the inclusion of helper.h */',
+ '',
+ '#ifndef TRACE__GENERATED_TCG_TRACERS_H',
+ '#define TRACE__GENERATED_TCG_TRACERS_H',
+ '',
+ '#include <stdint.h>',
+ '',
+ '#include "trace.h"',
+ '#include "exec/helper-proto.h"',
+ '',
+ )
+
+ for e in events:
+ # just keep one of them
+ if "tcg-trans" not in e.properties:
+ continue
+
+ # get the original event definition
+ e = e.original.original
+
+ out('static inline void %(name_tcg)s(%(args)s)',
+ '{',
+ name_tcg=e.api(e.QEMU_TRACE_TCG),
+ args=e.args)
+
+ if "disable" not in e.properties:
+ out(' %(name_trans)s(%(argnames_trans)s);',
+ ' gen_helper_%(name_exec)s(%(argnames_exec)s);',
+ name_trans=e.event_trans.api(e.QEMU_TRACE),
+ name_exec=e.event_exec.api(e.QEMU_TRACE),
+ argnames_trans=", ".join(e.event_trans.args.names()),
+ argnames_exec=", ".join(e.event_exec.args.names()))
+
+ out('}')
+
+ out('',
+ '#endif /* TRACE__GENERATED_TCG_TRACERS_H */')
diff --git a/scripts/tracetool/format/tcg_helper_c.py b/scripts/tracetool/format/tcg_helper_c.py
new file mode 100644
index 00000000..96655a05
--- /dev/null
+++ b/scripts/tracetool/format/tcg_helper_c.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Generate trace/generated-helpers.c.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+from tracetool.transform import *
+
+
+def generate(events, backend):
+ events = [e for e in events
+ if "disable" not in e.properties]
+
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '',
+ '#include "qemu-common.h"',
+ '#include "trace.h"',
+ '#include "exec/helper-proto.h"',
+ '',
+ )
+
+ for e in events:
+ if "tcg-exec" not in e.properties:
+ continue
+
+ # tracetool.generate always transforms types to host
+ e_args = e.original.args
+
+ values = ["(%s)%s" % (t, n)
+ for t, n in e.args.transform(TCG_2_TCG_HELPER_DEF)]
+
+ out('void %(name_tcg)s(%(args)s)',
+ '{',
+ ' %(name)s(%(values)s);',
+ '}',
+ name_tcg="helper_%s_proxy" % e.api(),
+ name=e.api(),
+ args=e_args.transform(HOST_2_TCG_COMPAT, TCG_2_TCG_HELPER_DEF),
+ values=", ".join(values),
+ )
diff --git a/scripts/tracetool/format/tcg_helper_h.py b/scripts/tracetool/format/tcg_helper_h.py
new file mode 100644
index 00000000..a8ba7ba8
--- /dev/null
+++ b/scripts/tracetool/format/tcg_helper_h.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Generate trace/generated-helpers.h.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+from tracetool.transform import *
+
+
+def generate(events, backend):
+ events = [e for e in events
+ if "disable" not in e.properties]
+
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '',
+ )
+
+ for e in events:
+ if "tcg-exec" not in e.properties:
+ continue
+
+ # tracetool.generate always transforms types to host
+ e_args = e.original.args
+
+ # TCG helper proxy declaration
+ fmt = "DEF_HELPER_FLAGS_%(argc)d(%(name)s, %(flags)svoid%(types)s)"
+ args = e_args.transform(HOST_2_TCG_COMPAT, HOST_2_TCG,
+ TCG_2_TCG_HELPER_DECL)
+ types = ", ".join(args.types())
+ if types != "":
+ types = ", " + types
+
+ flags = "TCG_CALL_NO_RWG, "
+
+ out(fmt,
+ flags=flags,
+ argc=len(args),
+ name=e.api() + "_proxy",
+ types=types,
+ )
diff --git a/scripts/tracetool/format/tcg_helper_wrapper_h.py b/scripts/tracetool/format/tcg_helper_wrapper_h.py
new file mode 100644
index 00000000..cac5a878
--- /dev/null
+++ b/scripts/tracetool/format/tcg_helper_wrapper_h.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Generate trace/generated-helpers-wrappers.h.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+from tracetool import out
+from tracetool.transform import *
+
+
+def generate(events, backend):
+ events = [e for e in events
+ if "disable" not in e.properties]
+
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '',
+ '#define tcg_temp_new_nop(v) (v)',
+ '#define tcg_temp_free_nop(v)',
+ '',
+ )
+
+ for e in events:
+ if "tcg-exec" not in e.properties:
+ continue
+
+ # tracetool.generate always transforms types to host
+ e_args = e.original.args
+
+ # mixed-type to TCG helper bridge
+ args_tcg_compat = e_args.transform(HOST_2_TCG_COMPAT)
+
+ code_new = [
+ "%(tcg_type)s __%(name)s = %(tcg_func)s(%(name)s);" %
+ {"tcg_type": transform_type(type_, HOST_2_TCG),
+ "tcg_func": transform_type(type_, HOST_2_TCG_TMP_NEW),
+ "name": name}
+ for (type_, name) in args_tcg_compat
+ ]
+
+ code_free = [
+ "%(tcg_func)s(__%(name)s);" %
+ {"tcg_func": transform_type(type_, HOST_2_TCG_TMP_FREE),
+ "name": name}
+ for (type_, name) in args_tcg_compat
+ ]
+
+ gen_name = "gen_helper_" + e.api()
+
+ out('static inline void %(name)s(%(args)s)',
+ '{',
+ ' %(code_new)s',
+ ' %(proxy_name)s(%(tmp_names)s);',
+ ' %(code_free)s',
+ '}',
+ name=gen_name,
+ args=e_args,
+ proxy_name=gen_name + "_proxy",
+ code_new="\n ".join(code_new),
+ code_free="\n ".join(code_free),
+ tmp_names=", ".join(["__%s" % name for _, name in e_args]),
+ )
diff --git a/scripts/tracetool/format/ust_events_c.py b/scripts/tracetool/format/ust_events_c.py
new file mode 100644
index 00000000..bc970936
--- /dev/null
+++ b/scripts/tracetool/format/ust_events_c.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+trace/generated-ust.c
+"""
+
+__author__ = "Mohamad Gebai <mohamad.gebai@polymtl.ca>"
+__copyright__ = "Copyright 2012, Mohamad Gebai <mohamad.gebai@polymtl.ca>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@redhat.com"
+
+
+from tracetool import out
+
+
+def generate(events, backend):
+ events = [e for e in events
+ if "disabled" not in e.properties]
+
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '',
+ '#define TRACEPOINT_DEFINE',
+ '#define TRACEPOINT_CREATE_PROBES',
+ '',
+ '/* If gcc version 4.7 or older is used, LTTng ust gives a warning when compiling with',
+ ' -Wredundant-decls.',
+ ' */',
+ '#pragma GCC diagnostic ignored "-Wredundant-decls"',
+ '',
+ '#include "generated-ust-provider.h"')
diff --git a/scripts/tracetool/format/ust_events_h.py b/scripts/tracetool/format/ust_events_h.py
new file mode 100644
index 00000000..3e8a7cdf
--- /dev/null
+++ b/scripts/tracetool/format/ust_events_h.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+trace/generated-ust-provider.h
+"""
+
+__author__ = "Mohamad Gebai <mohamad.gebai@polymtl.ca>"
+__copyright__ = "Copyright 2012, Mohamad Gebai <mohamad.gebai@polymtl.ca>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@redhat.com"
+
+
+from tracetool import out
+
+
+def generate(events, backend):
+ events = [e for e in events
+ if "disabled" not in e.properties]
+
+ out('/* This file is autogenerated by tracetool, do not edit. */',
+ '',
+ '#undef TRACEPOINT_PROVIDER',
+ '#define TRACEPOINT_PROVIDER qemu',
+ '',
+ '#undef TRACEPOINT_INCLUDE_FILE',
+ '#define TRACEPOINT_INCLUDE_FILE ./generated-ust-provider.h',
+ '',
+ '#if !defined (TRACE__GENERATED_UST_H) || defined(TRACEPOINT_HEADER_MULTI_READ)',
+ '#define TRACE__GENERATED_UST_H',
+ '',
+ '#include "qemu-common.h"',
+ '#include <lttng/tracepoint.h>',
+ '',
+ '/*',
+ ' * LTTng ust 2.0 does not allow you to use TP_ARGS(void) for tracepoints',
+          ' * requiring no arguments. We define these macros introduced in more recent',
+ ' * versions of LTTng ust as a workaround',
+ ' */',
+ '#ifndef _TP_EXPROTO1',
+ '#define _TP_EXPROTO1(a) void',
+ '#endif',
+ '#ifndef _TP_EXDATA_PROTO1',
+ '#define _TP_EXDATA_PROTO1(a) void *__tp_data',
+ '#endif',
+ '#ifndef _TP_EXDATA_VAR1',
+ '#define _TP_EXDATA_VAR1(a) __tp_data',
+ '#endif',
+ '#ifndef _TP_EXVAR1',
+ '#define _TP_EXVAR1(a)',
+ '#endif',
+ '')
+
+ for e in events:
+ if len(e.args) > 0:
+ out('TRACEPOINT_EVENT(',
+ ' qemu,',
+ ' %(name)s,',
+ ' TP_ARGS(%(args)s),',
+ ' TP_FIELDS(',
+ name=e.name,
+ args=", ".join(", ".join(i) for i in e.args))
+
+ types = e.args.types()
+ names = e.args.names()
+ fmts = e.formats()
+ for t,n,f in zip(types, names, fmts):
+ if ('char *' in t) or ('char*' in t):
+ out(' ctf_string(' + n + ', ' + n + ')')
+ elif ("%p" in f) or ("x" in f) or ("PRIx" in f):
+ out(' ctf_integer_hex('+ t + ', ' + n + ', ' + n + ')')
+ elif ("ptr" in t) or ("*" in t):
+ out(' ctf_integer_hex('+ t + ', ' + n + ', ' + n + ')')
+ elif ('int' in t) or ('long' in t) or ('unsigned' in t) or ('size_t' in t):
+ out(' ctf_integer(' + t + ', ' + n + ', ' + n + ')')
+ elif ('double' in t) or ('float' in t):
+ out(' ctf_float(' + t + ', ' + n + ', ' + n + ')')
+ elif ('void *' in t) or ('void*' in t):
+ out(' ctf_integer_hex(unsigned long, ' + n + ', ' + n + ')')
+
+ out(' )',
+ ')',
+ '')
+
+ else:
+ out('TRACEPOINT_EVENT(',
+ ' qemu,',
+ ' %(name)s,',
+ ' TP_ARGS(void),',
+ ' TP_FIELDS()',
+ ')',
+ '',
+ name=e.name)
+
+ out('#endif /* TRACE__GENERATED_UST_H */',
+ '',
+ '/* This part must be outside ifdef protection */',
+ '#include <lttng/tracepoint-event.h>')
diff --git a/scripts/tracetool/transform.py b/scripts/tracetool/transform.py
new file mode 100644
index 00000000..fc5e679e
--- /dev/null
+++ b/scripts/tracetool/transform.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Type-transformation rules.
+"""
+
+__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
+__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
+__license__ = "GPL version 2 or (at your option) any later version"
+
+__maintainer__ = "Stefan Hajnoczi"
+__email__ = "stefanha@linux.vnet.ibm.com"
+
+
+def _transform_type(type_, trans):
+ if isinstance(trans, str):
+ return trans
+ elif isinstance(trans, dict):
+ if type_ in trans:
+ return _transform_type(type_, trans[type_])
+ elif None in trans:
+ return _transform_type(type_, trans[None])
+ else:
+ return type_
+ elif callable(trans):
+ return trans(type_)
+ else:
+ raise ValueError("Invalid type transformation rule: %s" % trans)
+
+
+def transform_type(type_, *trans):
+ """Return a new type transformed according to the given rules.
+
+ Applies each of the transformation rules in trans in order.
+
+ If an element of trans is a string, return it.
+
+ If an element of trans is a function, call it with type_ as its only
+ argument.
+
+ If an element of trans is a dict, search type_ in its keys. If type_ is
+ a key, use the value as a transformation rule for type_. Otherwise, if
+ None is a key use the value as a transformation rule for type_.
+
+ Otherwise, return type_.
+
+ Parameters
+ ----------
+ type_ : str
+ Type to transform.
+ trans : list of function or dict
+ Type transformation rules.
+ """
+ if len(trans) == 0:
+ raise ValueError
+ res = type_
+ for t in trans:
+ res = _transform_type(res, t)
+ return res
+
+
+##################################################
+# tcg -> host
+
+def _tcg_2_host(type_):
+ if type_ == "TCGv":
+ # force a fixed-size type (target-independent)
+ return "uint64_t"
+ else:
+ return type_
+
+TCG_2_HOST = {
+ "TCGv_i32": "uint32_t",
+ "TCGv_i64": "uint64_t",
+ "TCGv_ptr": "void *",
+ None: _tcg_2_host,
+ }
+
+
+##################################################
+# host -> host compatible with tcg sizes
+
+HOST_2_TCG_COMPAT = {
+ "uint8_t": "uint32_t",
+ }
+
+
+##################################################
+# host/tcg -> tcg
+
+def _host_2_tcg(type_):
+ if type_.startswith("TCGv"):
+ return type_
+ raise ValueError("Don't know how to translate '%s' into a TCG type\n" % type_)
+
+HOST_2_TCG = {
+ "uint32_t": "TCGv_i32",
+ "uint64_t": "TCGv_i64",
+ "void *" : "TCGv_ptr",
+ None: _host_2_tcg,
+ }
+
+
+##################################################
+# tcg -> tcg helper definition
+
+def _tcg_2_helper_def(type_):
+ if type_ == "TCGv":
+ return "target_ulong"
+ else:
+ return type_
+
+TCG_2_TCG_HELPER_DEF = {
+ "TCGv_i32": "uint32_t",
+ "TCGv_i64": "uint64_t",
+ "TCGv_ptr": "void *",
+ None: _tcg_2_helper_def,
+ }
+
+
+##################################################
+# tcg -> tcg helper declaration
+
+def _tcg_2_tcg_helper_decl_error(type_):
+ raise ValueError("Don't know how to translate type '%s' into a TCG helper declaration type\n" % type_)
+
+TCG_2_TCG_HELPER_DECL = {
+ "TCGv" : "tl",
+ "TCGv_ptr": "ptr",
+ "TCGv_i32": "i32",
+ "TCGv_i64": "i64",
+ None: _tcg_2_tcg_helper_decl_error,
+ }
+
+
+##################################################
+# host/tcg -> tcg temporal constant allocation
+
+def _host_2_tcg_tmp_new(type_):
+ if type_.startswith("TCGv"):
+ return "tcg_temp_new_nop"
+ raise ValueError("Don't know how to translate type '%s' into a TCG temporal allocation" % type_)
+
+HOST_2_TCG_TMP_NEW = {
+ "uint32_t": "tcg_const_i32",
+ "uint64_t": "tcg_const_i64",
+ "void *" : "tcg_const_ptr",
+ None: _host_2_tcg_tmp_new,
+ }
+
+
+##################################################
+# host/tcg -> tcg temporal constant deallocation
+
+def _host_2_tcg_tmp_free(type_):
+ if type_.startswith("TCGv"):
+ return "tcg_temp_free_nop"
+ raise ValueError("Don't know how to translate type '%s' into a TCG temporal deallocation" % type_)
+
+HOST_2_TCG_TMP_FREE = {
+ "uint32_t": "tcg_temp_free_i32",
+ "uint64_t": "tcg_temp_free_i64",
+ "void *" : "tcg_temp_free_ptr",
+ None: _host_2_tcg_tmp_free,
+ }
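transform_type() applies its rules left to right; a dict rule first tries the concrete type and then falls back to its None entry. Chaining HOST_2_TCG_COMPAT with HOST_2_TCG therefore widens uint8_t to uint32_t before mapping it to a TCG register type, which is how tcg_helper_h.py uses these tables. A brief sketch, assuming the scripts directory is importable:

    from tracetool.transform import (transform_type, HOST_2_TCG,
                                     HOST_2_TCG_COMPAT, TCG_2_TCG_HELPER_DECL)

    print(transform_type("uint8_t", HOST_2_TCG_COMPAT, HOST_2_TCG))  # TCGv_i32
    print(transform_type("void *", HOST_2_TCG))                      # TCGv_ptr
    print(transform_type("TCGv_i64", TCG_2_TCG_HELPER_DECL))         # i64
    # Unknown types hit the None rule where one exists (HOST_2_TCG raises
    # ValueError); tables without a None entry, like HOST_2_TCG_COMPAT,
    # simply pass the type through unchanged.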
diff --git a/scripts/update-acpi.sh b/scripts/update-acpi.sh
new file mode 100644
index 00000000..b5f05ff3
--- /dev/null
+++ b/scripts/update-acpi.sh
@@ -0,0 +1,4 @@
+cd x86_64-softmmu
+for file in hw/i386/*.hex; do
+ cp -f $file ../$file.generated
+done
diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh
new file mode 100755
index 00000000..20103284
--- /dev/null
+++ b/scripts/update-linux-headers.sh
@@ -0,0 +1,133 @@
+#!/bin/sh -e
+#
+# Update Linux kernel headers QEMU requires from a specified kernel tree.
+#
+# Copyright (C) 2011 Siemens AG
+#
+# Authors:
+# Jan Kiszka <jan.kiszka@siemens.com>
+#
+# This work is licensed under the terms of the GNU GPL version 2.
+# See the COPYING file in the top-level directory.
+
+tmpdir=`mktemp -d`
+linux="$1"
+output="$2"
+
+if [ -z "$linux" ] || ! [ -d "$linux" ]; then
+ cat << EOF
+usage: update-linux-headers.sh LINUX_PATH [OUTPUT_PATH]
+
+LINUX_PATH Linux kernel directory to obtain the headers from
+OUTPUT_PATH output directory, usually the qemu source tree (default: $PWD)
+EOF
+ exit 1
+fi
+
+if [ -z "$output" ]; then
+ output="$PWD"
+fi
+
+cp_virtio() {
+ from=$1
+ to=$2
+ virtio=$(find "$from" -name '*virtio*h' -o -name "input.h" -o -name "pci_regs.h")
+ if [ "$virtio" ]; then
+ rm -rf "$to"
+ mkdir -p "$to"
+ for f in $virtio; do
+ if
+ grep '#include' "$f" | grep -v -e 'linux/virtio' \
+ -e 'linux/types' \
+ -e 'linux/if_ether' \
+ -e 'sys/' \
+ > /dev/null
+ then
+ echo "Unexpected #include in input file $f".
+ exit 2
+ fi
+
+ header=$(basename "$f");
+ sed -e 's/__u\([0-9][0-9]*\)/uint\1_t/g' \
+ -e 's/__s\([0-9][0-9]*\)/int\1_t/g' \
+ -e 's/__le\([0-9][0-9]*\)/uint\1_t/g' \
+ -e 's/__be\([0-9][0-9]*\)/uint\1_t/g' \
+ -e 's/<linux\/\([^>]*\)>/"standard-headers\/linux\/\1"/' \
+ -e 's/__bitwise__//' \
+ -e 's/__attribute__((packed))/QEMU_PACKED/' \
+ -e 's/__inline__/inline/' \
+ -e '/sys\/ioctl.h/d' \
+ -e 's/SW_MAX/SW_MAX_/' \
+ "$f" > "$to/$header";
+ done
+ fi
+}
+
+# This will pick up non-directories too (eg "Kconfig") but we will
+# ignore them in the next loop.
+ARCHLIST=$(cd "$linux/arch" && echo *)
+
+for arch in $ARCHLIST; do
+ # Discard anything which isn't a KVM-supporting architecture
+ if ! [ -e "$linux/arch/$arch/include/asm/kvm.h" ] &&
+ ! [ -e "$linux/arch/$arch/include/uapi/asm/kvm.h" ] ; then
+ continue
+ fi
+
+ # Blacklist architectures which have KVM headers but are actually dead
+ if [ "$arch" = "ia64" ]; then
+ continue
+ fi
+
+ make -C "$linux" INSTALL_HDR_PATH="$tmpdir" SRCARCH=$arch headers_install
+
+ rm -rf "$output/linux-headers/asm-$arch"
+ mkdir -p "$output/linux-headers/asm-$arch"
+ for header in kvm.h kvm_para.h; do
+ cp "$tmpdir/include/asm/$header" "$output/linux-headers/asm-$arch"
+ done
+ if [ $arch = x86 ]; then
+ cp "$tmpdir/include/asm/hyperv.h" "$output/linux-headers/asm-x86"
+ fi
+ if [ $arch = powerpc ]; then
+ cp "$tmpdir/include/asm/epapr_hcalls.h" "$output/linux-headers/asm-powerpc/"
+ fi
+
+ cp_virtio "$tmpdir/include/asm" "$output/include/standard-headers/asm-$arch"
+done
+
+rm -rf "$output/linux-headers/linux"
+mkdir -p "$output/linux-headers/linux"
+for header in kvm.h kvm_para.h vfio.h vhost.h \
+ psci.h; do
+ cp "$tmpdir/include/linux/$header" "$output/linux-headers/linux"
+done
+rm -rf "$output/linux-headers/asm-generic"
+mkdir -p "$output/linux-headers/asm-generic"
+for header in kvm_para.h; do
+ cp "$tmpdir/include/asm-generic/$header" "$output/linux-headers/asm-generic"
+done
+if [ -L "$linux/source" ]; then
+ cp "$linux/source/COPYING" "$output/linux-headers"
+else
+ cp "$linux/COPYING" "$output/linux-headers"
+fi
+
+cat <<EOF >$output/linux-headers/linux/virtio_config.h
+#include "standard-headers/linux/virtio_config.h"
+EOF
+cat <<EOF >$output/linux-headers/linux/virtio_ring.h
+#include "standard-headers/linux/virtio_ring.h"
+EOF
+
+cp_virtio "$tmpdir/include/linux/" "$output/include/standard-headers/linux"
+
+cat <<EOF >$output/include/standard-headers/linux/types.h
+#include <stdint.h>
+#include "qemu/compiler.h"
+EOF
+cat <<EOF >$output/include/standard-headers/linux/if_ether.h
+#define ETH_ALEN 6
+EOF
+
+rm -rf "$tmpdir"
diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py
new file mode 100755
index 00000000..b6c0bbea
--- /dev/null
+++ b/scripts/vmstate-static-checker.py
@@ -0,0 +1,424 @@
+#!/usr/bin/python
+#
+# Compares vmstate information stored in JSON format, obtained from
+# the -dump-vmstate QEMU command.
+#
+# Copyright 2014 Amit Shah <amit.shah@redhat.com>
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, see <http://www.gnu.org/licenses/>.
+
+import argparse
+import json
+import sys
+
+# Count the number of errors found
+taint = 0
+
+def bump_taint():
+ global taint
+
+ # Ensure we don't wrap around or reset to 0 -- the shell only has
+ # an 8-bit return value.
+ if taint < 255:
+ taint = taint + 1
+
+
+def check_fields_match(name, s_field, d_field):
+ if s_field == d_field:
+ return True
+
+ # Some fields changed names between qemu versions. This list
+ # is used to whitelist such changes in each section / description.
+ changed_names = {
+ 'apic': ['timer', 'timer_expiry'],
+ 'e1000': ['dev', 'parent_obj'],
+ 'ehci': ['dev', 'pcidev'],
+ 'I440FX': ['dev', 'parent_obj'],
+ 'ich9_ahci': ['card', 'parent_obj'],
+ 'ich9-ahci': ['ahci', 'ich9_ahci'],
+ 'ioh3420': ['PCIDevice', 'PCIEDevice'],
+ 'ioh-3240-express-root-port': ['port.br.dev',
+ 'parent_obj.parent_obj.parent_obj',
+ 'port.br.dev.exp.aer_log',
+ 'parent_obj.parent_obj.parent_obj.exp.aer_log'],
+ 'cirrus_vga': ['hw_cursor_x', 'vga.hw_cursor_x',
+ 'hw_cursor_y', 'vga.hw_cursor_y'],
+ 'lsiscsi': ['dev', 'parent_obj'],
+ 'mch': ['d', 'parent_obj'],
+ 'pci_bridge': ['bridge.dev', 'parent_obj', 'bridge.dev.shpc', 'shpc'],
+ 'pcnet': ['pci_dev', 'parent_obj'],
+ 'PIIX3': ['pci_irq_levels', 'pci_irq_levels_vmstate'],
+ 'piix4_pm': ['dev', 'parent_obj', 'pci0_status',
+ 'acpi_pci_hotplug.acpi_pcihp_pci_status[0x0]',
+ 'pm1a.sts', 'ar.pm1.evt.sts', 'pm1a.en', 'ar.pm1.evt.en',
+ 'pm1_cnt.cnt', 'ar.pm1.cnt.cnt',
+ 'tmr.timer', 'ar.tmr.timer',
+ 'tmr.overflow_time', 'ar.tmr.overflow_time',
+ 'gpe', 'ar.gpe'],
+ 'rtl8139': ['dev', 'parent_obj'],
+ 'qxl': ['num_surfaces', 'ssd.num_surfaces'],
+ 'usb-ccid': ['abProtocolDataStructure', 'abProtocolDataStructure.data'],
+ 'usb-host': ['dev', 'parent_obj'],
+ 'usb-mouse': ['usb-ptr-queue', 'HIDPointerEventQueue'],
+ 'usb-tablet': ['usb-ptr-queue', 'HIDPointerEventQueue'],
+ 'vmware_vga': ['card', 'parent_obj'],
+ 'vmware_vga_internal': ['depth', 'new_depth'],
+ 'xhci': ['pci_dev', 'parent_obj'],
+ 'x3130-upstream': ['PCIDevice', 'PCIEDevice'],
+ 'xio3130-express-downstream-port': ['port.br.dev',
+ 'parent_obj.parent_obj.parent_obj',
+ 'port.br.dev.exp.aer_log',
+ 'parent_obj.parent_obj.parent_obj.exp.aer_log'],
+ 'xio3130-downstream': ['PCIDevice', 'PCIEDevice'],
+ 'xio3130-express-upstream-port': ['br.dev', 'parent_obj.parent_obj',
+ 'br.dev.exp.aer_log',
+ 'parent_obj.parent_obj.exp.aer_log'],
+ }
+
+ if not name in changed_names:
+ return False
+
+ if s_field in changed_names[name] and d_field in changed_names[name]:
+ return True
+
+ return False
+
+def get_changed_sec_name(sec):
+ # Section names can change -- see commit 292b1634 for an example.
+ changes = {
+ "ICH9 LPC": "ICH9-LPC",
+ }
+
+ for item in changes:
+ if item == sec:
+ return changes[item]
+ if changes[item] == sec:
+ return item
+ return ""
+
+def exists_in_substruct(fields, item):
+ # Some QEMU versions moved a few fields inside a substruct. This
+ # kept the on-wire format the same. This function checks if
+ # something got shifted inside a substruct. For example, the
+ # change in commit 1f42d22233b4f3d1a2933ff30e8d6a6d9ee2d08f
+
+ if not "Description" in fields:
+ return False
+
+ if not "Fields" in fields["Description"]:
+ return False
+
+ substruct_fields = fields["Description"]["Fields"]
+
+ if substruct_fields == []:
+ return False
+
+ return check_fields_match(fields["Description"]["name"],
+ substruct_fields[0]["field"], item)
+
+
+def check_fields(src_fields, dest_fields, desc, sec):
+ # This function checks for all the fields in a section. If some
+ # fields got embedded into a substruct, this function will also
+ # attempt to check inside the substruct.
+
+ d_iter = iter(dest_fields)
+ s_iter = iter(src_fields)
+
+ # Using these lists as stacks to store previous value of s_iter
+ # and d_iter, so that when time comes to exit out of a substruct,
+ # we can go back one level up and continue from where we left off.
+
+ s_iter_list = []
+ d_iter_list = []
+
+ advance_src = True
+ advance_dest = True
+ unused_count = 0
+
+ while True:
+ if advance_src:
+ try:
+ s_item = s_iter.next()
+ except StopIteration:
+ if s_iter_list == []:
+ break
+
+ s_iter = s_iter_list.pop()
+ continue
+ else:
+ if unused_count == 0:
+ # We want to avoid advancing just once -- when entering a
+ # dest substruct, or when exiting one.
+ advance_src = True
+
+ if advance_dest:
+ try:
+ d_item = d_iter.next()
+ except StopIteration:
+ if d_iter_list == []:
+ # We were not in a substruct
+ print "Section \"" + sec + "\",",
+ print "Description " + "\"" + desc + "\":",
+ print "expected field \"" + s_item["field"] + "\",",
+ print "while dest has no further fields"
+ bump_taint()
+ break
+
+ d_iter = d_iter_list.pop()
+ advance_src = False
+ continue
+ else:
+ if unused_count == 0:
+ advance_dest = True
+
+ if unused_count > 0:
+ if advance_dest == False:
+ unused_count = unused_count - s_item["size"]
+ if unused_count == 0:
+ advance_dest = True
+ continue
+ if unused_count < 0:
+ print "Section \"" + sec + "\",",
+ print "Description \"" + desc + "\":",
+ print "unused size mismatch near \"",
+ print s_item["field"] + "\""
+ bump_taint()
+ break
+ continue
+
+ if advance_src == False:
+ unused_count = unused_count - d_item["size"]
+ if unused_count == 0:
+ advance_src = True
+ continue
+ if unused_count < 0:
+ print "Section \"" + sec + "\",",
+ print "Description \"" + desc + "\":",
+ print "unused size mismatch near \"",
+ print d_item["field"] + "\""
+ bump_taint()
+ break
+ continue
+
+ if not check_fields_match(desc, s_item["field"], d_item["field"]):
+ # Some fields were put in substructs, keeping the
+ # on-wire format the same, but breaking static tools
+ # like this one.
+
+ # First, check if dest has a new substruct.
+ if exists_in_substruct(d_item, s_item["field"]):
+ # listiterators don't have a prev() function, so we
+ # have to store our current location, descend into the
+ # substruct, and ensure we come out as if nothing
+ # happened when the substruct is over.
+ #
+ # Essentially we're opening the substructs that got
+ # added which didn't change the wire format.
+ d_iter_list.append(d_iter)
+ substruct_fields = d_item["Description"]["Fields"]
+ d_iter = iter(substruct_fields)
+ advance_src = False
+ continue
+
+ # Next, check if src has substruct that dest removed
+ # (can happen in backward migration: 2.0 -> 1.5)
+ if exists_in_substruct(s_item, d_item["field"]):
+ s_iter_list.append(s_iter)
+ substruct_fields = s_item["Description"]["Fields"]
+ s_iter = iter(substruct_fields)
+ advance_dest = False
+ continue
+
+ if s_item["field"] == "unused" or d_item["field"] == "unused":
+ if s_item["size"] == d_item["size"]:
+ continue
+
+ if d_item["field"] == "unused":
+ advance_dest = False
+ unused_count = d_item["size"] - s_item["size"]
+ continue
+
+ if s_item["field"] == "unused":
+ advance_src = False
+ unused_count = s_item["size"] - d_item["size"]
+ continue
+
+ print "Section \"" + sec + "\",",
+ print "Description \"" + desc + "\":",
+ print "expected field \"" + s_item["field"] + "\",",
+ print "got \"" + d_item["field"] + "\"; skipping rest"
+ bump_taint()
+ break
+
+ check_version(s_item, d_item, sec, desc)
+
+ if not "Description" in s_item:
+ # Check size of this field only if it's not a VMSTRUCT entry
+ check_size(s_item, d_item, sec, desc, s_item["field"])
+
+ check_description_in_list(s_item, d_item, sec, desc)
+
+
+def check_subsections(src_sub, dest_sub, desc, sec):
+ for s_item in src_sub:
+ found = False
+ for d_item in dest_sub:
+ if s_item["name"] != d_item["name"]:
+ continue
+
+ found = True
+ check_descriptions(s_item, d_item, sec)
+
+ if not found:
+ print "Section \"" + sec + "\", Description \"" + desc + "\":",
+ print "Subsection \"" + s_item["name"] + "\" not found"
+ bump_taint()
+
+
+def check_description_in_list(s_item, d_item, sec, desc):
+ if not "Description" in s_item:
+ return
+
+ if not "Description" in d_item:
+ print "Section \"" + sec + "\", Description \"" + desc + "\",",
+ print "Field \"" + s_item["field"] + "\": missing description"
+ bump_taint()
+ return
+
+ check_descriptions(s_item["Description"], d_item["Description"], sec)
+
+
+def check_descriptions(src_desc, dest_desc, sec):
+ check_version(src_desc, dest_desc, sec, src_desc["name"])
+
+ if not check_fields_match(sec, src_desc["name"], dest_desc["name"]):
+ print "Section \"" + sec + "\":",
+ print "Description \"" + src_desc["name"] + "\"",
+ print "missing, got \"" + dest_desc["name"] + "\" instead; skipping"
+ bump_taint()
+ return
+
+ for f in src_desc:
+ if not f in dest_desc:
+ print "Section \"" + sec + "\"",
+ print "Description \"" + src_desc["name"] + "\":",
+ print "Entry \"" + f + "\" missing"
+ bump_taint()
+ continue
+
+ if f == 'Fields':
+ check_fields(src_desc[f], dest_desc[f], src_desc["name"], sec)
+
+ if f == 'Subsections':
+ check_subsections(src_desc[f], dest_desc[f], src_desc["name"], sec)
+
+
+def check_version(s, d, sec, desc=None):
+ if s["version_id"] > d["version_id"]:
+ print "Section \"" + sec + "\"",
+ if desc:
+ print "Description \"" + desc + "\":",
+ print "version error:", s["version_id"], ">", d["version_id"]
+ bump_taint()
+
+ if not "minimum_version_id" in d:
+ return
+
+ if s["version_id"] < d["minimum_version_id"]:
+ print "Section \"" + sec + "\"",
+ if desc:
+ print "Description \"" + desc + "\":",
+ print "minimum version error:", s["version_id"], "<",
+ print d["minimum_version_id"]
+ bump_taint()
+
+
+def check_size(s, d, sec, desc=None, field=None):
+ if s["size"] != d["size"]:
+ print "Section \"" + sec + "\"",
+ if desc:
+ print "Description \"" + desc + "\"",
+ if field:
+ print "Field \"" + field + "\"",
+ print "size mismatch:", s["size"], ",", d["size"]
+ bump_taint()
+
+
+def check_machine_type(s, d):
+ if s["Name"] != d["Name"]:
+ print "Warning: checking incompatible machine types:",
+ print "\"" + s["Name"] + "\", \"" + d["Name"] + "\""
+ return
+
+
+def main():
+ help_text = "Parse JSON-formatted vmstate dumps from QEMU in files SRC and DEST. Checks whether migration from SRC to DEST QEMU versions would break based on the VMSTATE information contained within the JSON outputs. The JSON output is created from a QEMU invocation with the -dump-vmstate parameter and a filename argument to it. Other parameters to QEMU do not matter, except the -M (machine type) parameter."
+
+ parser = argparse.ArgumentParser(description=help_text)
+ parser.add_argument('-s', '--src', type=file, required=True,
+ help='json dump from src qemu')
+ parser.add_argument('-d', '--dest', type=file, required=True,
+ help='json dump from dest qemu')
+ parser.add_argument('--reverse', required=False, default=False,
+ action='store_true',
+ help='reverse the direction')
+ args = parser.parse_args()
+
+ src_data = json.load(args.src)
+ dest_data = json.load(args.dest)
+ args.src.close()
+ args.dest.close()
+
+ if args.reverse:
+ temp = src_data
+ src_data = dest_data
+ dest_data = temp
+
+ for sec in src_data:
+ dest_sec = sec
+ if not dest_sec in dest_data:
+ # Either the section name got changed, or the section
+ # doesn't exist in dest.
+ dest_sec = get_changed_sec_name(sec)
+ if not dest_sec in dest_data:
+ print "Section \"" + sec + "\" does not exist in dest"
+ bump_taint()
+ continue
+
+ s = src_data[sec]
+ d = dest_data[dest_sec]
+
+ if sec == "vmschkmachine":
+ check_machine_type(s, d)
+ continue
+
+ check_version(s, d, sec)
+
+ for entry in s:
+ if not entry in d:
+ print "Section \"" + sec + "\": Entry \"" + entry + "\"",
+ print "missing"
+ bump_taint()
+ continue
+
+ if entry == "Description":
+ check_descriptions(s[entry], d[entry], sec)
+
+ return taint
+
+
+if __name__ == '__main__':
+ sys.exit(main())
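In practice the checker is driven by two JSON dumps produced with QEMU's -dump-vmstate option, one from the source version and one from the destination, and its exit status is the taint counter (0 means no migration-breaking difference was found, capped at 255). A minimal invocation sketch in Python; the dump file names here are hypothetical:

    import subprocess
    import sys

    # src.json / dest.json: outputs of "-dump-vmstate <file>" from the two
    # QEMU builds being compared (use the same -M machine type on both).
    rc = subprocess.call([sys.executable, "scripts/vmstate-static-checker.py",
                          "-s", "src.json", "-d", "dest.json"])
    print("problems found: %d" % rc)   # 0 means the vmstate looks compatible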