author    Daniel De Graaf <dgdegra@tycho.nsa.gov>  2013-04-11 12:20:25 -0400
committer Ian Campbell <ian.campbell@citrix.com>   2013-04-12 14:28:17 +0100
commit    f494d9f3c37542435239236085be25c820912304 (patch)
tree      571f23e359f4ba4c78c818ef288147dbd0f563ae /xen/include/public
parent    c1f0b214536773630cd5f16bf3d275015373555b (diff)
mini-os/tpm{back,front}: Change shared page ABI
This changes the vTPM shared page ABI from a copy of the Xen network interface to a single-page interface that better reflects the expected behavior of a TPM: only a single request packet can be sent at any given time, and every packet sent generates a single response packet. This protocol change should also increase efficiency as it avoids mapping and unmapping grants when possible.

The vtpm xenbus device now requires a feature-protocol-v2 node in xenstore to avoid conflicts with existing (xen-patched) kernels supporting the old interface.

While the contents of the shared page have been defined to allow packets larger than a single page (actually 4088 bytes) by allowing the client to add extra grant references, the mapping of these extra references has not been implemented; a feature node in xenstore may be used in the future to indicate full support for the multi-page protocol. Most uses of the TPM should not require this feature.

Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Cc: Jan Beulich <JBeulich@suse.com>
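As a rough illustration of the request/response flow this commit introduces, the frontend-side sketch below walks one command through the v2 state machine: populate the shared page, move the state to VTPM_STATE_SUBMIT, notify the backend, then wait for VTPM_STATE_FINISH. It is a minimal sketch only; the shared-page pointer and the notify_backend()/wait_for_backend_event() helpers are hypothetical stand-ins for the real grant-mapping and event-channel plumbing, and error handling is reduced to a single failure path.

#include <stdint.h>
#include <string.h>

/* Mirrors the layout added to xen/include/public/io/tpmif.h below. */
enum vtpm_state {
    VTPM_STATE_IDLE,
    VTPM_STATE_SUBMIT,
    VTPM_STATE_FINISH,
    VTPM_STATE_CANCEL,
};

struct vtpm_shared_page {
    uint32_t length;          /* request/response length in bytes */
    uint8_t  state;           /* enum vtpm_state */
    uint8_t  locality;        /* for the current request */
    uint8_t  pad;
    uint8_t  nr_extra_pages;  /* extra pages for long packets; may be zero */
    uint32_t extra_pages[];   /* grant IDs */
};

/* Hypothetical placeholders for the real event-channel plumbing. */
static void notify_backend(void)         { /* event channel notify elided */ }
static void wait_for_backend_event(void) { /* event channel wait elided */ }

/* Send one TPM command and wait for the response (single-page case). */
static int vtpm_do_command(struct vtpm_shared_page *shr,
                           const uint8_t *cmd, uint32_t cmd_len,
                           uint8_t *resp, uint32_t resp_max)
{
    /* Packet data follows the header and the (possibly empty) grant list. */
    uint8_t *data = (uint8_t *)&shr->extra_pages[shr->nr_extra_pages];

    /* The frontend only ever moves the state to SUBMIT (or CANCEL). */
    memcpy(data, cmd, cmd_len);
    shr->length = cmd_len;
    shr->state = VTPM_STATE_SUBMIT;
    notify_backend();

    /* The backend moves the state to FINISH, or back to IDLE on cancellation. */
    do {
        wait_for_backend_event();
    } while (shr->state == VTPM_STATE_SUBMIT);

    if (shr->state != VTPM_STATE_FINISH || shr->length > resp_max)
        return -1;

    memcpy(resp, data, shr->length);
    return (int)shr->length;
}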
Diffstat (limited to 'xen/include/public')
-rw-r--r--  xen/include/public/io/tpmif.h  | 66
1 file changed, 66 insertions(+), 0 deletions(-)
diff --git a/xen/include/public/io/tpmif.h b/xen/include/public/io/tpmif.h
index fca2c4ea42..dcc5e571ac 100644
--- a/xen/include/public/io/tpmif.h
+++ b/xen/include/public/io/tpmif.h
@@ -64,6 +64,72 @@ struct tpmif_tx_interface {
};
typedef struct tpmif_tx_interface tpmif_tx_interface_t;
+/******************************************************************************
+ * TPM I/O interface for Xen guest OSes, v2
+ *
+ * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
+ *
+ * This protocol emulates the request/response behavior of a TPM using a Xen
+ * shared memory interface. All interaction with the TPM is at the direction
+ * of the frontend, since a TPM (hardware or virtual) is a passive device -
+ * the backend only processes commands as requested by the frontend.
+ *
+ * The frontend sends a request to the TPM by populating the shared page with
+ * the request packet, changing the state to VTPM_STATE_SUBMIT, and sending
+ * an event channel notification. When the backend is finished, it will set
+ * the state to VTPM_STATE_FINISH and send an event channel notification.
+ *
+ * In order to allow long-running commands to be canceled, the frontend can
+ * at any time change the state to VTPM_STATE_CANCEL and send a notification.
+ * The TPM can either finish the command (changing state to VTPM_STATE_FINISH)
+ * or can cancel the command and change the state to VTPM_STATE_IDLE. The TPM
+ * can also change the state to VTPM_STATE_IDLE instead of VTPM_STATE_FINISH
+ * if another reason for cancellation is required - for example, a physical
+ * TPM may cancel a command if the interface is seized by another locality.
+ *
+ * The TPM command format is defined by the TCG, and is available at
+ * http://www.trustedcomputinggroup.org/resources/tpm_main_specification
+ */
+
+enum vtpm_state {
+ VTPM_STATE_IDLE, /* no contents / vTPM idle / cancel complete */
+ VTPM_STATE_SUBMIT, /* request ready / vTPM working */
+ VTPM_STATE_FINISH, /* response ready / vTPM idle */
+ VTPM_STATE_CANCEL, /* cancel requested / vTPM working */
+};
+/* Note: The backend should only change state to IDLE or FINISH, while the
+ * frontend should only change to SUBMIT or CANCEL. Status changes do not need
+ * to use atomic operations.
+ */
+
+
+/* The shared page for vTPM request/response packets looks like:
+ *
+ * Offset Contents
+ * =================================================
+ * 0 struct vtpm_shared_page
+ * 16 [optional] List of grant IDs
+ * 16+4*nr_extra_pages TPM packet data
+ *
+ * If the TPM packet data extends beyond the end of a single page, the grant IDs
+ * defined in extra_pages are used as if they were mapped immediately following
+ * the primary shared page. The grants are allocated by the frontend and mapped
+ * by the backend. Before sending a request spanning multiple pages, the
+ * frontend should verify that the TPM supports such large requests by querying
+ * the TPM_CAP_PROP_INPUT_BUFFER property from the TPM.
+ */
+struct vtpm_shared_page {
+ uint32_t length; /* request/response length in bytes */
+
+ uint8_t state; /* enum vtpm_state */
+ uint8_t locality; /* for the current request */
+ uint8_t pad; /* should be zero */
+
+ uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */
+ uint32_t extra_pages[0]; /* grant IDs; length is actually nr_extra_pages */
+};
+typedef struct vtpm_shared_page vtpm_shared_page_t;
+
#endif
/*
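For the multi-page case described in the layout comment above, a frontend first needs to know how many extra grant pages a packet of a given length requires. The sketch below only does that arithmetic; granting and mapping the extra references is elided, since the commit message notes that mapping them is not yet implemented. The 4 KiB page size and the 4088-byte single-page capacity are assumptions taken from the commit message and the 8-byte header implied by the struct.

#include <stdint.h>

#define VTPM_PAGE_SIZE        4096u
/* Assumed data capacity of the primary page with no extra grants
 * (4096 minus the 8-byte header), matching the 4088-byte figure
 * quoted in the commit message. */
#define VTPM_FIRST_PAGE_DATA  4088u

/* Total packet capacity with 'nr' extra pages: each grant ID consumes
 * 4 bytes of the primary page's data area, while each extra page adds
 * a full page of data. */
static uint64_t vtpm_capacity(unsigned int nr)
{
    return VTPM_FIRST_PAGE_DATA + (uint64_t)(VTPM_PAGE_SIZE - 4u) * nr;
}

/* Number of extra grant pages needed for a packet of 'len' bytes. */
static unsigned int vtpm_extra_pages_needed(uint32_t len)
{
    unsigned int nr = 0;

    while (len > vtpm_capacity(nr))
        nr++;
    return nr;
}

As the layout comment says, a frontend should also confirm that the TPM accepts packets of that size (TPM_CAP_PROP_INPUT_BUFFER) before sending a multi-page request.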