
[PATCH vtpm v2 08/12] add tpmfront, tpm_tis, and tpmback drivers to mini-os
This patch adds 3 new drivers to mini-os.

tpmfront - paravirtualized tpm frontend driver
tpmback - paravirtualized tpm backend driver
tpm_tis - hardware tpm driver

Unfortunately these drivers were derived from GPL-licensed
Linux kernel drivers, so they must carry the GPL license.
However, since mini-os now supports conditional compilation,
these drivers can hopefully be included in the xen tree and
conditionally excluded from non-GPL projects.
They are disabled by default in the Makefile.
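
As an example (assuming the usual mini-os build flow, where these
CONFIG_* variables can be overridden on the make command line), the
frontend driver can be enabled with:

    make CONFIG_TPMFRONT=y

CONFIG_TPMBACK and CONFIG_TPM_TIS are enabled the same way.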

Signed-off-by: Matthew Fioravante <matthew.fioravante@jhuapl.edu>

diff --git a/extras/mini-os/Makefile b/extras/mini-os/Makefile
index 2422db3..2302a23 100644
--- a/extras/mini-os/Makefile
+++ b/extras/mini-os/Makefile
@@ -22,6 +22,9 @@ CONFIG_QEMU_XS_ARGS ?= n
CONFIG_TEST ?= n
CONFIG_PCIFRONT ?= n
CONFIG_BLKFRONT ?= y
+CONFIG_TPMFRONT ?= n
+CONFIG_TPM_TIS ?= n
+CONFIG_TPMBACK ?= n
CONFIG_NETFRONT ?= y
CONFIG_FBFRONT ?= y
CONFIG_KBDFRONT ?= y
@@ -36,6 +39,9 @@ flags-$(CONFIG_SPARSE_BSS) += -DCONFIG_SPARSE_BSS
flags-$(CONFIG_QEMU_XS_ARGS) += -DCONFIG_QEMU_XS_ARGS
flags-$(CONFIG_PCIFRONT) += -DCONFIG_PCIFRONT
flags-$(CONFIG_BLKFRONT) += -DCONFIG_BLKFRONT
+flags-$(CONFIG_TPMFRONT) += -DCONFIG_TPMFRONT
+flags-$(CONFIG_TPM_TIS) += -DCONFIG_TPM_TIS
+flags-$(CONFIG_TPMBACK) += -DCONFIG_TPMBACK
flags-$(CONFIG_NETFRONT) += -DCONFIG_NETFRONT
flags-$(CONFIG_KBDFRONT) += -DCONFIG_KBDFRONT
flags-$(CONFIG_FBFRONT) += -DCONFIG_FBFRONT
@@ -67,6 +73,9 @@ TARGET := mini-os
SUBDIRS := lib xenbus console

src-$(CONFIG_BLKFRONT) += blkfront.c
+src-$(CONFIG_TPMFRONT) += tpmfront.c
+src-$(CONFIG_TPM_TIS) += tpm_tis.c
+src-$(CONFIG_TPMBACK) += tpmback.c
src-y += daytime.c
src-y += events.c
src-$(CONFIG_FBFRONT) += fbfront.c
diff --git a/extras/mini-os/include/lib.h b/extras/mini-os/include/lib.h
index d4641b6..935bede 100644
--- a/extras/mini-os/include/lib.h
+++ b/extras/mini-os/include/lib.h
@@ -142,6 +142,8 @@ enum fd_type {
FTYPE_FB,
FTYPE_MEM,
FTYPE_SAVEFILE,
+ FTYPE_TPMFRONT,
+ FTYPE_TPM_TIS,
};

LIST_HEAD(evtchn_port_list, evtchn_port_info);
@@ -185,6 +187,20 @@ extern struct file {
struct {
struct consfront_dev *dev;
} cons;
+#ifdef CONFIG_TPMFRONT
+ struct {
+ struct tpmfront_dev *dev;
+ int respgot;
+ off_t offset;
+ } tpmfront;
+#endif
+#ifdef CONFIG_TPM_TIS
+ struct {
+ struct tpm_chip *dev;
+ int respgot;
+ off_t offset;
+ } tpm_tis;
+#endif
#ifdef CONFIG_XENBUS
struct {
/* To each xenbus FD is associated a queue of watch events for this
diff --git a/extras/mini-os/include/tpm_tis.h b/extras/mini-os/include/tpm_tis.h
new file mode 100644
index 0000000..1faca0d
--- /dev/null
+++ b/extras/mini-os/include/tpm_tis.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2010-2012 United States Government, as represented by
+ * the Secretary of Defense. All rights reserved.
+ *
+ * This code has been derived from drivers/char/tpm.c
+ * from the linux kernel
+ *
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * This code has also been derived from drivers/char/tpm/tpm_tis.c
+ * from the linux kernel
+ *
+ * Copyright (C) 2005, 2006 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2
+ * of the License
+ */
+#ifndef TPM_TIS_H
+#define TPM_TIS_H
+
+#include <mini-os/types.h>
+#include <mini-os/byteorder.h>
+
+#define TPM_TIS_EN_LOCL0 1
+#define TPM_TIS_EN_LOCL1 (1 << 1)
+#define TPM_TIS_EN_LOCL2 (1 << 2)
+#define TPM_TIS_EN_LOCL3 (1 << 3)
+#define TPM_TIS_EN_LOCL4 (1 << 4)
+#define TPM_TIS_EN_LOCLALL (TPM_TIS_EN_LOCL0 | TPM_TIS_EN_LOCL1 | TPM_TIS_EN_LOCL2 | TPM_TIS_EN_LOCL3 | TPM_TIS_EN_LOCL4)
+#define TPM_TIS_LOCL_INT_TO_FLAG(x) (1 << x)
+#define TPM_BASEADDR 0xFED40000
+#define TPM_PROBE_IRQ 0xFFFF
+
+struct tpm_chip;
+
+struct tpm_chip* init_tpm_tis(unsigned long baseaddr, int localities, unsigned int irq);
+void shutdown_tpm_tis(struct tpm_chip* tpm);
+
+int tpm_tis_request_locality(struct tpm_chip* tpm, int locality);
+int tpm_tis_cmd(struct tpm_chip* tpm, uint8_t* req, size_t reqlen, uint8_t** resp, size_t* resplen);
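+
+/* A minimal usage sketch of the raw interface (illustrative only; req/reqlen
+ * are a caller-supplied TPM command and error checking is omitted):
+ *
+ *   struct tpm_chip* tpm = init_tpm_tis(TPM_BASEADDR, TPM_TIS_EN_LOCL0, 0);
+ *   uint8_t* resp; size_t resplen;
+ *   tpm_tis_cmd(tpm, req, reqlen, &resp, &resplen);
+ *   ... use resp ...
+ *   free(resp);              response buffer is allocated by the driver
+ *   shutdown_tpm_tis(tpm);
+ */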
+
+#ifdef HAVE_LIBC
+#include <sys/stat.h>
+#include <fcntl.h>
+/* POSIX IO functions:
+ * use tpm_tis_open() to get a file descriptor to the tpm device
+ * use write() on the fd to send a command to the backend. You must
+ * include the entire command in a single call to write().
+ * use read() on the fd to read the response. You can use
+ * fstat() to get the size of the response and lseek() to seek on it.
+ */
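+/* A minimal sketch of that flow (illustrative only; cmd/cmdlen and resp are
+ * caller-supplied buffers, error checking is omitted):
+ *
+ *   int fd = tpm_tis_open(tpm);
+ *   write(fd, cmd, cmdlen);
+ *   struct stat st;
+ *   fstat(fd, &st);          st.st_size is the length of the response
+ *   read(fd, resp, st.st_size);
+ */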
+int tpm_tis_open(struct tpm_chip* tpm);
+int tpm_tis_posix_read(int fd, uint8_t* buf, size_t count);
+int tpm_tis_posix_write(int fd, const uint8_t* buf, size_t count);
+int tpm_tis_posix_fstat(int fd, struct stat* buf);
+#endif
+
+#endif
diff --git a/extras/mini-os/include/tpmback.h b/extras/mini-os/include/tpmback.h
new file mode 100644
index 0000000..302f83b
--- /dev/null
+++ b/extras/mini-os/include/tpmback.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2010-2012 United States Government, as represented by
+ * the Secretary of Defense. All rights reserved.
+ *
+ * This code has been derived from drivers/xen/tpmback/tpmback.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (c) 2005, IBM Corporation
+ *
+ * which was itself derived from drivers/xen/netback/netback.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This code has also been derived from drivers/xen/tpmback/xenbus.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (C) 2005 IBM Corporation
+ * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
+ *
+ * This code has also been derived from drivers/xen/tpmback/interface.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (c) 2005, IBM Corporation
+ *
+ * which was itself also derived from drivers/xen/netback/interface.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (c) 2004, Keir Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2
+ * of the License
+ */
+
+#include <xen/io/tpmif.h>
+#include <xen/io/xenbus.h>
+#include <mini-os/types.h>
+#include <xen/xen.h>
+#ifndef TPMBACK_H
+#define TPMBACK_H
+
+struct tpmcmd {
+ domid_t domid; /* Domid of the frontend */
+ unsigned int handle; /* Handle of the frontend */
+ char* uuid; /* uuid of the tpm interface - allocated internally, don't free it */
+
+ unsigned int req_len; /* Size of the command in buf - set by tpmback driver */
+ uint8_t* req; /* tpm command bits, allocated by driver, DON'T FREE IT */
+ unsigned int resp_len; /* Size of the response in resp - set this before
+ passing the cmd object to tpmback_resp */
+ uint8_t* resp; /* Buffer for response - YOU MUST ALLOCATE IT, YOU MUST ALSO FREE IT */
+};
+typedef struct tpmcmd tpmcmd_t;
+
+/* Initialize the tpm backend driver
+ * @exclusive_uuids - This is a NULL-terminated list of vtpm uuid strings. If this list
+ * is non-empty, then only frontend domains with vtpm uuids matching
+ * entries in this list will be allowed to connect.
+ * Other connections will be immediately closed.
+ * Set this argument to NULL to allow any vtpm to connect.
+ */
+void init_tpmback(char** exclusive_uuids);
+/* Shutdown tpm backend driver */
+void shutdown_tpmback(void);
+
+/* Blocks until a tpm command is sent from any frontend.
+ * Returns a pointer to the tpm command to handle.
+ * Do not try to free this pointer or the req buffer.
+ * This function will return NULL if the tpm backend driver
+ * is shut down or any other error occurs */
+tpmcmd_t* tpmback_req_any(void);
+
+/* Blocks until a tpm command from the frontend at domid/handle
+ * is sent.
+ * Returns NULL if domid/handle is not connected, tpmback is
+ * shut down or shutting down, or if there is an error
+ */
+tpmcmd_t* tpmback_req(domid_t domid, unsigned int handle);
+
+/* Send the response to the tpm command back to the frontend
+ * This function will free the tpmcmd object, but you must free the resp
+ * buffer yourself */
+void tpmback_resp(tpmcmd_t* tpmcmd);
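+
+/* A minimal request loop sketch (illustrative only; the actual TPM command
+ * processing and error checking are omitted):
+ *
+ *   tpmcmd_t* cmd;
+ *   init_tpmback(NULL);                 accept any vtpm frontend
+ *   while((cmd = tpmback_req_any()) != NULL) {
+ *      uint8_t* resp = malloc(...);     you allocate the response buffer
+ *      cmd->resp = resp;
+ *      cmd->resp_len = ...;             and set its length
+ *      tpmback_resp(cmd);               frees cmd, but not the resp buffer
+ *      free(resp);
+ *   }
+ *   shutdown_tpmback();
+ */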
+
+/* Waits for the first frontend to connect and then sets domid and handle appropriately.
+ * If one or more frontends are already connected, this will set domid and handle to one
+ * of them arbitrarily. The main use for this function is to wait until a single
+ * frontend connection has occurred.
+ * returns 0 on success, non-zero on failure */
+int tpmback_wait_for_frontend_connect(domid_t *domid, unsigned int *handle);
+
+/* returns the number of frontends connected */
+int tpmback_num_frontends(void);
+
+/* Returns the uuid of the specified frontend, NULL on error */
+char* tpmback_get_uuid(domid_t domid, unsigned int handle);
+
+/* Specify a function to call when a new tpm device connects */
+void tpmback_set_open_callback(void (*cb)(domid_t, unsigned int));
+
+/* Specify a function to call when a tpm device disconnects */
+void tpmback_set_close_callback(void (*cb)(domid_t, unsigned int));
+
+/* Not implemented */
+void tpmback_set_suspend_callback(void (*cb)(domid_t, unsigned int));
+void tpmback_set_resume_callback(void (*cb)(domid_t, unsigned int));
+
+#endif
diff --git a/extras/mini-os/include/tpmfront.h b/extras/mini-os/include/tpmfront.h
new file mode 100644
index 0000000..fd2cb17
--- /dev/null
+++ b/extras/mini-os/include/tpmfront.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2010-2012 United States Government, as represented by
+ * the Secretary of Defense. All rights reserved.
+ *
+ * This code has been derived from drivers/char/tpm_vtpm.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (C) 2006 IBM Corporation
+ *
+ * This code has also been derived from drivers/char/tpm_xen.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (c) 2005, IBM Corporation
+ *
+ * which was itself derived from drivers/xen/netfront/netfront.c
+ * from the linux kernel
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+#ifndef TPMFRONT_H
+#define TPMFRONT_H
+
+#include <mini-os/types.h>
+#include <mini-os/os.h>
+#include <mini-os/events.h>
+#include <mini-os/wait.h>
+#include <xen/xen.h>
+#include <xen/io/xenbus.h>
+#include <xen/io/tpmif.h>
+
+struct tpmfront_dev {
+ grant_ref_t ring_ref;
+ evtchn_port_t evtchn;
+
+ tpmif_tx_interface_t* tx;
+
+ void** pages;
+
+ domid_t bedomid;
+ char* nodename;
+ char* bepath;
+
+ XenbusState state;
+
+ uint8_t waiting;
+ struct wait_queue_head waitq;
+
+ uint8_t* respbuf;
+ size_t resplen;
+
+#ifdef HAVE_LIBC
+ int fd;
+#endif
+
+};
+
+
+/*Initialize frontend */
+struct tpmfront_dev* init_tpmfront(const char* nodename);
+/*Shutdown frontend */
+void shutdown_tpmfront(struct tpmfront_dev* dev);
+
+/* Send a tpm command to the backend and wait for the response
+ *
+ * @dev - frontend device
+ * @req - request buffer
+ * @reqlen - length of request buffer
+ * @resp - *resp will be set to internal response buffer, don't free it! Value is undefined on error
+ * @resplen - *resplen will be set to the length of the response. Value is undefined on error
+ *
+ * returns 0 on success, non zero on failure.
+ * */
+int tpmfront_cmd(struct tpmfront_dev* dev, uint8_t* req, size_t reqlen, uint8_t** resp, size_t* resplen);
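+
+/* A minimal usage sketch (illustrative only; req/reqlen are a caller-supplied
+ * TPM command, error checking is omitted, and NULL is assumed to select the
+ * default xenstore node as with the other mini-os frontends):
+ *
+ *   struct tpmfront_dev* dev = init_tpmfront(NULL);
+ *   uint8_t* resp; size_t resplen;
+ *   tpmfront_cmd(dev, req, reqlen, &resp, &resplen);
+ *   ... use resp, but do not free it ...
+ *   shutdown_tpmfront(dev);
+ */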
+
+#ifdef HAVE_LIBC
+#include <sys/stat.h>
+/* POSIX IO functions:
+ * use tpmfront_open() to get a file descriptor to the tpm device
+ * use write() on the fd to send a command to the backend. You must
+ * include the entire command in a single call to write().
+ * use read() on the fd to read the response. You can use
+ * fstat() to get the size of the response and lseek() to seek on it.
+ */
+int tpmfront_open(struct tpmfront_dev* dev);
+int tpmfront_posix_read(int fd, uint8_t* buf, size_t count);
+int tpmfront_posix_write(int fd, const uint8_t* buf, size_t count);
+int tpmfront_posix_fstat(int fd, struct stat* buf);
+#endif
+
+
+#endif
diff --git a/extras/mini-os/lib/sys.c b/extras/mini-os/lib/sys.c
index 6cb97b1..d212969 100644
--- a/extras/mini-os/lib/sys.c
+++ b/extras/mini-os/lib/sys.c
@@ -27,6 +27,8 @@
#include <netfront.h>
#include <blkfront.h>
#include <fbfront.h>
+#include <tpmfront.h>
+#include <tpm_tis.h>
#include <xenbus.h>
#include <xenstore.h>

@@ -294,6 +296,16 @@ int read(int fd, void *buf, size_t nbytes)
return blkfront_posix_read(fd, buf, nbytes);
}
#endif
+#ifdef CONFIG_TPMFRONT
+ case FTYPE_TPMFRONT: {
+ return tpmfront_posix_read(fd, buf, nbytes);
+ }
+#endif
+#ifdef CONFIG_TPM_TIS
+ case FTYPE_TPM_TIS: {
+ return tpm_tis_posix_read(fd, buf, nbytes);
+ }
+#endif
default:
break;
}
@@ -330,6 +342,14 @@ int write(int fd, const void *buf, size_t nbytes)
case FTYPE_BLK:
return blkfront_posix_write(fd, buf, nbytes);
#endif
+#ifdef CONFIG_TPMFRONT
+ case FTYPE_TPMFRONT:
+ return tpmfront_posix_write(fd, buf, nbytes);
+#endif
+#ifdef CONFIG_TPM_TIS
+ case FTYPE_TPM_TIS:
+ return tpm_tis_posix_write(fd, buf, nbytes);
+#endif
default:
break;
}
@@ -341,8 +361,16 @@ int write(int fd, const void *buf, size_t nbytes)
off_t lseek(int fd, off_t offset, int whence)
{
switch(files[fd].type) {
+#if defined(CONFIG_BLKFRONT) || defined(CONFIG_TPMFRONT) || defined(CONFIG_TPM_TIS)
#ifdef CONFIG_BLKFRONT
case FTYPE_BLK:
+#endif
+#ifdef CONFIG_TPMFRONT
+ case FTYPE_TPMFRONT:
+#endif
+#ifdef CONFIG_TPM_TIS
+ case FTYPE_TPM_TIS:
+#endif
switch (whence) {
case SEEK_SET:
files[fd].file.offset = offset;
@@ -420,6 +448,18 @@ int close(int fd)
files[fd].type = FTYPE_NONE;
return 0;
#endif
+#ifdef CONFIG_TPMFRONT
+ case FTYPE_TPMFRONT:
+ shutdown_tpmfront(files[fd].tpmfront.dev);
+ files[fd].type = FTYPE_NONE;
+ return 0;
+#endif
+#ifdef CONFIG_TPM_TIS
+ case FTYPE_TPM_TIS:
+ shutdown_tpm_tis(files[fd].tpm_tis.dev);
+ files[fd].type = FTYPE_NONE;
+ return 0;
+#endif
#ifdef CONFIG_KBDFRONT
case FTYPE_KBD:
shutdown_kbdfront(files[fd].kbd.dev);
@@ -489,6 +529,14 @@ int fstat(int fd, struct stat *buf)
case FTYPE_BLK:
return blkfront_posix_fstat(fd, buf);
#endif
+#ifdef CONFIG_TPMFRONT
+ case FTYPE_TPMFRONT:
+ return tpmfront_posix_fstat(fd, buf);
+#endif
+#ifdef CONFIG_TPM_TIS
+ case FTYPE_TPM_TIS:
+ return tpm_tis_posix_fstat(fd, buf);
+#endif
default:
break;
}
diff --git a/extras/mini-os/tpm_tis.c b/extras/mini-os/tpm_tis.c
new file mode 100644
index 0000000..b0e27b2
--- /dev/null
+++ b/extras/mini-os/tpm_tis.c
@@ -0,0 +1,1341 @@
+/*
+ * Copyright (c) 2010-2012 United States Government, as represented by
+ * the Secretary of Defense. All rights reserved.
+ *
+ * This code has been derived from drivers/char/tpm.c
+ * from the linux kernel
+ *
+ * Copyright (C) 2004 IBM Corporation
+ *
+ * This code has also been derived from drivers/char/tpm/tpm_tis.c
+ * from the linux kernel
+ *
+ * Copyright (C) 2005, 2006 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2
+ * of the License
+ */
+#include <mini-os/ioremap.h>
+#include <mini-os/iorw.h>
+#include <mini-os/tpm_tis.h>
+#include <mini-os/os.h>
+#include <mini-os/sched.h>
+#include <mini-os/byteorder.h>
+#include <mini-os/events.h>
+#include <mini-os/wait.h>
+#include <mini-os/xmalloc.h>
+#include <errno.h>
+#include <stdbool.h>
+
+#ifndef min
+ #define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
+#endif
+
+#define TPM_HEADER_SIZE 10
+
+#define TPM_BUFSIZE 2048
+
+struct tpm_input_header {
+ uint16_t tag;
+ uint32_t length;
+ uint32_t ordinal;
+}__attribute__((packed));
+
+struct tpm_output_header {
+ uint16_t tag;
+ uint32_t length;
+ uint32_t return_code;
+}__attribute__((packed));
+
+struct stclear_flags_t {
+ uint16_t tag;
+ uint8_t deactivated;
+ uint8_t disableForceClear;
+ uint8_t physicalPresence;
+ uint8_t physicalPresenceLock;
+ uint8_t bGlobalLock;
+}__attribute__((packed));
+
+struct tpm_version_t {
+ uint8_t Major;
+ uint8_t Minor;
+ uint8_t revMajor;
+ uint8_t revMinor;
+}__attribute__((packed));
+
+struct tpm_version_1_2_t {
+ uint16_t tag;
+ uint8_t Major;
+ uint8_t Minor;
+ uint8_t revMajor;
+ uint8_t revMinor;
+}__attribute__((packed));
+
+struct timeout_t {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+}__attribute__((packed));
+
+struct duration_t {
+ uint32_t tpm_short;
+ uint32_t tpm_medium;
+ uint32_t tpm_long;
+}__attribute__((packed));
+
+struct permanent_flags_t {
+ uint16_t tag;
+ uint8_t disable;
+ uint8_t ownership;
+ uint8_t deactivated;
+ uint8_t readPubek;
+ uint8_t disableOwnerClear;
+ uint8_t allowMaintenance;
+ uint8_t physicalPresenceLifetimeLock;
+ uint8_t physicalPresenceHWEnable;
+ uint8_t physicalPresenceCMDEnable;
+ uint8_t CEKPUsed;
+ uint8_t TPMpost;
+ uint8_t TPMpostLock;
+ uint8_t FIPS;
+ uint8_t operator;
+ uint8_t enableRevokeEK;
+ uint8_t nvLocked;
+ uint8_t readSRKPub;
+ uint8_t tpmEstablished;
+ uint8_t maintenanceDone;
+ uint8_t disableFullDALogicInfo;
+}__attribute__((packed));
+
+typedef union {
+ struct permanent_flags_t perm_flags;
+ struct stclear_flags_t stclear_flags;
+ bool owned;
+ uint32_t num_pcrs;
+ struct tpm_version_t tpm_version;
+ struct tpm_version_1_2_t tpm_version_1_2;
+ uint32_t manufacturer_id;
+ struct timeout_t timeout;
+ struct duration_t duration;
+} cap_t;
+
+struct tpm_getcap_params_in {
+ uint32_t cap;
+ uint32_t subcap_size;
+ uint32_t subcap;
+}__attribute__((packed));
+
+struct tpm_getcap_params_out {
+ uint32_t cap_size;
+ cap_t cap;
+}__attribute__((packed));
+
+struct tpm_readpubek_params_out {
+ uint8_t algorithm[4];
+ uint8_t encscheme[2];
+ uint8_t sigscheme[2];
+ uint32_t paramsize;
+ uint8_t parameters[12]; /*assuming RSA*/
+ uint32_t keysize;
+ uint8_t modulus[256];
+ uint8_t checksum[20];
+}__attribute__((packed));
+
+typedef union {
+ struct tpm_input_header in;
+ struct tpm_output_header out;
+} tpm_cmd_header;
+
+#define TPM_DIGEST_SIZE 20
+struct tpm_pcrread_out {
+ uint8_t pcr_result[TPM_DIGEST_SIZE];
+}__attribute__((packed));
+
+struct tpm_pcrread_in {
+ uint32_t pcr_idx;
+}__attribute__((packed));
+
+struct tpm_pcrextend_in {
+ uint32_t pcr_idx;
+ uint8_t hash[TPM_DIGEST_SIZE];
+}__attribute__((packed));
+
+typedef union {
+ struct tpm_getcap_params_out getcap_out;
+ struct tpm_readpubek_params_out readpubek_out;
+ uint8_t readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
+ struct tpm_getcap_params_in getcap_in;
+ struct tpm_pcrread_in pcrread_in;
+ struct tpm_pcrread_out pcrread_out;
+ struct tpm_pcrextend_in pcrextend_in;
+} tpm_cmd_params;
+
+struct tpm_cmd_t {
+ tpm_cmd_header header;
+ tpm_cmd_params params;
+}__attribute__((packed));
+
+
+enum tpm_duration {
+ TPM_SHORT = 0,
+ TPM_MEDIUM = 1,
+ TPM_LONG = 2,
+ TPM_UNDEFINED,
+};
+
+#define TPM_MAX_ORDINAL 243
+#define TPM_MAX_PROTECTED_ORDINAL 12
+#define TPM_PROTECTED_ORDINAL_MASK 0xFF
+
+extern const uint8_t tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL];
+extern const uint8_t tpm_ordinal_duration[TPM_MAX_ORDINAL];
+
+#define TPM_DIGEST_SIZE 20
+#define TPM_ERROR_SIZE 10
+#define TPM_RET_CODE_IDX 6
+
+/* tpm_capabilities */
+#define TPM_CAP_FLAG cpu_to_be32(4)
+#define TPM_CAP_PROP cpu_to_be32(5)
+#define CAP_VERSION_1_1 cpu_to_be32(0x06)
+#define CAP_VERSION_1_2 cpu_to_be32(0x1A)
+
+/* tpm_sub_capabilities */
+#define TPM_CAP_PROP_PCR cpu_to_be32(0x101)
+#define TPM_CAP_PROP_MANUFACTURER cpu_to_be32(0x103)
+#define TPM_CAP_FLAG_PERM cpu_to_be32(0x108)
+#define TPM_CAP_FLAG_VOL cpu_to_be32(0x109)
+#define TPM_CAP_PROP_OWNER cpu_to_be32(0x111)
+#define TPM_CAP_PROP_TIS_TIMEOUT cpu_to_be32(0x115)
+#define TPM_CAP_PROP_TIS_DURATION cpu_to_be32(0x120)
+
+
+#define TPM_INTERNAL_RESULT_SIZE 200
+#define TPM_TAG_RQU_COMMAND cpu_to_be16(193)
+#define TPM_ORD_GET_CAP cpu_to_be32(101)
+
+extern const struct tpm_input_header tpm_getcap_header;
+
+
+
+const uint8_t tpm_protected_ordinal_duration[TPM_MAX_PROTECTED_ORDINAL] = {
+ TPM_UNDEFINED, /* 0 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 5 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 10 */
+ TPM_SHORT,
+};
+
+const uint8_t tpm_ordinal_duration[TPM_MAX_ORDINAL] = {
+ TPM_UNDEFINED, /* 0 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 5 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 10 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_LONG,
+ TPM_MEDIUM, /* 15 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_SHORT, /* 20 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT, /* 25 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 30 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 35 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 40 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 45 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_LONG,
+ TPM_MEDIUM, /* 50 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 55 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 60 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 65 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 70 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 75 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 80 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+ TPM_LONG,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 85 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 90 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 95 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 100 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 105 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 110 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 115 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 120 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 125 */
+ TPM_SHORT,
+ TPM_LONG,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT, /* 130 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_UNDEFINED, /* 135 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 140 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 145 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 150 */
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 155 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 160 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 165 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_LONG, /* 170 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 175 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_MEDIUM, /* 180 */
+ TPM_SHORT,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM, /* 185 */
+ TPM_SHORT,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 190 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 195 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 200 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 205 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_MEDIUM, /* 210 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_MEDIUM,
+ TPM_UNDEFINED, /* 215 */
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT,
+ TPM_SHORT, /* 220 */
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_SHORT,
+ TPM_UNDEFINED, /* 225 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 230 */
+ TPM_LONG,
+ TPM_MEDIUM,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED, /* 235 */
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_UNDEFINED,
+ TPM_SHORT, /* 240 */
+ TPM_UNDEFINED,
+ TPM_MEDIUM,
+};
+
+const struct tpm_input_header tpm_getcap_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(22),
+ .ordinal = TPM_ORD_GET_CAP
+};
+
+
+enum tis_access {
+ TPM_ACCESS_VALID = 0x80,
+ TPM_ACCESS_ACTIVE_LOCALITY = 0x20, /* (R) */
+ TPM_ACCESS_RELINQUISH_LOCALITY = 0x20,/* (W) */
+ TPM_ACCESS_REQUEST_PENDING = 0x04, /* (W) */
+ TPM_ACCESS_REQUEST_USE = 0x02, /* (W) */
+};
+
+enum tis_status {
+ TPM_STS_VALID = 0x80, /* (R) */
+ TPM_STS_COMMAND_READY = 0x40, /* (R) */
+ TPM_STS_DATA_AVAIL = 0x10, /* (R) */
+ TPM_STS_DATA_EXPECT = 0x08, /* (R) */
+ TPM_STS_GO = 0x20, /* (W) */
+};
+
+enum tis_int_flags {
+ TPM_GLOBAL_INT_ENABLE = 0x80000000,
+ TPM_INTF_BURST_COUNT_STATIC = 0x100,
+ TPM_INTF_CMD_READY_INT = 0x080,
+ TPM_INTF_INT_EDGE_FALLING = 0x040,
+ TPM_INTF_INT_EDGE_RISING = 0x020,
+ TPM_INTF_INT_LEVEL_LOW = 0x010,
+ TPM_INTF_INT_LEVEL_HIGH = 0x008,
+ TPM_INTF_LOCALITY_CHANGE_INT = 0x004,
+ TPM_INTF_STS_VALID_INT = 0x002,
+ TPM_INTF_DATA_AVAIL_INT = 0x001,
+};
+
+enum tis_defaults {
+ TIS_MEM_BASE = 0xFED40000,
+ TIS_MEM_LEN = 0x5000,
+ TIS_SHORT_TIMEOUT = 750, /*ms*/
+ TIS_LONG_TIMEOUT = 2000, /*2 sec */
+};
+
+#define TPM_TIMEOUT 5
+
+#define TPM_ACCESS(t, l) (((uint8_t*)t->pages[l]) + 0x0000)
+#define TPM_INT_ENABLE(t, l) ((uint32_t*)(((uint8_t*)t->pages[l]) + 0x0008))
+#define TPM_INT_VECTOR(t, l) (((uint8_t*)t->pages[l]) + 0x000C)
+#define TPM_INT_STATUS(t, l) (((uint8_t*)t->pages[l]) + 0x0010)
+#define TPM_INTF_CAPS(t, l) ((uint32_t*)(((uint8_t*)t->pages[l]) + 0x0014))
+#define TPM_STS(t, l) ((uint8_t*)(((uint8_t*)t->pages[l]) + 0x0018))
+#define TPM_DATA_FIFO(t, l) (((uint8_t*)t->pages[l]) + 0x0024)
+
+#define TPM_DID_VID(t, l) ((uint32_t*)(((uint8_t*)t->pages[l]) + 0x0F00))
+#define TPM_RID(t, l) (((uint8_t*)t->pages[l]) + 0x0F04)
+
+struct tpm_chip {
+ int enabled_localities;
+ int locality;
+ unsigned long baseaddr;
+ uint8_t* pages[5];
+ int did, vid, rid;
+
+ uint8_t data_buffer[TPM_BUFSIZE];
+ int data_len;
+
+ s_time_t timeout_a, timeout_b, timeout_c, timeout_d;
+ s_time_t duration[3];
+
+#ifdef HAVE_LIBC
+ int fd;
+#endif
+
+ unsigned int irq;
+ struct wait_queue_head read_queue;
+ struct wait_queue_head int_queue;
+};
+
+
+static void __init_tpm_chip(struct tpm_chip* tpm) {
+ tpm->enabled_localities = TPM_TIS_EN_LOCLALL;
+ tpm->locality = -1;
+ tpm->baseaddr = 0;
+ tpm->pages[0] = tpm->pages[1] = tpm->pages[2] = tpm->pages[3] = tpm->pages[4] = NULL;
+ tpm->vid = 0;
+ tpm->did = 0;
+ tpm->irq = 0;
+ init_waitqueue_head(&tpm->read_queue);
+ init_waitqueue_head(&tpm->int_queue);
+
+ tpm->data_len = -1;
+
+#ifdef HAVE_LIBC
+ tpm->fd = -1;
+#endif
+}
+
+/*
+ * Returns max number of nsecs to wait
+ */
+s_time_t tpm_calc_ordinal_duration(struct tpm_chip *chip,
+ uint32_t ordinal)
+{
+ int duration_idx = TPM_UNDEFINED;
+ s_time_t duration = 0;
+
+ if (ordinal < TPM_MAX_ORDINAL)
+ duration_idx = tpm_ordinal_duration[ordinal];
+ else if ((ordinal & TPM_PROTECTED_ORDINAL_MASK) <
+ TPM_MAX_PROTECTED_ORDINAL)
+ duration_idx =
+ tpm_protected_ordinal_duration[ordinal &
+ TPM_PROTECTED_ORDINAL_MASK];
+
+ if (duration_idx != TPM_UNDEFINED) {
+ duration = chip->duration[duration_idx];
+ }
+
+ if (duration <= 0) {
+ return SECONDS(120);
+ }
+ else
+ {
+ return duration;
+ }
+}
+
+
+static int locality_enabled(struct tpm_chip* tpm, int l) {
+ return tpm->enabled_localities & (1 << l);
+}
+
+static int check_locality(struct tpm_chip* tpm, int l) {
+ if(locality_enabled(tpm, l) && (ioread8(TPM_ACCESS(tpm, l)) &
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+ return l;
+ }
+ return -1;
+}
+
+void release_locality(struct tpm_chip* tpm, int l, int force)
+{
+ if (locality_enabled(tpm, l) && (force || (ioread8(TPM_ACCESS(tpm, l)) &
+ (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID)) ==
+ (TPM_ACCESS_REQUEST_PENDING | TPM_ACCESS_VALID))) {
+ iowrite8(TPM_ACCESS(tpm, l), TPM_ACCESS_RELINQUISH_LOCALITY);
+ }
+}
+
+int tpm_tis_request_locality(struct tpm_chip* tpm, int l) {
+
+ s_time_t stop;
+ /*Make sure locality is valid */
+ if(!locality_enabled(tpm, l)) {
+ printk("tpm_tis_change_locality() Tried to change to locality %d, but it is disabled or invalid!\n", l);
+ return -1;
+ }
+ /* Check if we already have the current locality */
+ if(check_locality(tpm, l) >= 0) {
+ return tpm->locality = l;
+ }
+ /* Set the new locality*/
+ iowrite8(TPM_ACCESS(tpm, l), TPM_ACCESS_REQUEST_USE);
+
+ if(tpm->irq) {
+ /* Wait for interrupt */
+ wait_event_deadline(tpm->int_queue, (check_locality(tpm, l) >= 0), NOW() + tpm->timeout_a);
+
+ /* FIXME: Handle timeout event, should return error in that case */
+ return l;
+ } else {
+ /* Wait for burstcount */
+ stop = NOW() + tpm->timeout_a;
+ do {
+ if(check_locality(tpm, l) >= 0) {
+ return tpm->locality = l;
+ }
+ msleep(TPM_TIMEOUT);
+ } while(NOW() < stop);
+ }
+
+ printk("REQ LOCALITY FAILURE\n");
+ return -1;
+}
+
+static uint8_t tpm_tis_status(struct tpm_chip* tpm) {
+ return ioread8(TPM_STS(tpm, tpm->locality));
+}
+
+/* This causes the current command to be aborted */
+static void tpm_tis_ready(struct tpm_chip* tpm) {
+ iowrite8(TPM_STS(tpm, tpm->locality), TPM_STS_COMMAND_READY);
+}
+#define tpm_tis_cancel_cmd(v) tpm_tis_ready(v)
+
+static int get_burstcount(struct tpm_chip* tpm) {
+ s_time_t stop;
+ int burstcnt;
+
+ stop = NOW() + tpm->timeout_d;
+ do {
+ burstcnt = ioread8((TPM_STS(tpm, tpm->locality) + 1));
+ burstcnt += ioread8(TPM_STS(tpm, tpm->locality) + 2) << 8;
+
+ if (burstcnt) {
+ return burstcnt;
+ }
+ msleep(TPM_TIMEOUT);
+ } while(NOW() < stop);
+ return -EBUSY;
+}
+
+static int wait_for_stat(struct tpm_chip* tpm, uint8_t mask,
+ unsigned long timeout, struct wait_queue_head* queue) {
+ s_time_t stop;
+ uint8_t status;
+
+ status = tpm_tis_status(tpm);
+ if((status & mask) == mask) {
+ return 0;
+ }
+
+ if(tpm->irq) {
+ wait_event_deadline(*queue, ((tpm_tis_status(tpm) & mask) == mask), timeout);
+ /* FIXME: Check for timeout and return -ETIME */
+ return 0;
+ } else {
+ stop = NOW() + timeout;
+ do {
+ msleep(TPM_TIMEOUT);
+ status = tpm_tis_status(tpm);
+ if((status & mask) == mask)
+ return 0;
+ } while( NOW() < stop);
+ }
+ return -ETIME;
+}
+
+static int recv_data(struct tpm_chip* tpm, uint8_t* buf, size_t count) {
+ int size = 0;
+ int burstcnt;
+ while( size < count &&
+ wait_for_stat(tpm,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ tpm->timeout_c,
+ &tpm->read_queue)
+ == 0) {
+ burstcnt = get_burstcount(tpm);
+ for(; burstcnt > 0 && size < count; --burstcnt)
+ {
+ buf[size++] = ioread8(TPM_DATA_FIFO(tpm, tpm->locality));
+ }
+ }
+ return size;
+}
+
+int tpm_tis_recv(struct tpm_chip* tpm, uint8_t* buf, size_t count) {
+ int size = 0;
+ int expected, status;
+
+ if (count < TPM_HEADER_SIZE) {
+ size = -EIO;
+ goto out;
+ }
+
+ /* read first 10 bytes, including tag, paramsize, and result */
+ if((size =
+ recv_data(tpm, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
+ printk("Error reading tpm cmd header\n");
+ goto out;
+ }
+
+ expected = be32_to_cpu(*((uint32_t*)(buf + 2)));
+ if(expected > count) {
+ size = -EIO;
+ goto out;
+ }
+
+ if((size += recv_data(tpm, & buf[TPM_HEADER_SIZE],
+ expected - TPM_HEADER_SIZE)) < expected) {
+ printk("Unable to read rest of tpm command size=%d expected=%d\n", size, expected);
+ size = -ETIME;
+ goto out;
+ }
+
+ wait_for_stat(tpm, TPM_STS_VALID, tpm->timeout_c, &tpm->int_queue);
+ status = tpm_tis_status(tpm);
+ if(status & TPM_STS_DATA_AVAIL) {
+ printk("Error: left over data\n");
+ size = -EIO;
+ goto out;
+ }
+
+out:
+ tpm_tis_ready(tpm);
+ release_locality(tpm, tpm->locality, 0);
+ return size;
+}
+int tpm_tis_send(struct tpm_chip* tpm, uint8_t* buf, size_t len) {
+ int rc;
+ int status, burstcnt = 0;
+ int count = 0;
+ uint32_t ordinal;
+
+ if(tpm_tis_request_locality(tpm, tpm->locality) < 0) {
+ return -EBUSY;
+ }
+
+ status = tpm_tis_status(tpm);
+ if((status & TPM_STS_COMMAND_READY) == 0) {
+ tpm_tis_ready(tpm);
+ if(wait_for_stat(tpm, TPM_STS_COMMAND_READY, tpm->timeout_b, &tpm->int_queue) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+
+ while(count < len - 1) {
+ burstcnt = get_burstcount(tpm);
+ for(;burstcnt > 0 && count < len -1; --burstcnt) {
+ iowrite8(TPM_DATA_FIFO(tpm, tpm->locality), buf[count++]);
+ }
+
+ wait_for_stat(tpm, TPM_STS_VALID, tpm->timeout_c, &tpm->int_queue);
+ status = tpm_tis_status(tpm);
+ if((status & TPM_STS_DATA_EXPECT) == 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+ }
+
+ /*Write last byte*/
+ iowrite8(TPM_DATA_FIFO(tpm, tpm->locality), buf[count]);
+ wait_for_stat(tpm, TPM_STS_VALID, tpm->timeout_c, &tpm->read_queue);
+ status = tpm_tis_status(tpm);
+ if((status & TPM_STS_DATA_EXPECT) != 0) {
+ rc = -EIO;
+ goto out_err;
+ }
+
+ /*go and do it*/
+ iowrite8(TPM_STS(tpm, tpm->locality), TPM_STS_GO);
+
+ if(tpm->irq) {
+ /*Wait for interrupt */
+ ordinal = be32_to_cpu(*((uint32_t*)(buf + 6)));
+ if(wait_for_stat(tpm,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ tpm_calc_ordinal_duration(tpm, ordinal),
+ &tpm->read_queue) < 0) {
+ rc = -ETIME;
+ goto out_err;
+ }
+ }
+#ifdef HAVE_LIBC
+ if(tpm->fd >= 0) {
+ files[tpm->fd].read = 0;
+ files[tpm->fd].tpm_tis.respgot = 0;
+ files[tpm->fd].tpm_tis.offset = 0;
+ }
+#endif
+ return len;
+
+out_err:
+ tpm_tis_ready(tpm);
+ release_locality(tpm, tpm->locality, 0);
+ return rc;
+}
+
+static void tpm_tis_irq_handler(evtchn_port_t port, struct pt_regs *regs, void* data)
+{
+ struct tpm_chip* tpm = data;
+ uint32_t interrupt;
+ int i;
+
+ interrupt = ioread32(TPM_INT_STATUS(tpm, tpm->locality));
+ if(interrupt == 0) {
+ return;
+ }
+
+ if(interrupt & TPM_INTF_DATA_AVAIL_INT) {
+ wake_up(&tpm->read_queue);
+ }
+ if(interrupt & TPM_INTF_LOCALITY_CHANGE_INT) {
+ for(i = 0; i < 5; ++i) {
+ if(check_locality(tpm, i) >= 0) {
+ break;
+ }
+ }
+ }
+ if(interrupt & (TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_STS_VALID_INT |
+ TPM_INTF_CMD_READY_INT)) {
+ wake_up(&tpm->int_queue);
+ }
+
+ /* Clear interrupts handled with TPM_EOI */
+ iowrite32(TPM_INT_STATUS(tpm, tpm->locality), interrupt);
+ ioread32(TPM_INT_STATUS(tpm, tpm->locality));
+ return;
+}
+
+/*
+ * Internal kernel interface to transmit TPM commands
+ */
+static ssize_t tpm_transmit(struct tpm_chip *chip, const uint8_t *buf,
+ size_t bufsiz)
+{
+ ssize_t rc;
+ uint32_t count, ordinal;
+ s_time_t stop;
+
+ count = be32_to_cpu(*((uint32_t *) (buf + 2)));
+ ordinal = be32_to_cpu(*((uint32_t *) (buf + 6)));
+ if (count == 0)
+ return -ENODATA;
+ if (count > bufsiz) {
+ printk("Error: invalid count value %x %zx \n", count, bufsiz);
+ return -E2BIG;
+ }
+
+ //down(&chip->tpm_mutex);
+
+ if ((rc = tpm_tis_send(chip, (uint8_t *) buf, count)) < 0) {
+ printk("tpm_transmit: tpm_send: error %zd\n", rc);
+ goto out;
+ }
+
+ if (chip->irq)
+ goto out_recv;
+
+ stop = NOW() + tpm_calc_ordinal_duration(chip, ordinal);
+ do {
+ uint8_t status = tpm_tis_status(chip);
+ if ((status & (TPM_STS_DATA_AVAIL | TPM_STS_VALID)) ==
+ (TPM_STS_DATA_AVAIL | TPM_STS_VALID))
+ goto out_recv;
+
+ if ((status == TPM_STS_COMMAND_READY)) {
+ printk("TPM Error: Operation Canceled\n");
+ rc = -ECANCELED;
+ goto out;
+ }
+
+ msleep(TPM_TIMEOUT); /* CHECK */
+ rmb();
+ } while (NOW() < stop);
+
+ /* Cancel the command */
+ tpm_tis_cancel_cmd(chip);
+ printk("TPM Operation Timed out\n");
+ rc = -ETIME;
+ goto out;
+
+out_recv:
+ if((rc = tpm_tis_recv(chip, (uint8_t *) buf, bufsiz)) < 0) {
+ printk("tpm_transmit: tpm_recv: error %d\n", rc);
+ }
+out:
+ //up(&chip->tpm_mutex);
+ return rc;
+}
+
+static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
+ int len, const char *desc)
+{
+ int err;
+
+ len = tpm_transmit(chip,(uint8_t *) cmd, len);
+ if (len < 0)
+ return len;
+ if (len == TPM_ERROR_SIZE) {
+ err = be32_to_cpu(cmd->header.out.return_code);
+ printk("A TPM error (%d) occurred %s\n", err, desc);
+ return err;
+ }
+ return 0;
+}
+
+void tpm_get_timeouts(struct tpm_chip *chip)
+{
+ struct tpm_cmd_t tpm_cmd;
+ struct timeout_t *timeout_cap;
+ struct duration_t *duration_cap;
+ ssize_t rc;
+ uint32_t timeout;
+
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
+
+ if((rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ "attempting to determine the timeouts")) != 0) {
+ printk("transmit failed %d\n", rc);
+ goto duration;
+ }
+
+ if (be32_to_cpu(tpm_cmd.params.getcap_out.cap_size)
+ != 4 * sizeof(uint32_t)) {
+ printk("Out len check failure %lu \n", be32_to_cpu(tpm_cmd.header.out.length));
+ goto duration;
+ }
+
+ timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
+ /* Don't overwrite default if value is 0 */
+ timeout = be32_to_cpu(timeout_cap->a);
+ if (timeout)
+ chip->timeout_a = MICROSECS(timeout); /*Convert to msec */
+ timeout = be32_to_cpu(timeout_cap->b);
+ if (timeout)
+ chip->timeout_b = MICROSECS(timeout); /*Convert to msec */
+ timeout = be32_to_cpu(timeout_cap->c);
+ if (timeout)
+ chip->timeout_c = MICROSECS(timeout); /*Convert to msec */
+ timeout = be32_to_cpu(timeout_cap->d);
+ if (timeout)
+ chip->timeout_d = MICROSECS(timeout); /*Convert to msec */
+
+duration:
+ tpm_cmd.header.in = tpm_getcap_header;
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
+
+ if((rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
+ "attempting to determine the durations")) < 0) {
+ return;
+ }
+
+ if (be32_to_cpu(tpm_cmd.params.getcap_out.cap_size)
+ != 3 * sizeof(uint32_t)) {
+ return;
+ }
+ duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
+ chip->duration[TPM_SHORT] = be32_to_cpu(duration_cap->tpm_short);
+ /* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
+ * value wrong and apparently reports msecs rather than usecs. So we
+ * fix up the resulting too-small TPM_SHORT value to make things work.
+ */
+ if (chip->duration[TPM_SHORT] < 10) {
+ chip->duration[TPM_SHORT] = MILLISECS(chip->duration[TPM_SHORT]);
+ } else {
+ chip->duration[TPM_SHORT] = MICROSECS(chip->duration[TPM_SHORT]);
+ }
+
+ chip->duration[TPM_MEDIUM] = MICROSECS(be32_to_cpu(duration_cap->tpm_medium));
+ chip->duration[TPM_LONG] = MICROSECS(be32_to_cpu(duration_cap->tpm_long));
+}
+
+
+
+void tpm_continue_selftest(struct tpm_chip* chip) {
+ uint8_t data[] = {
+ 0, 193, /* TPM_TAG_RQU_COMMAND */
+ 0, 0, 0, 10, /* length */
+ 0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */
+ };
+
+ tpm_transmit(chip, data, sizeof(data));
+}
+
+ssize_t tpm_getcap(struct tpm_chip *chip, uint32_t subcap_id, cap_t *cap,
+ const char *desc)
+{
+ struct tpm_cmd_t tpm_cmd;
+ int rc;
+
+ tpm_cmd.header.in = tpm_getcap_header;
+ if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
+ tpm_cmd.params.getcap_in.cap = subcap_id;
+ /*subcap field not necessary */
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
+ tpm_cmd.header.in.length -= cpu_to_be32(sizeof(uint32_t));
+ } else {
+ if (subcap_id == TPM_CAP_FLAG_PERM ||
+ subcap_id == TPM_CAP_FLAG_VOL)
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
+ else
+ tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+ tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
+ tpm_cmd.params.getcap_in.subcap = subcap_id;
+ }
+ rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, desc);
+ if (!rc)
+ *cap = tpm_cmd.params.getcap_out.cap;
+ return rc;
+}
+
+
+struct tpm_chip* init_tpm_tis(unsigned long baseaddr, int localities, unsigned int irq)
+{
+ int i;
+ unsigned long addr;
+ struct tpm_chip* tpm = NULL;
+ uint32_t didvid;
+ uint32_t intfcaps;
+ uint32_t intmask;
+
+ printk("============= Init TPM TIS Driver ==============\n");
+
+ /*Sanity check the localities input */
+ if(localities & ~TPM_TIS_EN_LOCLALL) {
+ printk("init_tpm_tis() Invalid locality specification! %X\n", localities);
+ goto abort_egress;
+ }
+
+ printk("IOMEM Machine Base Address: %lX\n", baseaddr);
+
+ /* Create the tpm data structure */
+ tpm = malloc(sizeof(struct tpm_chip));
+ __init_tpm_chip(tpm);
+
+ /* Set the enabled localities - if 0 we leave default as all enabled */
+ if(localities != 0) {
+ tpm->enabled_localities = localities;
+ }
+ printk("Enabled Localities: ");
+ for(i = 0; i < 5; ++i) {
+ if(locality_enabled(tpm, i)) {
+ printk("%d ", i);
+ }
+ }
+ printk("\n");
+
+ /* Set the base machine address */
+ tpm->baseaddr = baseaddr;
+
+ /* Set default timeouts */
+ tpm->timeout_a = MILLISECS(TIS_SHORT_TIMEOUT);
+ tpm->timeout_b = MILLISECS(TIS_LONG_TIMEOUT);
+ tpm->timeout_c = MILLISECS(TIS_SHORT_TIMEOUT);
+ tpm->timeout_d = MILLISECS(TIS_SHORT_TIMEOUT);
+
+ /*Map the mmio pages */
+ addr = tpm->baseaddr;
+ for(i = 0; i < 5; ++i) {
+ if(locality_enabled(tpm, i)) {
+ /* Map the page in now */
+ if((tpm->pages[i] = ioremap_nocache(addr, PAGE_SIZE)) == NULL) {
+ printk("Unable to map iomem page a address %p\n", addr);
+ goto abort_egress;
+ }
+
+ /* Set default locality to the first enabled one */
+ if (tpm->locality < 0) {
+ if(tpm_tis_request_locality(tpm, i) < 0) {
+ printk("Unable to request locality %d??\n", i);
+ goto abort_egress;
+ }
+ }
+ }
+ addr += PAGE_SIZE;
+ }
+
+
+ /* Get the vendor and device ids */
+ didvid = ioread32(TPM_DID_VID(tpm, tpm->locality));
+ tpm->did = didvid >> 16;
+ tpm->vid = didvid & 0xFFFF;
+
+
+ /* Get the revision id */
+ tpm->rid = ioread8(TPM_RID(tpm, tpm->locality));
+
+ printk("1.2 TPM (device-id=0x%X vendor-id = %X rev-id = %X)\n", tpm->did, tpm->vid, tpm->rid);
+
+ intfcaps = ioread32(TPM_INTF_CAPS(tpm, tpm->locality));
+ printk("TPM interface capabilities (0x%x):\n", intfcaps);
+ if (intfcaps & TPM_INTF_BURST_COUNT_STATIC)
+ printk("\tBurst Count Static\n");
+ if (intfcaps & TPM_INTF_CMD_READY_INT)
+ printk("\tCommand Ready Int Support\n");
+ if (intfcaps & TPM_INTF_INT_EDGE_FALLING)
+ printk("\tInterrupt Edge Falling\n");
+ if (intfcaps & TPM_INTF_INT_EDGE_RISING)
+ printk("\tInterrupt Edge Rising\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_LOW)
+ printk("\tInterrupt Level Low\n");
+ if (intfcaps & TPM_INTF_INT_LEVEL_HIGH)
+ printk("\tInterrupt Level High\n");
+ if (intfcaps & TPM_INTF_LOCALITY_CHANGE_INT)
+ printk("\tLocality Change Int Support\n");
+ if (intfcaps & TPM_INTF_STS_VALID_INT)
+ printk("\tSts Valid Int Support\n");
+ if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
+ printk("\tData Avail Int Support\n");
+
+ /* Interrupt setup */
+ intmask = ioread32(TPM_INT_ENABLE(tpm, tpm->locality));
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
+ | TPM_INTF_STS_VALID_INT;
+
+ iowrite32(TPM_INT_ENABLE(tpm, tpm->locality), intmask);
+
+ /* If interrupts are enabled, handle them */
+ if(irq) {
+ if(irq != TPM_PROBE_IRQ) {
+ tpm->irq = irq;
+ } else {
+ /*FIXME add irq probing feature later */
+ printk("IRQ probing not implemented\n");
+ }
+ }
+
+ if(tpm->irq) {
+ iowrite8(TPM_INT_VECTOR(tpm, tpm->locality), tpm->irq);
+
+ if(bind_pirq(tpm->irq, 1, tpm_tis_irq_handler, tpm) != 0) {
+ printk("Unabled to request irq: %u for use\n", tpm->irq);
+ printk("Will use polling mode\n");
+ tpm->irq = 0;
+ } else {
+ /* Clear all existing */
+ iowrite32(TPM_INT_STATUS(tpm, tpm->locality), ioread32(TPM_INT_STATUS(tpm, tpm->locality)));
+
+ /* Turn on interrupts */
+ iowrite32(TPM_INT_ENABLE(tpm, tpm->locality), intmask | TPM_GLOBAL_INT_ENABLE);
+ }
+ }
+
+ tpm_get_timeouts(tpm);
+ tpm_continue_selftest(tpm);
+
+
+ return tpm;
+abort_egress:
+ if(tpm != NULL) {
+ shutdown_tpm_tis(tpm);
+ }
+ return NULL;
+}
+
+void shutdown_tpm_tis(struct tpm_chip* tpm){
+ int i;
+
+ printk("Shutting down tpm_tis device\n");
+
+ iowrite32(TPM_INT_ENABLE(tpm, tpm->locality), ~TPM_GLOBAL_INT_ENABLE);
+
+ /*Unmap all of the mmio pages */
+ for(i = 0; i < 5; ++i) {
+ if(tpm->pages[i] != NULL) {
+ iounmap(tpm->pages[i], PAGE_SIZE);
+ tpm->pages[i] = NULL;
+ }
+ }
+ free(tpm);
+ return;
+}
+
+
+int tpm_tis_cmd(struct tpm_chip* tpm, uint8_t* req, size_t reqlen, uint8_t** resp, size_t* resplen)
+{
+ ssize_t rc;
+
+ if(tpm->locality < 0) {
+ printk("tpm_tis_cmd() failed! locality not set!\n");
+ return -1;
+ }
+ if(reqlen > TPM_BUFSIZE) {
+ reqlen = TPM_BUFSIZE;
+ }
+ memcpy(tpm->data_buffer, req, reqlen);
+ /* tpm_transmit() returns a negative value on failure */
+ if((rc = tpm_transmit(tpm, tpm->data_buffer, TPM_BUFSIZE)) < 0) {
+ return -1;
+ }
+ *resplen = rc;
+
+ *resp = malloc(*resplen);
+ memcpy(*resp, tpm->data_buffer, *resplen);
+ return 0;
+}
+
+#ifdef HAVE_LIBC
+int tpm_tis_open(struct tpm_chip* tpm)
+{
+ /* Silently prevent multiple opens */
+ if(tpm->fd != -1) {
+ return tpm->fd;
+ }
+
+ tpm->fd = alloc_fd(FTYPE_TPM_TIS);
+ printk("tpm_tis_open() -> %d\n", tpm->fd);
+ files[tpm->fd].tpm_tis.dev = tpm;
+ files[tpm->fd].tpm_tis.offset = 0;
+ files[tpm->fd].tpm_tis.respgot = 0;
+ return tpm->fd;
+}
+
+int tpm_tis_posix_write(int fd, const uint8_t* buf, size_t count)
+{
+ struct tpm_chip* tpm;
+ tpm = files[fd].tpm_tis.dev;
+
+ if(tpm->locality < 0) {
+ printk("tpm_tis_posix_write() failed! locality not set!\n");
+ errno = EINPROGRESS;
+ return -1;
+ }
+ if(count == 0) {
+ return 0;
+ }
+
+ /* Clamp the command to the size of the internal buffer */
+ if(count > TPM_BUFSIZE) {
+ count = TPM_BUFSIZE;
+ }
+ /* Send the command now */
+ memcpy(tpm->data_buffer, buf, count);
+ if((tpm->data_len = tpm_transmit(tpm, tpm->data_buffer, TPM_BUFSIZE)) < 0) {
+ errno = EIO;
+ return -1;
+ }
+ return count;
+}
+
+int tpm_tis_posix_read(int fd, uint8_t* buf, size_t count)
+{
+ int rc;
+ struct tpm_chip* tpm;
+ tpm = files[fd].tpm_tis.dev;
+
+ if(count == 0) {
+ return 0;
+ }
+
+ /* If there is no tpm resp to read, return EIO */
+ if(tpm->data_len < 0) {
+ errno = EIO;
+ return -1;
+ }
+
+
+ /* Handle EOF case */
+ if(files[fd].tpm_tis.offset >= tpm->data_len) {
+ rc = 0;
+ } else {
+ rc = min(tpm->data_len - files[fd].tpm_tis.offset, count);
+ memcpy(buf, tpm->data_buffer + files[fd].tpm_tis.offset, rc);
+ }
+ files[fd].tpm_tis.offset += rc;
+ /* Reset the data pending flag */
+ return rc;
+}
+int tpm_tis_posix_fstat(int fd, struct stat* buf)
+{
+ struct tpm_chip* tpm;
+ tpm = files[fd].tpm_tis.dev;
+
+ buf->st_mode = O_RDWR;
+ buf->st_uid = 0;
+ buf->st_gid = 0;
+ buf->st_size = be32_to_cpu(*((uint32_t*)(tpm->data_buffer + 2)));
+ buf->st_atime = buf->st_mtime = buf->st_ctime = time(NULL);
+ return 0;
+}
+
+
+#endif
diff --git a/extras/mini-os/tpmback.c b/extras/mini-os/tpmback.c
new file mode 100644
index 0000000..b6ae2e6
--- /dev/null
+++ b/extras/mini-os/tpmback.c
@@ -0,0 +1,1128 @@
+/*
+ * Copyright (c) 2010-2012 United States Government, as represented by
+ * the Secretary of Defense. All rights reserved.
+ *
+ * This code has been derived from drivers/xen/tpmback/tpmback.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (c) 2005, IBM Corporation
+ *
+ * which was itself derived from drivers/xen/netback/netback.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This code has also been derived from drivers/xen/tpmback/xenbus.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (C) 2005 IBM Corporation
+ * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
+ *
+ * This code has also been derived from drivers/xen/tpmback/interface.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (c) 2005, IBM Corporation
+ *
+ * which was itself also derived from drivers/xen/netback/interface.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (c) 2004, Keir Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2
+ * of the License
+ */
+#include <mini-os/os.h>
+#include <mini-os/xenbus.h>
+#include <mini-os/events.h>
+#include <errno.h>
+#include <mini-os/gnttab.h>
+#include <xen/io/xenbus.h>
+#include <xen/io/tpmif.h>
+#include <xen/io/protocols.h>
+#include <mini-os/xmalloc.h>
+#include <time.h>
+#include <mini-os/tpmback.h>
+#include <mini-os/lib.h>
+#include <fcntl.h>
+#include <mini-os/mm.h>
+#include <mini-os/posix/sys/mman.h>
+#include <mini-os/semaphore.h>
+#include <mini-os/wait.h>
+
+
+#ifndef HAVE_LIBC
+#define strtoul simple_strtoul
+#endif
+
+//#define TPMBACK_PRINT_DEBUG
+#ifdef TPMBACK_PRINT_DEBUG
+#define TPMBACK_DEBUG(fmt,...) printk("Tpmback:Debug("__FILE__":%d) " fmt, __LINE__, ##__VA_ARGS__)
+#define TPMBACK_DEBUG_MORE(fmt,...) printk(fmt, ##__VA_ARGS__)
+#else
+#define TPMBACK_DEBUG(fmt,...)
+#endif
+#define TPMBACK_ERR(fmt,...) printk("Tpmback:Error " fmt, ##__VA_ARGS__)
+#define TPMBACK_LOG(fmt,...) printk("Tpmback:Info " fmt, ##__VA_ARGS__)
+
+#define min(a,b) (((a) < (b)) ? (a) : (b))
+
+/* Default size of the tpmif array at initialization */
+#define DEF_ARRAY_SIZE 1
+
+/* tpmif and tpmdev flags */
+#define TPMIF_CLOSED 1
+#define TPMIF_REQ_READY 2
+
+struct tpmif {
+ domid_t domid;
+ unsigned int handle;
+
+ char* fe_path;
+ char* fe_state_path;
+
+ /* Locally bound event channel*/
+ evtchn_port_t evtchn;
+
+ /* Shared page */
+ tpmif_tx_interface_t* tx;
+
+ /* pointer to TPMIF_RX_RING_SIZE pages */
+ void** pages;
+
+ enum xenbus_state state;
+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+
+ char* uuid;
+
+ /* state flags */
+ int flags;
+};
+typedef struct tpmif tpmif_t;
+
+struct tpmback_dev {
+
+ tpmif_t** tpmlist;
+ unsigned long num_tpms;
+ unsigned long num_alloc;
+
+ struct gntmap map;
+
+ /* True if at least one tpmif has a request to be handled */
+ int flags;
+
+ /* exclusive domains, see init_tpmback comment in tpmback.h */
+ char** exclusive_uuids;
+
+ xenbus_event_queue events;
+
+ /* Callbacks */
+ void (*open_callback)(domid_t, unsigned int);
+ void (*close_callback)(domid_t, unsigned int);
+ void (*suspend_callback)(domid_t, unsigned int);
+ void (*resume_callback)(domid_t, unsigned int);
+};
+typedef struct tpmback_dev tpmback_dev_t;
+
+enum { EV_NONE, EV_NEWFE, EV_STCHNG } tpm_ev_enum;
+
+/* Global objects */
+static struct thread* eventthread = NULL;
+static tpmback_dev_t gtpmdev = {
+ .tpmlist = NULL,
+ .num_tpms = 0,
+ .num_alloc = 0,
+ .flags = TPMIF_CLOSED,
+ .events = NULL,
+ .open_callback = NULL,
+ .close_callback = NULL,
+ .suspend_callback = NULL,
+ .resume_callback = NULL,
+};
+struct wait_queue_head waitq;
+int globalinit = 0;
+
+/************************************
+ * TPMIF SORTED ARRAY FUNCTIONS
+ * tpmback_dev_t.tpmlist is a sorted array, sorted by domid and then handle number
+ * Duplicates are not allowed
+ * **********************************/
+
+inline void tpmif_req_ready(tpmif_t* tpmif) {
+ tpmif->flags |= TPMIF_REQ_READY;
+ gtpmdev.flags |= TPMIF_REQ_READY;
+}
+
+inline void tpmdev_check_req(void) {
+ int i;
+ int flags;
+ local_irq_save(flags);
+ for(i = 0; i < gtpmdev.num_tpms; ++i) {
+ if(gtpmdev.tpmlist[i]->flags & TPMIF_REQ_READY) {
+ gtpmdev.flags |= TPMIF_REQ_READY;
+ local_irq_restore(flags);
+ return;
+ }
+ }
+ gtpmdev.flags &= ~TPMIF_REQ_READY;
+ local_irq_restore(flags);
+}
+
+inline void tpmif_req_finished(tpmif_t* tpmif) {
+ tpmif->flags &= ~TPMIF_REQ_READY;
+ tpmdev_check_req();
+}
+
+int __get_tpmif_index(int st, int n, domid_t domid, unsigned int handle)
+{
+ int i = st + n /2;
+ tpmif_t* tmp;
+
+ if( n <= 0 )
+ return -1;
+
+ tmp = gtpmdev.tpmlist[i];
+ if(domid == tmp->domid && tmp->handle == handle) {
+ return i;
+ } else if ( (domid < tmp->domid) ||
+ (domid == tmp->domid && handle < tmp->handle)) {
+ return __get_tpmif_index(st, n/2, domid, handle);
+ } else {
+ return __get_tpmif_index(i + 1, n/2 - ((n +1) % 2), domid, handle);
+ }
+}
+
+/* Returns the array index of the tpmif domid/handle. Returns -1 if no such tpmif exists */
+int get_tpmif_index(domid_t domid, unsigned int handle)
+{
+ int flags;
+ int index;
+ local_irq_save(flags);
+ index = __get_tpmif_index(0, gtpmdev.num_tpms, domid, handle);
+ local_irq_restore(flags);
+ return index;
+}
+
+/* Returns the tpmif domid/handle or NULL if none exists */
+tpmif_t* get_tpmif(domid_t domid, unsigned int handle)
+{
+ int flags;
+ int i;
+ tpmif_t* ret;
+ local_irq_save(flags);
+ i = get_tpmif_index(domid, handle);
+ if (i < 0) {
+ ret = NULL;
+ } else {
+ ret = gtpmdev.tpmlist[i];
+ }
+ local_irq_restore(flags);
+ return ret;
+}
+
+/* Remove the given tpmif. Returns 0 if it was removed, -1 if it was not removed */
+int remove_tpmif(tpmif_t* tpmif)
+{
+ int i, j;
+ char* err;
+ int flags;
+ local_irq_save(flags);
+
+ /* Find the index in the array if it exists */
+ i = get_tpmif_index(tpmif->domid, tpmif->handle);
+ if (i < 0) {
+ goto error;
+ }
+
+ /* Remove the interface from the list */
+ for(j = i; j < gtpmdev.num_tpms - 1; ++j) {
+ gtpmdev.tpmlist[j] = gtpmdev.tpmlist[j+1];
+ }
+ gtpmdev.tpmlist[j] = NULL;
+ --gtpmdev.num_tpms;
+
+ /* If removed tpm was the only ready tpm, then we need to check and turn off the ready flag */
+ tpmdev_check_req();
+
+ local_irq_restore(flags);
+
+ /* Stop listening for events on this tpm interface */
+ if((err = xenbus_unwatch_path_token(XBT_NIL, tpmif->fe_state_path, tpmif->fe_state_path))) {
+ TPMBACK_ERR("Unable to unwatch path token `%s' Error was %s Ignoring..\n", tpmif->fe_state_path, err);
+ free(err);
+ }
+
+ return 0;
+error:
+ local_irq_restore(flags);
+ return -1;
+}
+
+/* Insert tpmif into dev->tpmlist. Returns 0 on success and non zero on error.
+ * It is an error to insert a tpmif with the same domid and handle
+ * number
+ * as something already in the list */
+int insert_tpmif(tpmif_t* tpmif)
+{
+ int flags;
+ unsigned int i, j;
+ tpmif_t* tmp;
+ char* err;
+
+ local_irq_save(flags);
+
+ /*Check if we need to allocate more space */
+ if (gtpmdev.num_tpms == gtpmdev.num_alloc) {
+ gtpmdev.num_alloc *= 2;
+ gtpmdev.tpmlist = realloc(gtpmdev.tpmlist, sizeof(tpmif_t*) * gtpmdev.num_alloc);
+ }
+
+ /*Find where to put the new interface */
+ for(i = 0; i < gtpmdev.num_tpms; ++i)
+ {
+ tmp = gtpmdev.tpmlist[i];
+ if(tpmif->domid == tmp->domid && tpmif->handle == tmp->handle) {
+ TPMBACK_ERR("Tried to insert duplicate tpm interface %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle);
+ goto error;
+ }
+ if((tpmif->domid < tmp->domid) ||
+ (tpmif->domid == tmp->domid && tpmif->handle < tmp->handle)) {
+ break;
+ }
+ }
+
+ /*Shift all the tpm pointers past i down one */
+ for(j = gtpmdev.num_tpms; j > i; --j) {
+ gtpmdev.tpmlist[j] = gtpmdev.tpmlist[j-1];
+ }
+
+ /*Add the new interface */
+ gtpmdev.tpmlist[i] = tpmif;
+ ++gtpmdev.num_tpms;
+
+ /*Should not be needed, anything inserted with ready flag is probably an error */
+ tpmdev_check_req();
+
+ local_irq_restore(flags);
+
+ /*Listen for state changes on the new interface */
+ if((err = xenbus_watch_path_token(XBT_NIL, tpmif->fe_state_path, tpmif->fe_state_path, &gtpmdev.events)))
+ {
+ /* if we got an error here we should carefully remove the interface and then return */
+ TPMBACK_ERR("Unable to watch path token `%s' Error was %s\n", tpmif->fe_state_path, err);
+ free(err);
+ remove_tpmif(tpmif);
+ goto error_post_irq;
+ }
+
+ return 0;
+error:
+ local_irq_restore(flags);
+error_post_irq:
+ return -1;
+}
+
+
+/*****************
+ * CHANGE BACKEND STATE
+ * *****************/
+/*Attempts to change the backend state in xenstore
+ * returns 0 on success and non-zero on error */
+int tpmif_change_state(tpmif_t* tpmif, enum xenbus_state state)
+{
+ char path[512];
+ char *value;
+ char *err;
+ enum xenbus_state readst;
+ TPMBACK_DEBUG("Backend state change %u/%u from=%d to=%d\n", (unsigned int) tpmif->domid, tpmif->handle, tpmif->state, state);
+ if (tpmif->state == state)
+ return 0;
+
+ snprintf(path, 512, "backend/vtpm/%u/%u/state", (unsigned int) tpmif->domid, tpmif->handle);
+
+ if((err = xenbus_read(XBT_NIL, path, &value))) {
+ TPMBACK_ERR("Unable to read backend state %s, error was %s\n", path, err);
+ free(err);
+ return -1;
+ }
+ if(sscanf(value, "%d", &readst) != 1) {
+ TPMBACK_ERR("Non integer value (%s) in %s ??\n", value, path);
+ free(value);
+ return -1;
+ }
+ free(value);
+
+ /* It's possible that the backend state got updated by hotplug or something else behind our back */
+ if(readst != tpmif->state) {
+ TPMBACK_DEBUG("tpm interface state was %d but xenstore state was %d!\n", tpmif->state, readst);
+ tpmif->state = readst;
+ }
+
+ /* If the state isn't changing, don't update xenstore, because we don't want to fire extraneous events */
+ if(tpmif->state == state) {
+ return 0;
+ }
+
+ /*update xenstore*/
+ snprintf(path, 512, "backend/vtpm/%u/%u", (unsigned int) tpmif->domid, tpmif->handle);
+ if((err = xenbus_printf(XBT_NIL, path, "state", "%u", state))) {
+ TPMBACK_ERR("Error writing to xenstore %s, error was %s new state=%d\n", path, err, state);
+ free(err);
+ return -1;
+ }
+
+ tpmif->state = state;
+
+ return 0;
+}
+/**********************************
+ * TPMIF CREATION AND DELETION
+ * *******************************/
+inline tpmif_t* __init_tpmif(domid_t domid, unsigned int handle)
+{
+ tpmif_t* tpmif;
+ tpmif = malloc(sizeof(*tpmif));
+ tpmif->domid = domid;
+ tpmif->handle = handle;
+ tpmif->fe_path = NULL;
+ tpmif->fe_state_path = NULL;
+ tpmif->state = XenbusStateInitialising;
+ tpmif->status = DISCONNECTED;
+ tpmif->tx = NULL;
+ tpmif->pages = NULL;
+ tpmif->flags = 0;
+ tpmif->uuid = NULL;
+ return tpmif;
+}
+
+void __free_tpmif(tpmif_t* tpmif)
+{
+ if(tpmif->pages) {
+ free(tpmif->pages);
+ }
+ if(tpmif->fe_path) {
+ free(tpmif->fe_path);
+ }
+ if(tpmif->fe_state_path) {
+ free(tpmif->fe_state_path);
+ }
+ if(tpmif->uuid) {
+ free(tpmif->uuid);
+ }
+ free(tpmif);
+}
+/* Creates a new tpm interface, adds it to the sorted array and returns it.
+ * returns NULL on error
+ * If the tpm interface already exists, it is returned*/
+tpmif_t* new_tpmif(domid_t domid, unsigned int handle)
+{
+ tpmif_t* tpmif;
+ char* err;
+ char path[512];
+
+ /* Make sure we haven't already created this tpm
+ * Double events can occur */
+ if((tpmif = get_tpmif(domid, handle)) != NULL) {
+ return tpmif;
+ }
+
+ tpmif = __init_tpmif(domid, handle);
+
+ /* Get the uuid from xenstore */
+ snprintf(path, 512, "backend/vtpm/%u/%u/uuid", (unsigned int) domid, handle);
+ if((err = xenbus_read(XBT_NIL, path, &tpmif->uuid))) {
+ TPMBACK_ERR("Error reading %s, Error = %s\n", path, err);
+ free(err);
+ goto error;
+ }
+
+ /* Do the exclusive uuid check now */
+ if(gtpmdev.exclusive_uuids != NULL) {
+ char** ptr;
+
+ /* Check that it's in the whitelist */
+ for(ptr = gtpmdev.exclusive_uuids; *ptr != NULL; ++ptr) {
+ if(!strcmp(tpmif->uuid, *ptr)) {
+ break;
+ }
+ }
+ /* If *ptr == NULL then we went through the whole list without a match, so close the connection */
+ if(*ptr == NULL) {
+ tpmif_change_state(tpmif, XenbusStateClosed);
+ TPMBACK_ERR("Frontend %u/%u tried to connect with invalid uuid=%s\n", (unsigned int) domid, handle, tpmif->uuid);
+ goto error;
+ }
+ }
+
+ /* allocate pages to be used for shared mapping */
+ if((tpmif->pages = malloc(sizeof(void*) * TPMIF_TX_RING_SIZE)) == NULL) {
+ goto error;
+ }
+ memset(tpmif->pages, 0, sizeof(void*) * TPMIF_TX_RING_SIZE);
+
+ if(tpmif_change_state(tpmif, XenbusStateInitWait)) {
+ goto error;
+ }
+
+ snprintf(path, 512, "backend/vtpm/%u/%u/frontend", (unsigned int) domid, handle);
+ if((err = xenbus_read(XBT_NIL, path, &tpmif->fe_path))) {
+ TPMBACK_ERR("Error creating new tpm instance: xenbus_read(%s) error = %s\n", path, err);
+ free(err);
+ goto error;
+ }
+
+ /*Set the state path */
+ tpmif->fe_state_path = malloc(strlen(tpmif->fe_path) + 7);
+ strcpy(tpmif->fe_state_path, tpmif->fe_path);
+ strcat(tpmif->fe_state_path, "/state");
+
+ if(insert_tpmif(tpmif)) {
+ goto error;
+ }
+ TPMBACK_DEBUG("New tpmif %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle);
+ /* Do the callback now */
+ if(gtpmdev.open_callback) {
+ gtpmdev.open_callback(tpmif->domid, tpmif->handle);
+ }
+ return tpmif;
+error:
+ __free_tpmif(tpmif);
+ return NULL;
+
+}
+
+/* Removes tpmif from dev->tpmlist and frees its memory */
+void free_tpmif(tpmif_t* tpmif)
+{
+ char* err;
+ char path[512];
+ TPMBACK_DEBUG("Free tpmif %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle);
+ if(tpmif->flags & TPMIF_CLOSED) {
+ TPMBACK_ERR("Tried to free an instance twice! There's a bug somewhere!\n");
+ BUG();
+ }
+ tpmif->flags = TPMIF_CLOSED;
+
+ tpmif_change_state(tpmif, XenbusStateClosing);
+
+ /* Unmap share page and unbind event channel */
+ if(tpmif->status == CONNECTED) {
+ tpmif->status = DISCONNECTING;
+ mask_evtchn(tpmif->evtchn);
+
+ if(gntmap_munmap(&gtpmdev.map, (unsigned long)tpmif->tx, 1)) {
+ TPMBACK_ERR("%u/%u Error occurred while trying to unmap shared page\n", (unsigned int) tpmif->domid, tpmif->handle);
+ }
+
+ unbind_evtchn(tpmif->evtchn);
+ }
+ tpmif->status = DISCONNECTED;
+ tpmif_change_state(tpmif, XenbusStateClosed);
+
+ /* Do the callback now */
+ if(gtpmdev.close_callback) {
+ gtpmdev.close_callback(tpmif->domid, tpmif->handle);
+ }
+
+ /* remove from array */
+ remove_tpmif(tpmif);
+
+ /* Wake up anyone possibly waiting on this interface and let them exit */
+ wake_up(&waitq);
+ schedule();
+
+ /* Remove the old xenbus entries */
+ snprintf(path, 512, "backend/vtpm/%u/%u", (unsigned int) tpmif->domid, tpmif->handle);
+ if((err = xenbus_rm(XBT_NIL, path))) {
+ TPMBACK_ERR("Error cleaning up xenbus entries path=%s error=%s\n", path, err);
+ free(err);
+ }
+
+ TPMBACK_LOG("Frontend %u/%u disconnected\n", (unsigned int) tpmif->domid, tpmif->handle);
+
+ /* free memory */
+ __free_tpmif(tpmif);
+
+}
+
+/**********************
+ * REMAINING TPMBACK FUNCTIONS
+ * ********************/
+
+/*Event channel handler */
+void tpmback_handler(evtchn_port_t port, struct pt_regs *regs, void *data)
+{
+ tpmif_t* tpmif = (tpmif_t*) data;
+ tpmif_tx_request_t* tx = &tpmif->tx->ring[0].req;
+ /* Throw away 0 size events, these can trigger from event channel unmasking */
+ if(tx->size == 0)
+ return;
+
+ TPMBACK_DEBUG("EVENT CHANNEL FIRE %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle);
+ tpmif_req_ready(tpmif);
+ wake_up(&waitq);
+
+}
+
+/* Connect to frontend */
+int connect_fe(tpmif_t* tpmif)
+{
+ char path[512];
+ char* err, *value;
+ uint32_t domid;
+ grant_ref_t ringref;
+ evtchn_port_t evtchn;
+
+ /* If already connected then quit */
+ if (tpmif->status == CONNECTED) {
+ TPMBACK_DEBUG("%u/%u tried to connect while it was already connected?\n", (unsigned int) tpmif->domid, tpmif->handle);
+ return 0;
+ }
+
+ /* Fetch the grant reference */
+ snprintf(path, 512, "%s/ring-ref", tpmif->fe_path);
+ if((err = xenbus_read(XBT_NIL, path, &value))) {
+ TPMBACK_ERR("Error creating new tpm instance: xenbus_read(%s) error = %s\n", path, err);
+ free(err);
+ return -1;
+ }
+ if(sscanf(value, "%u", &ringref) != 1) {
+ TPMBACK_ERR("Non-integer value (%s) in %s ??\n", value, path);
+ free(value);
+ return -1;
+ }
+ free(value);
+
+
+ /* Fetch the event channel*/
+ snprintf(path, 512, "%s/event-channel", tpmif->fe_path);
+ if((err = xenbus_read(XBT_NIL, path, &value))) {
+ TPMBACK_ERR("Error creating new tpm instance: xenbus_read(%s) error = %s\n", path, err);
+ free(err);
+ return -1;
+ }
+ if(sscanf(value, "%u", &evtchn) != 1) {
+ TPMBACK_ERR("Non-integer value (%s) in %s ??\n", value, path);
+ free(value);
+ return -1;
+ }
+ free(value);
+
+ domid = tpmif->domid;
+ if((tpmif->tx = gntmap_map_grant_refs(&gtpmdev.map, 1, &domid, 0, &ringref, PROT_READ | PROT_WRITE)) == NULL) {
+ TPMBACK_ERR("Failed to map grant reference %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle);
+ return -1;
+ }
+ memset(tpmif->tx, 0, PAGE_SIZE);
+
+ /*Bind the event channel */
+ if((evtchn_bind_interdomain(tpmif->domid, evtchn, tpmback_handler, tpmif, &tpmif->evtchn)))
+ {
+ TPMBACK_ERR("%u/%u Unable to bind to interdomain event channel!\n", (unsigned int) tpmif->domid, tpmif->handle);
+ goto error_post_map;
+ }
+ unmask_evtchn(tpmif->evtchn);
+
+ /* Write the ready flag and change status to connected */
+ snprintf(path, 512, "backend/vtpm/%u/%u", (unsigned int) tpmif->domid, tpmif->handle);
+ if((err = xenbus_printf(XBT_NIL, path, "ready", "%u", 1))) {
+ TPMBACK_ERR("%u/%u Unable to write ready flag on connect_fe()\n", (unsigned int) tpmif->domid, tpmif->handle);
+ free(err);
+ goto error_post_evtchn;
+ }
+ tpmif->status = CONNECTED;
+ if((tpmif_change_state(tpmif, XenbusStateConnected))){
+ goto error_post_evtchn;
+ }
+
+ TPMBACK_LOG("Frontend %u/%u connected\n", (unsigned int) tpmif->domid, tpmif->handle);
+
+ return 0;
+error_post_evtchn:
+ mask_evtchn(tpmif->evtchn);
+ unbind_evtchn(tpmif->evtchn);
+error_post_map:
+ gntmap_munmap(&gtpmdev.map, (unsigned long)tpmif->tx, 1);
+ return -1;
+}
+
+static int frontend_changed(tpmif_t* tpmif)
+{
+ int state = xenbus_read_integer(tpmif->fe_state_path);
+ if(state < 0) {
+ state = XenbusStateUnknown;
+ }
+
+ TPMBACK_DEBUG("Frontend %u/%u state changed to %d\n", (unsigned int) tpmif->domid, tpmif->handle, state);
+
+ switch (state) {
+ case XenbusStateInitialising:
+ case XenbusStateInitialised:
+ break;
+
+ case XenbusStateConnected:
+ if(connect_fe(tpmif)) {
+ TPMBACK_ERR("Failed to connect to front end %u/%u\n", (unsigned int) tpmif->domid, tpmif->handle);
+ tpmif_change_state(tpmif, XenbusStateClosed);
+ return -1;
+ }
+ break;
+
+ case XenbusStateClosing:
+ tpmif_change_state(tpmif, XenbusStateClosing);
+ break;
+
+ case XenbusStateUnknown: /* keep it here */
+ case XenbusStateClosed:
+ free_tpmif(tpmif);
+ break;
+
+ default:
+ TPMBACK_DEBUG("BAD STATE CHANGE %u/%u state = %d for tpmif\n", (unsigned int) tpmif->domid, tpmif->handle, state);
+ return -1;
+ }
+ return 0;
+}
+
+
+/* Parses the event string that comes out of xenbus_wait_for_watch_return(). */
+static int parse_eventstr(const char* evstr, domid_t* domid, unsigned int* handle)
+{
+ int ret;
+ char cmd[40];
+ char* err;
+ char* value;
+ unsigned int udomid = 0;
+ tpmif_t* tpmif;
+ /* First check for new frontends; this occurs when backend/vtpm/<domid>/<handle> gets created. Note we want the sscanf to fail on the last %s */
+ if (sscanf(evstr, "backend/vtpm/%u/%u/%39s", &udomid, handle, cmd) == 2) {
+ *domid = udomid;
+ /* Make sure the entry exists; if this event triggered because the entry disappeared then ignore it */
+ if((err = xenbus_read(XBT_NIL, evstr, &value))) {
+ free(err);
+ return EV_NONE;
+ }
+ free(value);
+ /* Make sure the tpmif entry does not already exist, this should not happen */
+ if((tpmif = get_tpmif(*domid, *handle)) != NULL) {
+ TPMBACK_DEBUG("Duplicate tpm entries! %u %u\n", tpmif->domid, tpmif->handle);
+ return EV_NONE;
+ }
+ return EV_NEWFE;
+ } else if((ret = sscanf(evstr, "/local/domain/%u/device/vtpm/%u/%39s", &udomid, handle, cmd)) == 3) {
+ *domid = udomid;
+ if (!strcmp(cmd, "state"))
+ return EV_STCHNG;
+ }
+ return EV_NONE;
+}
+
+void handle_backend_event(char* evstr) {
+ tpmif_t* tpmif;
+ domid_t domid;
+ unsigned int handle;
+ int event;
+
+ TPMBACK_DEBUG("Xenbus Event: %s\n", evstr);
+
+ event = parse_eventstr(evstr, &domid, &handle);
+
+ switch(event) {
+ case EV_NEWFE:
+ if(new_tpmif(domid, handle) == NULL) {
+ TPMBACK_ERR("Failed to create new tpm instance %u/%u\n", (unsigned int) domid, handle);
+ }
+ wake_up(&waitq);
+ break;
+ case EV_STCHNG:
+ if((tpmif = get_tpmif(domid, handle))) {
+ frontend_changed(tpmif);
+ } else {
+ TPMBACK_DEBUG("Event received for non-existent tpm! instance=%u/%u xenbus_event=%s\n", (unsigned int) domid, handle, evstr);
+ }
+ break;
+ }
+}
+
+/* Runs through the given path and creates events recursively
+ * for all of its children.
+ * @path - xenstore path to scan */
+static void generate_backend_events(const char* path)
+{
+ char* err;
+ int i, len;
+ char **dirs;
+ char *entry;
+
+ if((err = xenbus_ls(XBT_NIL, path, &dirs)) != NULL) {
+ free(err);
+ return;
+ }
+
+ for(i = 0; dirs[i] != NULL; ++i) {
+ len = strlen(path) + strlen(dirs[i]) + 2;
+ entry = malloc(len);
+ snprintf(entry, len, "%s/%s", path, dirs[i]);
+
+ /* Generate and handle event for the entry itself */
+ handle_backend_event(entry);
+
+ /* Do children */
+ generate_backend_events(entry);
+
+ /* Cleanup */
+ free(entry);
+ free(dirs[i]);
+ }
+ free(dirs);
+ return;
+}
+
+char* tpmback_get_uuid(domid_t domid, unsigned int handle)
+{
+ tpmif_t* tpmif;
+ if((tpmif = get_tpmif(domid, handle)) == NULL) {
+ TPMBACK_DEBUG("get_uuid() failed, %u/%u is an invalid frontend\n", (unsigned int) domid, handle);
+ return NULL;
+ }
+
+ return tpmif->uuid;
+}
+
+void tpmback_set_open_callback(void (*cb)(domid_t, unsigned int))
+{
+ gtpmdev.open_callback = cb;
+}
+void tpmback_set_close_callback(void (*cb)(domid_t, unsigned int))
+{
+ gtpmdev.close_callback = cb;
+}
+void tpmback_set_suspend_callback(void (*cb)(domid_t, unsigned int))
+{
+ gtpmdev.suspend_callback = cb;
+}
+void tpmback_set_resume_callback(void (*cb)(domid_t, unsigned int))
+{
+ gtpmdev.resume_callback = cb;
+}
+
+static void event_listener(void)
+{
+ const char* bepath = "backend/vtpm";
+ char **path;
+ char* err;
+
+ /* Setup the backend device watch */
+ if((err = xenbus_watch_path_token(XBT_NIL, bepath, bepath, &gtpmdev.events)) != NULL) {
+ TPMBACK_ERR("xenbus_watch_path_token(%s) failed with error %s!\n", bepath, err);
+ free(err);
+ goto egress;
+ }
+
+ /* Check for any frontends that connected before we set the watch.
+ * This is almost guaranteed to happen if both domains are started
+ * immediately one after the other.
+ * We do this by manually generating events on everything in the backend
+ * path */
+ generate_backend_events(bepath);
+
+ /* Wait and listen for changes in frontend connections */
+ while(1) {
+ path = xenbus_wait_for_watch_return(&gtpmdev.events);
+
+ /*If quit flag was set then exit */
+ if(gtpmdev.flags & TPMIF_CLOSED) {
+ TPMBACK_DEBUG("listener thread got quit event. Exiting..\n");
+ free(path);
+ break;
+ }
+ handle_backend_event(*path);
+ free(path);
+
+ }
+
+ if((err = xenbus_unwatch_path_token(XBT_NIL, bepath, bepath)) != NULL) {
+ free(err);
+ }
+egress:
+ return;
+}
+
+void event_thread(void* p) {
+ event_listener();
+}
+
+void init_tpmback(char** exclusive_uuids)
+{
+ if(!globalinit) {
+ init_waitqueue_head(&waitq);
+ globalinit = 1;
+ }
+ printk("============= Init TPM BACK ================\n");
+ gtpmdev.tpmlist = malloc(sizeof(tpmif_t*) * DEF_ARRAY_SIZE);
+ gtpmdev.num_alloc = DEF_ARRAY_SIZE;
+ gtpmdev.num_tpms = 0;
+ gtpmdev.flags = 0;
+ gtpmdev.exclusive_uuids = exclusive_uuids;
+
+ gtpmdev.open_callback = gtpmdev.close_callback = NULL;
+ gtpmdev.suspend_callback = gtpmdev.resume_callback = NULL;
+
+ eventthread = create_thread("tpmback-listener", event_thread, NULL);
+
+}
+
+void shutdown_tpmback(void)
+{
+ /* Disable callbacks */
+ gtpmdev.open_callback = gtpmdev.close_callback = NULL;
+ gtpmdev.suspend_callback = gtpmdev.resume_callback = NULL;
+
+ TPMBACK_LOG("Shutting down tpm backend\n");
+ /* Set the quit flag */
+ gtpmdev.flags = TPMIF_CLOSED;
+
+ //printk("num tpms is %d\n", gtpmdev.num_tpms);
+ /*Free all backend instances */
+ while(gtpmdev.num_tpms) {
+ free_tpmif(gtpmdev.tpmlist[0]);
+ }
+ free(gtpmdev.tpmlist);
+ gtpmdev.tpmlist = NULL;
+ gtpmdev.num_alloc = 0;
+
+ /* Wake up anyone possibly waiting on the device and let them exit */
+ wake_up(&waitq);
+ schedule();
+}
+
+inline void init_tpmcmd(tpmcmd_t* tpmcmd, domid_t domid, unsigned int handle, char* uuid)
+{
+ tpmcmd->domid = domid;
+ tpmcmd->handle = handle;
+ tpmcmd->uuid = uuid;
+ tpmcmd->req = NULL;
+ tpmcmd->req_len = 0;
+ tpmcmd->resp = NULL;
+ tpmcmd->resp_len = 0;
+}
+
+tpmcmd_t* get_request(tpmif_t* tpmif) {
+ tpmcmd_t* cmd;
+ tpmif_tx_request_t* tx;
+ int offset;
+ int tocopy;
+ int i;
+ uint32_t domid;
+ int flags;
+
+ local_irq_save(flags);
+
+ /* Allocate the cmd object to hold the data */
+ if((cmd = malloc(sizeof(*cmd))) == NULL) {
+ goto error;
+ }
+ init_tpmcmd(cmd, tpmif->domid, tpmif->handle, tpmif->uuid);
+
+ tx = &tpmif->tx->ring[0].req;
+ cmd->req_len = tx->size;
+ /* Allocate the buffer */
+ if(cmd->req_len) {
+ if((cmd->req = malloc(cmd->req_len)) == NULL) {
+ goto error;
+ }
+ }
+ /* Copy the bits from the shared pages */
+ offset = 0;
+ for(i = 0; i < TPMIF_TX_RING_SIZE && offset < cmd->req_len; ++i) {
+ tx = &tpmif->tx->ring[i].req;
+
+ /* Map the page with the data */
+ domid = (uint32_t)tpmif->domid;
+ if((tpmif->pages[i] = gntmap_map_grant_refs(&gtpmdev.map, 1, &domid, 0, &tx->ref, PROT_READ)) == NULL) {
+ TPMBACK_ERR("%u/%u Unable to map shared page during read!\n", (unsigned int) tpmif->domid, tpmif->handle);
+ goto error;
+ }
+
+ /* do the copy now */
+ tocopy = min(cmd->req_len - offset, PAGE_SIZE);
+ memcpy(&cmd->req[offset], tpmif->pages[i], tocopy);
+ offset += tocopy;
+
+ /* release the page */
+ gntmap_munmap(&gtpmdev.map, (unsigned long)tpmif->pages[i], 1);
+
+ }
+
+#ifdef TPMBACK_PRINT_DEBUG
+ TPMBACK_DEBUG("Received Tpm Command from %u/%u of size %u", (unsigned int) tpmif->domid, tpmif->handle, cmd->req_len);
+ for(i = 0; i < cmd->req_len; ++i) {
+ if (!(i % 30)) {
+ TPMBACK_DEBUG_MORE("\n");
+ }
+ TPMBACK_DEBUG_MORE("%02hhX ", cmd->req[i]);
+ }
+ TPMBACK_DEBUG_MORE("\n\n");
+#endif
+
+ local_irq_restore(flags);
+ return cmd;
+error:
+ if(cmd != NULL) {
+ if (cmd->req != NULL) {
+ free(cmd->req);
+ cmd->req = NULL;
+ }
+ free(cmd);
+ cmd = NULL;
+ }
+ local_irq_restore(flags);
+ return NULL;
+
+}
+
+void send_response(tpmcmd_t* cmd, tpmif_t* tpmif)
+{
+ tpmif_tx_request_t* tx;
+ int offset;
+ int i;
+ uint32_t domid;
+ int tocopy;
+ int flags;
+
+ local_irq_save(flags);
+
+ tx = &tpmif->tx->ring[0].req;
+ tx->size = cmd->resp_len;
+
+ offset = 0;
+ for(i = 0; i < TPMIF_TX_RING_SIZE && offset < cmd->resp_len; ++i) {
+ tx = &tpmif->tx->ring[i].req;
+
+ /* Map the page with the data */
+ domid = (uint32_t)tpmif->domid;
+ if((tpmif->pages[i] = gntmap_map_grant_refs(&gtpmdev.map, 1, &domid, 0, &tx->ref, PROT_WRITE)) == NULL) {
+ TPMBACK_ERR("%u/%u Unable to map shared page during write!\n", (unsigned int) tpmif->domid, tpmif->handle);
+ goto error;
+ }
+
+ /* do the copy now */
+ tocopy = min(cmd->resp_len - offset, PAGE_SIZE);
+ memcpy(tpmif->pages[i], &cmd->resp[offset], tocopy);
+ offset += tocopy;
+
+ /* release the page */
+ gntmap_munmap(&gtpmdev.map, (unsigned long)tpmif->pages[i], 1);
+
+ }
+
+#ifdef TPMBACK_PRINT_DEBUG
+ TPMBACK_DEBUG("Sent response to %u/%u of size %u", (unsigned int) tpmif->domid, tpmif->handle, cmd->resp_len);
+ for(i = 0; i < cmd->resp_len; ++i) {
+ if (!(i % 30)) {
+ TPMBACK_DEBUG_MORE("\n");
+ }
+ TPMBACK_DEBUG_MORE("%02hhX ", cmd->resp[i]);
+ }
+ TPMBACK_DEBUG_MORE("\n\n");
+#endif
+ /* clear the ready flag and send the event channel notice to the frontend */
+ tpmif_req_finished(tpmif);
+ notify_remote_via_evtchn(tpmif->evtchn);
+error:
+ local_irq_restore(flags);
+ return;
+}
+
+tpmcmd_t* tpmback_req_any(void)
+{
+ int i;
+ /* Block until something has a request */
+ wait_event(waitq, (gtpmdev.flags & (TPMIF_REQ_READY | TPMIF_CLOSED)));
+
+ /* Check if we're shutting down */
+ if(gtpmdev.flags & TPMIF_CLOSED) {
+ /* if something was waiting for us to give up the queue so it can shut down, let it finish */
+ schedule();
+ return NULL;
+ }
+
+ for(i = 0; i < gtpmdev.num_tpms; ++i) {
+ if(gtpmdev.tpmlist[i]->flags & TPMIF_REQ_READY) {
+ return get_request(gtpmdev.tpmlist[i]);
+ }
+ }
+
+ TPMBACK_ERR("backend request ready flag was set but no interfaces were actually ready\n");
+ return NULL;
+}
+
+tpmcmd_t* tpmback_req(domid_t domid, unsigned int handle)
+{
+ tpmif_t* tpmif;
+ tpmif = get_tpmif(domid, handle);
+ if(tpmif == NULL) {
+ return NULL;
+ }
+
+ wait_event(waitq, (tpmif->flags & (TPMIF_REQ_READY | TPMIF_CLOSED) || gtpmdev.flags & TPMIF_CLOSED));
+
+ /* Check if we're shutting down */
+ if(tpmif->flags & TPMIF_CLOSED || gtpmdev.flags & TPMIF_CLOSED) {
+ /* if something was waiting for us to give up the queue so it can free this instance, let it finish */
+ schedule();
+ return NULL;
+ }
+
+ return get_request(tpmif);
+}
+
+void tpmback_resp(tpmcmd_t* tpmcmd)
+{
+ tpmif_t* tpmif;
+
+ /* Get the associated interface; if it doesn't exist then just quit */
+ tpmif = get_tpmif(tpmcmd->domid, tpmcmd->handle);
+ if(tpmif == NULL) {
+ TPMBACK_ERR("Tried to send a response to non-existent frontend %u/%u\n", (unsigned int) tpmcmd->domid, tpmcmd->handle);
+ goto end;
+ }
+
+ if(!(tpmif->flags & TPMIF_REQ_READY)) {
+ TPMBACK_ERR("Tried to send response to a frontend that was not waiting for one %u/%u\n", (unsigned int) tpmcmd->domid, tpmcmd->handle);
+ goto end;
+ }
+
+ /* Send response to frontend */
+ send_response(tpmcmd, tpmif);
+
+end:
+ if(tpmcmd->req != NULL) {
+ free(tpmcmd->req);
+ }
+ free(tpmcmd);
+ return;
+}
+
+int tpmback_wait_for_frontend_connect(domid_t *domid, unsigned int *handle)
+{
+ tpmif_t* tpmif;
+ int flags;
+ wait_event(waitq, ((gtpmdev.num_tpms > 0) || gtpmdev.flags & TPMIF_CLOSED));
+ if(gtpmdev.flags & TPMIF_CLOSED) {
+ return -1;
+ }
+ local_irq_save(flags);
+ tpmif = gtpmdev.tpmlist[0];
+ *domid = tpmif->domid;
+ *handle = tpmif->handle;
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+int tpmback_num_frontends(void)
+{
+ return gtpmdev.num_tpms;
+}
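
A minimal sketch (illustration only, not part of the patch) of how a backend
application might drive the API above, assuming the tpmback.h header added
elsewhere in this series declares init_tpmback(), tpmback_req_any(),
tpmback_resp() and shutdown_tpmback() with the signatures used here; the
handle_command() helper is hypothetical and stands in for the real TPM emulator:

#include <mini-os/tpmback.h>  /* header name assumed from this series */

static void serve_vtpm(void)
{
	tpmcmd_t *cmd;

	/* NULL means no exclusive uuid whitelist */
	init_tpmback(NULL);

	/* tpmback_req_any() blocks until some frontend has a request and
	 * returns NULL on shutdown (or if nothing was actually ready) */
	while((cmd = tpmback_req_any()) != NULL) {
		/* hypothetical helper: processes cmd->req/cmd->req_len and
		 * points cmd->resp/cmd->resp_len at a response buffer */
		handle_command(cmd);

		/* sends the response to the frontend and frees cmd; the
		 * response buffer itself stays owned by the caller */
		tpmback_resp(cmd);
	}

	shutdown_tpmback();
}
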
diff --git a/extras/mini-os/tpmfront.c b/extras/mini-os/tpmfront.c
new file mode 100644
index 0000000..0218d7f
--- /dev/null
+++ b/extras/mini-os/tpmfront.c
@@ -0,0 +1,608 @@
+/*
+ * Copyright (c) 2010-2012 United States Government, as represented by
+ * the Secretary of Defense. All rights reserved.
+ *
+ * This code has been derived from drivers/char/tpm_vtpm.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (C) 2006 IBM Corporation
+ *
+ * This code has also been derived from drivers/char/tpm_xen.c
+ * from the xen 2.6.18 linux kernel
+ *
+ * Copyright (c) 2005, IBM Corporation
+ *
+ * which was itself derived from drivers/xen/netfront/netfront.c
+ * from the linux kernel
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+#include <mini-os/os.h>
+#include <mini-os/xenbus.h>
+#include <mini-os/xmalloc.h>
+#include <mini-os/events.h>
+#include <mini-os/wait.h>
+#include <mini-os/gnttab.h>
+#include <xen/io/xenbus.h>
+#include <xen/io/tpmif.h>
+#include <mini-os/tpmfront.h>
+#include <fcntl.h>
+
+//#define TPMFRONT_PRINT_DEBUG
+#ifdef TPMFRONT_PRINT_DEBUG
+#define TPMFRONT_DEBUG(fmt,...) printk("Tpmfront:Debug("__FILE__":%d) " fmt, __LINE__, ##__VA_ARGS__)
+#define TPMFRONT_DEBUG_MORE(fmt,...) printk(fmt, ##__VA_ARGS__)
+#else
+#define TPMFRONT_DEBUG(fmt,...)
+#endif
+#define TPMFRONT_ERR(fmt,...) printk("Tpmfront:Error " fmt, ##__VA_ARGS__)
+#define TPMFRONT_LOG(fmt,...) printk("Tpmfront:Info " fmt, ##__VA_ARGS__)
+
+#define min(a,b) (((a) < (b)) ? (a) : (b))
+
+void tpmfront_handler(evtchn_port_t port, struct pt_regs *regs, void *data) {
+ struct tpmfront_dev* dev = (struct tpmfront_dev*) data;
+ /* If we get a response when we didn't make a request, just ignore it */
+ if(!dev->waiting) {
+ return;
+ }
+
+ dev->waiting = 0;
+#ifdef HAVE_LIBC
+ if(dev->fd >= 0) {
+ files[dev->fd].read = 1;
+ }
+#endif
+ wake_up(&dev->waitq);
+}
+
+static int publish_xenbus(struct tpmfront_dev* dev) {
+ xenbus_transaction_t xbt;
+ int retry;
+ char* err;
+ /* Write the grant reference and event channel to xenstore */
+again:
+ if((err = xenbus_transaction_start(&xbt))) {
+ TPMFRONT_ERR("Unable to start xenbus transaction, error was %s\n", err);
+ free(err);
+ return -1;
+ }
+
+ if((err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", (unsigned int) dev->ring_ref))) {
+ TPMFRONT_ERR("Unable to write %s/ring-ref, error was %s\n", dev->nodename, err);
+ free(err);
+ goto abort_transaction;
+ }
+
+ if((err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", (unsigned int) dev->evtchn))) {
+ TPMFRONT_ERR("Unable to write %s/event-channel, error was %s\n", dev->nodename, err);
+ free(err);
+ goto abort_transaction;
+ }
+
+ if((err = xenbus_transaction_end(xbt, 0, &retry))) {
+ TPMFRONT_ERR("Unable to complete xenbus transaction, error was %s\n", err);
+ free(err);
+ return -1;
+ }
+ if(retry) {
+ goto again;
+ }
+
+ return 0;
+abort_transaction:
+ if((err = xenbus_transaction_end(xbt, 1, &retry))) {
+ free(err);
+ }
+ return -1;
+}
+
+static int wait_for_backend_connect(xenbus_event_queue* events, char* path)
+{
+ int state;
+
+ TPMFRONT_LOG("Waiting for backend connection..\n");
+ /* Wait for the backend to connect */
+ while(1) {
+ state = xenbus_read_integer(path);
+ if ( state < 0)
+ state = XenbusStateUnknown;
+ switch(state) {
+ /* Bad states, we quit with error */
+ case XenbusStateUnknown:
+ case XenbusStateClosing:
+ case XenbusStateClosed:
+ TPMFRONT_ERR("Unable to connect to backend\n");
+ return -1;
+ /* If backend is connected then break out of loop */
+ case XenbusStateConnected:
+ TPMFRONT_LOG("Backend Connected\n");
+ return 0;
+ default:
+ xenbus_wait_for_watch(events);
+ }
+ }
+
+}
+
+static int wait_for_backend_closed(xenbus_event_queue* events, char* path)
+{
+ int state;
+
+ TPMFRONT_LOG("Waiting for backend to close..\n");
+ while(1) {
+ state = xenbus_read_integer(path);
+ if ( state < 0)
+ state = XenbusStateUnknown;
+ switch(state) {
+ case XenbusStateUnknown:
+ TPMFRONT_ERR("Backend Unknown state, forcing shutdown\n");
+ return -1;
+ case XenbusStateClosed:
+ TPMFRONT_LOG("Backend Closed\n");
+ return 0;
+ default:
+ xenbus_wait_for_watch(events);
+ }
+ }
+
+}
+
+static int wait_for_backend_state_changed(struct tpmfront_dev* dev, XenbusState state) {
+ char* err;
+ int ret = 0;
+ xenbus_event_queue events = NULL;
+ char path[512];
+
+ snprintf(path, 512, "%s/state", dev->bepath);
+ /*Setup the watch to wait for the backend */
+ if((err = xenbus_watch_path_token(XBT_NIL, path, path, &events))) {
+ TPMFRONT_ERR("Could not set a watch on %s, error was %s\n", path, err);
+ free(err);
+ return -1;
+ }
+
+ /* Do the actual wait loop now */
+ switch(state) {
+ case XenbusStateConnected:
+ ret = wait_for_backend_connect(&events, path);
+ break;
+ case XenbusStateClosed:
+ ret = wait_for_backend_closed(&events, path);
+ break;
+ default:
+ break;
+ }
+
+ if((err = xenbus_unwatch_path_token(XBT_NIL, path, path))) {
+ TPMFRONT_ERR("Unable to unwatch %s, error was %s, ignoring..\n", path, err);
+ free(err);
+ }
+ return ret;
+}
+
+static int tpmfront_connect(struct tpmfront_dev* dev)
+{
+ char* err;
+ /* Create shared page */
+ dev->tx = (tpmif_tx_interface_t*) alloc_page();
+ if(dev->tx == NULL) {
+ TPMFRONT_ERR("Unable to allocate page for shared memory\n");
+ goto error;
+ }
+ memset(dev->tx, 0, PAGE_SIZE);
+ dev->ring_ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->tx), 0);
+ TPMFRONT_DEBUG("grant ref is %lu\n", (unsigned long) dev->ring_ref);
+
+ /*Create event channel */
+ if(evtchn_alloc_unbound(dev->bedomid, tpmfront_handler, dev, &dev->evtchn)) {
+ TPMFRONT_ERR("Unable to allocate event channel\n");
+ goto error_postmap;
+ }
+ unmask_evtchn(dev->evtchn);
+ TPMFRONT_DEBUG("event channel is %lu\n", (unsigned long) dev->evtchn);
+
+ /* Write the entries to xenstore */
+ if(publish_xenbus(dev)) {
+ goto error_postevtchn;
+ }
+
+ /* Change state to connected */
+ dev->state = XenbusStateConnected;
+
+ /* Tell the backend that we are ready */
+ if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", dev->state))) {
+ TPMFRONT_ERR("Unable to write to xenstore %s/state, value=%u\n", dev->nodename, XenbusStateConnected);
+ free(err);
+ goto error;
+ }
+
+ return 0;
+error_postevtchn:
+ mask_evtchn(dev->evtchn);
+ unbind_evtchn(dev->evtchn);
+error_postmap:
+ gnttab_end_access(dev->ring_ref);
+ free_page(dev->tx);
+error:
+ return -1;
+}
+
+struct tpmfront_dev* init_tpmfront(const char* _nodename)
+{
+ struct tpmfront_dev* dev;
+ const char* nodename;
+ char path[512];
+ char* value, *err;
+ unsigned long long ival;
+ int i;
+
+ printk("============= Init TPM Front ================\n");
+
+ dev = malloc(sizeof(struct tpmfront_dev));
+ memset(dev, 0, sizeof(struct tpmfront_dev));
+
+#ifdef HAVE_LIBC
+ dev->fd = -1;
+#endif
+
+ nodename = _nodename ? _nodename : "device/vtpm/0";
+ dev->nodename = strdup(nodename);
+
+ init_waitqueue_head(&dev->waitq);
+
+ /* Get backend domid */
+ snprintf(path, 512, "%s/backend-id", dev->nodename);
+ if((err = xenbus_read(XBT_NIL, path, &value))) {
+ TPMFRONT_ERR("Unable to read %s during tpmfront initialization! error = %s\n", path, err);
+ free(err);
+ goto error;
+ }
+ if(sscanf(value, "%llu", &ival) != 1) {
+ TPMFRONT_ERR("%s has non-integer value (%s)\n", path, value);
+ free(value);
+ goto error;
+ }
+ free(value);
+ dev->bedomid = ival;
+
+ /* Get backend xenstore path */
+ snprintf(path, 512, "%s/backend", dev->nodename);
+ if((err = xenbus_read(XBT_NIL, path, &dev->bepath))) {
+ TPMFRONT_ERR("Unable to read %s during tpmfront initialization! error = %s\n", path, err);
+ free(err);
+ goto error;
+ }
+
+ /* Create and publish grant reference and event channel */
+ if (tpmfront_connect(dev)) {
+ goto error;
+ }
+
+ /* Wait for backend to connect */
+ if( wait_for_backend_state_changed(dev, XenbusStateConnected)) {
+ goto error;
+ }
+
+ /* Allocate pages that will contain the messages */
+ dev->pages = malloc(sizeof(void*) * TPMIF_TX_RING_SIZE);
+ if(dev->pages == NULL) {
+ goto error;
+ }
+ memset(dev->pages, 0, sizeof(void*) * TPMIF_TX_RING_SIZE);
+ for(i = 0; i < TPMIF_TX_RING_SIZE; ++i) {
+ dev->pages[i] = (void*)alloc_page();
+ if(dev->pages[i] == NULL) {
+ goto error;
+ }
+ }
+
+ TPMFRONT_LOG("Initialization Completed successfully\n");
+
+ return dev;
+
+error:
+ shutdown_tpmfront(dev);
+ return NULL;
+}
+void shutdown_tpmfront(struct tpmfront_dev* dev)
+{
+ char* err;
+ char path[512];
+ int i;
+ tpmif_tx_request_t* tx;
+ if(dev == NULL) {
+ return;
+ }
+ TPMFRONT_LOG("Shutting down tpmfront\n");
+ /* disconnect */
+ if(dev->state == XenbusStateConnected) {
+ dev->state = XenbusStateClosing;
+ //FIXME: Transaction for this?
+ /* Tell backend we are closing */
+ if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
+ free(err);
+ }
+
+ /* Clean up xenstore entries */
+ snprintf(path, 512, "%s/event-channel", dev->nodename);
+ if((err = xenbus_rm(XBT_NIL, path))) {
+ free(err);
+ }
+ snprintf(path, 512, "%s/ring-ref", dev->nodename);
+ if((err = xenbus_rm(XBT_NIL, path))) {
+ free(err);
+ }
+
+ /* Tell backend we are closed */
+ dev->state = XenbusStateClosed;
+ if((err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%u", (unsigned int) dev->state))) {
+ TPMFRONT_ERR("Unable to write to %s, error was %s\n", dev->nodename, err);
+ free(err);
+ }
+
+ /* Wait for the backend to close and unmap shared pages, ignore any errors */
+ wait_for_backend_state_changed(dev, XenbusStateClosed);
+
+ /* Cleanup any shared pages */
+ if(dev->pages) {
+ for(i = 0; i < TPMIF_TX_RING_SIZE; ++i) {
+ if(dev->pages[i]) {
+ tx = &dev->tx->ring[i].req;
+ if(tx->ref != 0) {
+ gnttab_end_access(tx->ref);
+ }
+ free_page(dev->pages[i]);
+ }
+ }
+ free(dev->pages);
+ }
+
+ /* Close event channel and unmap shared page */
+ mask_evtchn(dev->evtchn);
+ unbind_evtchn(dev->evtchn);
+ gnttab_end_access(dev->ring_ref);
+
+ free_page(dev->tx);
+
+ }
+
+ /* Cleanup memory usage */
+ if(dev->respbuf) {
+ free(dev->respbuf);
+ }
+ if(dev->bepath) {
+ free(dev->bepath);
+ }
+ if(dev->nodename) {
+ free(dev->nodename);
+ }
+ free(dev);
+}
+
+int tpmfront_send(struct tpmfront_dev* dev, const uint8_t* msg, size_t length)
+{
+ int i;
+ tpmif_tx_request_t* tx = NULL;
+ /* Error Checking */
+ if(dev == NULL || dev->state != XenbusStateConnected) {
+ TPMFRONT_ERR("Tried to send message through disconnected frontend\n");
+ return -1;
+ }
+
+#ifdef TPMFRONT_PRINT_DEBUG
+ TPMFRONT_DEBUG("Sending Msg to backend size=%u", (unsigned int) length);
+ for(i = 0; i < length; ++i) {
+ if(!(i % 30)) {
+ TPMFRONT_DEBUG_MORE("\n");
+ }
+ TPMFRONT_DEBUG_MORE("%02X ", msg[i]);
+ }
+ TPMFRONT_DEBUG_MORE("\n");
+#endif
+
+ /* Copy to shared pages now */
+ for(i = 0; length > 0 && i < TPMIF_TX_RING_SIZE; ++i) {
+ /* Share the page */
+ tx = &dev->tx->ring[i].req;
+ tx->unused = 0;
+ tx->addr = virt_to_mach(dev->pages[i]);
+ tx->ref = gnttab_grant_access(dev->bedomid, virt_to_mfn(dev->pages[i]), 0);
+ /* Copy the bits to the page */
+ tx->size = length > PAGE_SIZE ? PAGE_SIZE : length;
+ memcpy(dev->pages[i], &msg[i * PAGE_SIZE], tx->size);
+
+ /* Update counters */
+ length -= tx->size;
+ }
+ dev->waiting = 1;
+ dev->resplen = 0;
+#ifdef HAVE_LIBC
+ if(dev->fd >= 0) {
+ files[dev->fd].read = 0;
+ files[dev->fd].tpmfront.respgot = 0;
+ files[dev->fd].tpmfront.offset = 0;
+ }
+#endif
+ notify_remote_via_evtchn(dev->evtchn);
+ return 0;
+}
+int tpmfront_recv(struct tpmfront_dev* dev, uint8_t** msg, size_t *length)
+{
+ tpmif_tx_request_t* tx;
+ int i;
+ if(dev == NULL || dev->state != XenbusStateConnected) {
+ TPMFRONT_ERR("Tried to receive message from disconnected frontend\n");
+ return -1;
+ }
+ /*Wait for the response */
+ wait_event(dev->waitq, (!dev->waiting));
+
+ /* Initialize */
+ *msg = NULL;
+ *length = 0;
+
+ /* special case, just quit */
+ tx = &dev->tx->ring[0].req;
+ if(tx->size == 0 ) {
+ goto quit;
+ }
+ /* Get the total size */
+ tx = &dev->tx->ring[0].req;
+ for(i = 0; i < TPMIF_TX_RING_SIZE && tx->size > 0; ++i) {
+ tx = &dev->tx->ring[i].req;
+ *length += tx->size;
+ }
+ /* Alloc the buffer */
+ if(dev->respbuf) {
+ free(dev->respbuf);
+ }
+ *msg = dev->respbuf = malloc(*length);
+ dev->resplen = *length;
+ /* Copy the bits */
+ tx = &dev->tx->ring[0].req;
+ for(i = 0; i < TPMIF_TX_RING_SIZE && tx->size > 0; ++i) {
+ tx = &dev->tx->ring[i].req;
+ memcpy(&(*msg)[i * PAGE_SIZE], dev->pages[i], tx->size);
+ gnttab_end_access(tx->ref);
+ tx->ref = 0;
+ }
+#ifdef TPMFRONT_PRINT_DEBUG
+ TPMFRONT_DEBUG("Received response from backend size=%u", (unsigned int) *length);
+ for(i = 0; i < *length; ++i) {
+ if(!(i % 30)) {
+ TPMFRONT_DEBUG_MORE("\n");
+ }
+ TPMFRONT_DEBUG_MORE("%02X ", (*msg)[i]);
+ }
+ TPMFRONT_DEBUG_MORE("\n");
+#endif
+#ifdef HAVE_LIBC
+ if(dev->fd >= 0) {
+ files[dev->fd].tpmfront.respgot = 1;
+ }
+#endif
+quit:
+ return 0;
+}
+
+int tpmfront_cmd(struct tpmfront_dev* dev, uint8_t* req, size_t reqlen, uint8_t** resp, size_t* resplen)
+{
+ int rc;
+ if((rc = tpmfront_send(dev, req, reqlen))) {
+ return rc;
+ }
+ if((rc = tpmfront_recv(dev, resp, resplen))) {
+ return rc;
+ }
+
+ return 0;
+}
+
+#ifdef HAVE_LIBC
+#include <errno.h>
+int tpmfront_open(struct tpmfront_dev* dev)
+{
+ /* Silently prevent multiple opens */
+ if(dev->fd != -1) {
+ return dev->fd;
+ }
+
+ dev->fd = alloc_fd(FTYPE_TPMFRONT);
+ printk("tpmfront_open(%s) -> %d\n", dev->nodename, dev->fd);
+ files[dev->fd].tpmfront.dev = dev;
+ files[dev->fd].tpmfront.offset = 0;
+ files[dev->fd].tpmfront.respgot = 0;
+ return dev->fd;
+}
+
+int tpmfront_posix_write(int fd, const uint8_t* buf, size_t count)
+{
+ int rc;
+ struct tpmfront_dev* dev;
+ dev = files[fd].tpmfront.dev;
+
+ if(count == 0) {
+ return 0;
+ }
+
+ /* Return an error if we are already processing a command */
+ if(dev->waiting) {
+ errno = EINPROGRESS;
+ return -1;
+ }
+ /* Send the command now */
+ if((rc = tpmfront_send(dev, buf, count)) != 0) {
+ errno = EIO;
+ return -1;
+ }
+ return count;
+}
+
+int tpmfront_posix_read(int fd, uint8_t* buf, size_t count)
+{
+ int rc;
+ uint8_t* dummybuf;
+ size_t dummysz;
+ struct tpmfront_dev* dev;
+
+ dev = files[fd].tpmfront.dev;
+
+ if(count == 0) {
+ return 0;
+ }
+
+ /* get the response if we haven't already */
+ if(files[dev->fd].tpmfront.respgot == 0) {
+ if ((rc = tpmfront_recv(dev, &dummybuf, &dummysz)) != 0) {
+ errno = EIO;
+ return -1;
+ }
+ }
+
+ /* handle EOF case */
+ if(files[dev->fd].tpmfront.offset >= dev->resplen) {
+ return 0;
+ }
+
+ /* Compute the number of bytes and do the copy operation */
+ if((rc = min(count, dev->resplen - files[dev->fd].tpmfront.offset)) != 0) {
+ memcpy(buf, dev->respbuf + files[dev->fd].tpmfront.offset, rc);
+ files[dev->fd].tpmfront.offset += rc;
+ }
+
+ return rc;
+}
+
+int tpmfront_posix_fstat(int fd, struct stat* buf)
+{
+ uint8_t* dummybuf;
+ size_t dummysz;
+ int rc;
+ struct tpmfront_dev* dev = files[fd].tpmfront.dev;
+
+ /* If we have a response waiting, then read it now from the backend
+ * so we can get its length*/
+ if(dev->waiting || (files[dev->fd].read == 1 && !files[dev->fd].tpmfront.respgot)) {
+ if ((rc = tpmfront_recv(dev, &dummybuf, &dummysz)) != 0) {
+ errno = EIO;
+ return -1;
+ }
+ }
+
+ buf->st_mode = O_RDWR;
+ buf->st_uid = 0;
+ buf->st_gid = 0;
+ buf->st_size = dev->resplen;
+ buf->st_atime = buf->st_mtime = buf->st_ctime = time(NULL);
+
+ return 0;
+}
+
+
+#endif
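
A matching sketch (illustration only, not part of the patch) of synchronous use
of the frontend, assuming tpmfront.h declares init_tpmfront(), tpmfront_cmd()
and shutdown_tpmfront() as used above; the request bytes are intended as a
TPM 1.2 GetRandom command and should be double-checked before use:

#include <mini-os/os.h>
#include <mini-os/tpmfront.h>  /* header name assumed from this series */

static int vtpm_get_random(void)
{
	struct tpmfront_dev *dev;
	uint8_t *resp;
	size_t resplen;
	/* TPM 1.2 GetRandom asking for 8 bytes (tag 0x00C1, length 14,
	 * ordinal 0x46) - example payload only */
	uint8_t req[] = { 0x00, 0xC1, 0x00, 0x00, 0x00, 0x0E,
	                  0x00, 0x00, 0x00, 0x46, 0x00, 0x00, 0x00, 0x08 };

	/* NULL selects the default node "device/vtpm/0" */
	if((dev = init_tpmfront(NULL)) == NULL)
		return -1;

	if(tpmfront_cmd(dev, req, sizeof(req), &resp, &resplen) != 0) {
		shutdown_tpmfront(dev);
		return -1;
	}

	/* resp[0..resplen) now holds the TPM response; it points into
	 * dev->respbuf and is reused by the next command, so copy it out
	 * before issuing another request */

	shutdown_tpmfront(dev);
	return 0;
}
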
--
1.7.4.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel