diff mbox series

[01/13] core/flash: Allow OPAL flash calls for size equal to flash

Message ID 1573060953-6464-15-git-send-email-debmc@linux.ibm.com
State Not Applicable
Headers show
Series [01/13] core/flash: Allow OPAL flash calls for size equal to flash | expand

Commit Message

Deb McLemore Nov. 6, 2019, 5:22 p.m. UTC
From: Cyril Bur <cyril.bur@au1.ibm.com>

A bounds checking mistake prevents opal_flash_{read,write,erase} calls
from having a length equal to the size of the flash. This bug has been
present since the beginning (e7d1f60e core/flash: Add flash API) of
these calls.

Until before d6a5b53f libflash/blocklevel: Add blocklevel_smart_erase()
6/4/2017 none of our tools would have performed a single command for the
full size of the flash. It would still have been possible to persuade
`dd` to do this by using a block size equal to the size of the flash
or other mtd related tools.

Any pflash built with blocklevel_smart_erase() will perform one call to
Linux and then Skiboot for the size of flash.

Signed-off-by: Cyril Bur <cyril.bur@au1.ibm.com>
Acked-by: Samuel Mendoza-Jonas <sam@mendozajonas.com>
Reviewed-By: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Stewart Smith <stewart@linux.ibm.com>
---
 core/flash.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/core/flash.c b/core/flash.c
index 7fbfca2..2f041db 100644
--- a/core/flash.c
+++ b/core/flash.c
@@ -10,6 +10,7 @@ 
 
 #include <skiboot.h>
 #include <cpu.h>
+#include <inttypes.h>
 #include <lock.h>
 #include <opal.h>
 #include <opal-msg.h>
@@ -436,8 +437,10 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 		goto err;
 	}
 
-	if (size >= flash->size || offset >= flash->size
+	if (size > flash->size || offset >= flash->size
 			|| offset + size > flash->size) {
+		prlog(PR_DEBUG, "Requested flash op %d beyond flash size %" PRIu64 "\n",
+				op, flash->size);
 		rc = OPAL_PARAMETER;
 		goto err;
 	}
-- 
2.7.4


From 715a9f9584131224a1af883bc0934405c97a31c2 Mon Sep 17 00:00:00 2001
From: Cyril Bur <cyril.bur@au1.ibm.com>
Date: Thu, 29 Jun 2017 09:38:02 +1000
Subject: [PATCH 02/13] core/flash: Make flash_reserve() and flash_release()
 use any flash

Now that skiboot can have multiple flashes it doesn't make sense for
flash_reserve() and flash_release() to only operate on system_flash.

New functions system_flash_reserve() and system_flash_release() preserve
the current functionality of flash_reserve() and flash_release(), and
flash_reserve() and flash_release() can now be used to mark any flash
as busy.

Signed-off-by: Cyril Bur <cyril.bur@au1.ibm.com>
Reviewed-by: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Stewart Smith <stewart@linux.ibm.com>
---
 core/flash.c       | 20 +++++++++++++++-----
 hw/ipmi/ipmi-sel.c |  4 ++--
 include/skiboot.h  |  4 ++--
 3 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/core/flash.c b/core/flash.c
index 2f041db..78e0023 100644
--- a/core/flash.c
+++ b/core/flash.c
@@ -60,15 +60,15 @@  static struct lock flash_lock;
 static struct flash *nvram_flash;
 static u32 nvram_offset, nvram_size;
 
-bool flash_reserve(void)
+static bool flash_reserve(struct flash *flash)
 {
 	bool rc = false;
 
 	if (!try_lock(&flash_lock))
 		return false;
 
-	if (!system_flash->busy) {
-		system_flash->busy = true;
+	if (!flash->busy) {
+		flash->busy = true;
 		rc = true;
 	}
 	unlock(&flash_lock);
@@ -76,10 +76,10 @@  bool flash_reserve(void)
 	return rc;
 }
 
-void flash_release(void)
+static void flash_release(struct flash *flash)
 {
 	lock(&flash_lock);
-	system_flash->busy = false;
+	flash->busy = false;
 	unlock(&flash_lock);
 }
 
@@ -94,6 +94,16 @@  bool flash_unregister(void)
 	return true;
 }
 
+bool system_flash_reserve(void)
+{
+	return flash_reserve(system_flash);
+}
+
+void system_flash_release(void)
+{
+	flash_release(system_flash);
+}
+
 static int flash_nvram_info(uint32_t *total_size)
 {
 	int rc;
diff --git a/hw/ipmi/ipmi-sel.c b/hw/ipmi/ipmi-sel.c
index 794aa38..ec4a318 100644
--- a/hw/ipmi/ipmi-sel.c
+++ b/hw/ipmi/ipmi-sel.c
@@ -463,7 +463,7 @@  static void sel_pnor(uint8_t access, void *context __unused)
 			break;
 		}
 
-		granted = flash_reserve();
+		granted = system_flash_reserve();
 		if (granted)
 			occ_pnor_set_owner(PNOR_OWNER_EXTERNAL);
 		/* Ack the request */
@@ -472,7 +472,7 @@  static void sel_pnor(uint8_t access, void *context __unused)
 		break;
 	case RELEASE_PNOR:
 		prlog(PR_NOTICE, "PNOR access released\n");
-		flash_release();
+		system_flash_release();
 		occ_pnor_set_owner(PNOR_OWNER_HOST);
 		break;
 	default:
diff --git a/include/skiboot.h b/include/skiboot.h
index 10b43e8..9efee97 100644
--- a/include/skiboot.h
+++ b/include/skiboot.h
@@ -217,8 +217,8 @@  extern int flash_register(struct blocklevel_device *bl);
 extern int flash_start_preload_resource(enum resource_id id, uint32_t subid,
 					void *buf, size_t *len);
 extern int flash_resource_loaded(enum resource_id id, uint32_t idx);
-extern bool flash_reserve(void);
-extern void flash_release(void);
+extern bool system_flash_reserve(void);
+extern void system_flash_release(void);
 extern bool flash_unregister(void);
 #define FLASH_SUBPART_ALIGNMENT 0x1000
 #define FLASH_SUBPART_HEADER_SIZE FLASH_SUBPART_ALIGNMENT
-- 
2.7.4


From 0038b8eb498db592015d94dcefa3ccfe14c82ce4 Mon Sep 17 00:00:00 2001
From: Cyril Bur <cyril.bur@au1.ibm.com>
Date: Thu, 29 Jun 2017 09:38:03 +1000
Subject: [PATCH 03/13] core/flash: Don't hold flash_lock for the entirety of
 an opal_flash_op()

It doesn't make sense to hold the lock to the flash for an entire flash
op. The flash_lock provides mutual exclusion to the flashes structure,
and each flash element has a busy boolean which ensures mutual
exclusion on access to the flash.

Signed-off-by: Cyril Bur <cyril.bur@au1.ibm.com>
Reviewed-By: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Stewart Smith <stewart@linux.ibm.com>
---
 core/flash.c | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/core/flash.c b/core/flash.c
index 78e0023..d97d88e 100644
--- a/core/flash.c
+++ b/core/flash.c
@@ -429,23 +429,16 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 	struct flash *flash = NULL;
 	int rc;
 
-	if (!try_lock(&flash_lock))
-		return OPAL_BUSY;
-
 	list_for_each(&flashes, flash, list)
 		if (flash->id == id)
 			break;
 
-	if (flash->id != id) {
+	if (flash->id != id)
 		/* Couldn't find the flash */
-		rc = OPAL_PARAMETER;
-		goto err;
-	}
+		return OPAL_PARAMETER;
 
-	if (flash->busy) {
-		rc = OPAL_BUSY;
-		goto err;
-	}
+	if (!flash_reserve(flash))
+		return OPAL_BUSY;
 
 	if (size > flash->size || offset >= flash->size
 			|| offset + size > flash->size) {
@@ -481,13 +474,13 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 		goto err;
 	}
 
-	unlock(&flash_lock);
+	flash_release(flash);
 
 	opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, token, rc);
 	return OPAL_ASYNC_COMPLETION;
 
 err:
-	unlock(&flash_lock);
+	flash_release(flash);
 	return rc;
 }
 
-- 
2.7.4


From 3ae0772f6f99557b7a1e82759bf457703b99ab90 Mon Sep 17 00:00:00 2001
From: Cyril Bur <cyril.bur@au1.ibm.com>
Date: Fri, 30 Jun 2017 15:37:08 +1000
Subject: [PATCH 04/13] core/flash: Make opal_flash_op() actually asynchronous

This patch provides a simple (although not particularly efficient)
asynchronous capability to the opal_flash interface. The advantage of
this approach is that it doesn't require any changing of blocklevel or
its backends to provide an asynchronous implementation. This is also the
disadvantage of this implementation as all it actually does is break the
work up in chunks that it can performed quickly, but still
synchronously. Only a backend could provide a more asynchronous
implementation.

This solves a problem we have right now with the opal_flash_erase call
where it can block in Skiboot for around three minutes. This causes a
variety of problems in Linux due to a processor being gone for a long
time.
For example:

[   98.610043] INFO: rcu_sched detected stalls on CPUs/tasks:
[   98.610050]         113-...: (1 GPs behind) idle=96f/140000000000000/0 softirq=527/528 fqs=1044
[   98.610051]         (detected by 112, t=2102 jiffies, g=223, c=222, q=123)
[   98.610060] Task dump for CPU 113:
[   98.610062] pflash          R  running task        0  3335   3333 0x00040004
[   98.610066] Call Trace:
[   98.610070] [c000001fdd847730] [0000000000000001] 0x1 (unreliable)
[   98.610076] [c000001fdd847900] [c000000000013854] __switch_to+0x1e8/0x1f4
[   98.610081] [c000001fdd847960] [c0000000006122c4] __schedule+0x32c/0x874
[   98.610083] [c000001fdd847a30] [c000001fdd847b40] 0xc000001fdd847b40

It is for this reason that breaking the work up in smaller chunks solves
this problem as Skiboot can return the CPU to Linux between chunks to
avoid Linux getting upset.

Reported-By: Samuel Mendoza-Jonas <sam@mendozajonas.com>
Signed-off-by: Cyril Bur <cyril.bur@au1.ibm.com>
Signed-off-by: Stewart Smith <stewart@linux.ibm.com>
---
 core/flash.c | 117 ++++++++++++++++++++++++++++++++++++++++++++++++++---------
 core/timer.c |   8 ++++
 2 files changed, 108 insertions(+), 17 deletions(-)

diff --git a/core/flash.c b/core/flash.c
index d97d88e..e98c8e0 100644
--- a/core/flash.c
+++ b/core/flash.c
@@ -25,8 +25,24 @@ 
 #include <libstb/trustedboot.h>
 #include <libxz/xz.h>
 #include <elf.h>
+#include <timer.h>
 #include <timebase.h>
 
+enum flash_op {
+	FLASH_OP_READ,
+	FLASH_OP_WRITE,
+	FLASH_OP_ERASE,
+};
+
+struct flash_async_info {
+	enum flash_op op;
+	struct timer poller;
+	uint64_t token;
+	uint64_t pos;
+	uint64_t len;
+	uint64_t buf;
+};
+
 struct flash {
 	struct list_node	list;
 	bool			busy;
@@ -35,6 +51,7 @@  struct flash {
 	uint64_t		size;
 	uint32_t		block_size;
 	int			id;
+	struct flash_async_info async;
 };
 
 static struct {
@@ -234,6 +251,54 @@  static int flash_nvram_probe(struct flash *flash, struct ffs_handle *ffs)
 
 /* core flash support */
 
+/*
+ * Called with flash lock held, drop it on async completion
+ */
+static void flash_poll(struct timer *t __unused, void *data, uint64_t now __unused)
+{
+	struct flash *flash = data;
+	uint64_t offset, buf, len;
+	int rc;
+
+	offset = flash->async.pos;
+	buf = flash->async.buf;
+	len = MIN(flash->async.len, flash->block_size*10);
+	printf("Flash poll op %d len %llu\n", flash->async.op, len);
+
+	switch (flash->async.op) {
+	case FLASH_OP_READ:
+		rc = blocklevel_raw_read(flash->bl, offset, (void *)buf, len);
+		break;
+	case FLASH_OP_WRITE:
+		rc = blocklevel_raw_write(flash->bl, offset, (void *)buf, len);
+		break;
+	case FLASH_OP_ERASE:
+		rc = blocklevel_erase(flash->bl, offset, len);
+		break;
+	default:
+		assert(0);
+	}
+
+	if (rc)
+		rc = OPAL_HARDWARE;
+
+	flash->async.pos += len;
+	flash->async.buf += len;
+	flash->async.len -= len;
+	if (!rc && flash->async.len) {
+		/*
+		 * We want to get called pretty much straight away, just have
+		 * to be sure that we jump back out to Linux so that if this
+		 * very long we don't cause RCU or the scheduler to freak
+		 */
+		schedule_timer(&flash->async.poller, 0);
+		return;
+	}
+
+	opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, flash->async.token, rc);
+	flash_release(flash);
+}
+
 static struct dt_node *flash_add_dt_node(struct flash *flash, int id)
 {
 	int i;
@@ -389,6 +454,7 @@  int flash_register(struct blocklevel_device *bl)
 	flash->size = size;
 	flash->block_size = block_size;
 	flash->id = num_flashes();
+	init_timer(&flash->async.poller, flash_poll, flash);
 
 	rc = ffs_init(0, flash->size, bl, &ffs, 1);
 	if (rc) {
@@ -417,16 +483,11 @@  int flash_register(struct blocklevel_device *bl)
 	return OPAL_SUCCESS;
 }
 
-enum flash_op {
-	FLASH_OP_READ,
-	FLASH_OP_WRITE,
-	FLASH_OP_ERASE,
-};
-
 static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 		uint64_t buf, uint64_t size, uint64_t token)
 {
 	struct flash *flash = NULL;
+	uint64_t len;
 	int rc;
 
 	list_for_each(&flashes, flash, list)
@@ -445,9 +506,17 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 		prlog(PR_DEBUG, "Requested flash op %d beyond flash size %" PRIu64 "\n",
 				op, flash->size);
 		rc = OPAL_PARAMETER;
-		goto err;
+		goto out;
 	}
 
+	len = MIN(size, flash->block_size*10);
+	printf("Flash op %d len %llu\n", op, len);
+	flash->async.op = op;
+	flash->async.token = token;
+	flash->async.buf = buf + len;
+	flash->async.len = size - len;
+	flash->async.pos = offset + len;
+
 	/*
 	 * These ops intentionally have no smarts (ecc correction or erase
 	 * before write) to them.
@@ -457,29 +526,43 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 	 */
 	switch (op) {
 	case FLASH_OP_READ:
-		rc = blocklevel_raw_read(flash->bl, offset, (void *)buf, size);
+		rc = blocklevel_raw_read(flash->bl, offset, (void *)buf, len);
 		break;
 	case FLASH_OP_WRITE:
-		rc = blocklevel_raw_write(flash->bl, offset, (void *)buf, size);
+		rc = blocklevel_raw_write(flash->bl, offset, (void *)buf, len);
 		break;
 	case FLASH_OP_ERASE:
-		rc = blocklevel_erase(flash->bl, offset, size);
+		rc = blocklevel_erase(flash->bl, offset, len);
 		break;
 	default:
 		assert(0);
 	}
 
 	if (rc) {
+		prlog(PR_ERR, "%s: Op %d failed with rc %d\n", __func__, op, rc);
 		rc = OPAL_HARDWARE;
-		goto err;
+		goto out;
 	}
 
-	flash_release(flash);
-
-	opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, token, rc);
-	return OPAL_ASYNC_COMPLETION;
-
-err:
+	if (size - len) {
+		/* Work remains */
+		schedule_timer(&flash->async.poller, 0);
+		/* Don't release the flash */
+		return OPAL_ASYNC_COMPLETION;
+	} else {
+		/*
+		 * As tempting as it might be here to return OPAL_SUCCESS
+		 * here, don't! As of 1/07/2017 the powernv_flash driver in
+		 * Linux will handle OPAL_SUCCESS as an error, the only thing
+		 * that makes it handle things as though they're working is
+		 * receiving OPAL_ASYNC_COMPLETION.
+		 *
+		 * XXX TODO: Revisit this in a few years *sigh*
+		 */
+		opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, flash->async.token, rc);
+	}
+	rc = OPAL_ASYNC_COMPLETION;
+out:
 	flash_release(flash);
 	return rc;
 }
diff --git a/core/timer.c b/core/timer.c
index 5cf9069..53b6616 100644
--- a/core/timer.c
+++ b/core/timer.c
@@ -210,6 +210,7 @@  static void __check_poll_timers(uint64_t now)
 static void __check_timers(uint64_t now)
 {
 	struct timer *t;
+	uint64_t stop = now + msecs_to_tb(50); /* Run timers for max 5ms */
 
 	for (;;) {
 		t = list_top(&timer_list, struct timer, link);
@@ -240,6 +241,13 @@  static void __check_timers(uint64_t now)
 
 		/* Update time stamp */
 		now = mftb();
+
+		/* Only run timers for a limited time to avoid jitter */
+		if (now > stop) {
+			prlog(PR_TRACE, "Run timers for > 50ms total elapsed time=%lu\n",
+				tb_to_msecs(now - stop)+50);
+			break;
+		}
 	}
 }
 
-- 
2.7.4


From 5151a71adda86008eace8986ef1d8f8688e275e8 Mon Sep 17 00:00:00 2001
From: Stewart Smith <stewart@linux.ibm.com>
Date: Tue, 18 Dec 2018 16:42:29 +1100
Subject: [PATCH 05/13] qemu: 1ms heartbeat time

Signed-off-by: Stewart Smith <stewart@linux.ibm.com>
---
 platforms/qemu/qemu.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/platforms/qemu/qemu.c b/platforms/qemu/qemu.c
index 757c086..71be716 100644
--- a/platforms/qemu/qemu.c
+++ b/platforms/qemu/qemu.c
@@ -42,6 +42,14 @@  static bool qemu_probe_powernv9(void)
 	return qemu_probe_common("qemu,powernv9");
 }
 
+static int qemu_heartbeat_time(void)
+{
+	/*
+	 * Fast polling to make up for lack of SBE timers
+	 */
+	return 1;
+}
+
 static void qemu_init(void)
 {
 	if (!bt_device_present) {
@@ -60,6 +68,7 @@  DECLARE_PLATFORM(qemu) = {
 	.cec_reboot     = astbmc_ipmi_reboot,
 	.start_preload_resource	= flash_start_preload_resource,
 	.resource_loaded	= flash_resource_loaded,
+	.heartbeat_time		= qemu_heartbeat_time,
 	.terminate	= ipmi_terminate,
 };
 
-- 
2.7.4


From fcc2f8bb35743f560cec3655d4f1b31bf95da9b2 Mon Sep 17 00:00:00 2001
From: Deb McLemore <debmc@linux.ibm.com>
Date: Wed, 29 May 2019 14:47:40 -0500
Subject: [PATCH 06/13] hiomap: Enable Async IPMI messaging

To provide higher layer async operations to access
the target flash, enable hiomap to perform async
ipmi messaging for call paths thru opal_flash_op.

Special considerations and constraints are to prevent
recursive locking and/or polling inside OPAL calls.

Objective is to remove the hiomap_queue_msg_sync for
moving windows (which already uses pollers) to allow
async requests to perform their functions.

Call paths thru opal_flash_op will determine if the
requested operation needs to be re-queued, to allow
skiboot to jump back out to Linux to prevent RCU or
scheduler issues.

For example, if a flash window move operation
is needed to erase a very large flash segment,
the time spent in opal firmware would be long
enough that Linux would stall.  To avoid the
long running duration of various aspects of this
inherent long-running operation we divide the
tasks into smaller chunks to avoid accumulating
time spent in opal firmware.  The return codes
are used to distinguish differences between cases
where a re-queue of the transaction would be
needed versus a true failure return code.

PR_TRACE used since PR_DEBUG seems to always trigger,
unable to figure out why.

Due to the nature of redefining the sync versus
async capabilities, this patch is larger than
desired due to interrelationships of changes
to functionality.

Signed-off-by: Deb McLemore <debmc@linux.ibm.com>
---
 core/flash.c           | 154 +++++++-
 libflash/blocklevel.h  |   5 +
 libflash/errors.h      |   1 +
 libflash/ipmi-hiomap.c | 955 ++++++++++++++++++++++++++++++++++++++-----------
 libflash/ipmi-hiomap.h |  41 ++-
 5 files changed, 936 insertions(+), 220 deletions(-)

diff --git a/core/flash.c b/core/flash.c
index e98c8e0..98614f7 100644
--- a/core/flash.c
+++ b/core/flash.c
@@ -28,6 +28,14 @@ 
 #include <timer.h>
 #include <timebase.h>
 
+/*
+ * need to keep this under the BT queue limit
+ * causes are when ipmi to bmc gets bogged down
+ * delay allows for congestion to clear
+ */
+#define FLASH_RETRY_LIMIT 10
+#define FLASH_SCHEDULE_DELAY 200
+
 enum flash_op {
 	FLASH_OP_READ,
 	FLASH_OP_WRITE,
@@ -41,6 +49,9 @@  struct flash_async_info {
 	uint64_t pos;
 	uint64_t len;
 	uint64_t buf;
+	int retry_counter;
+	int transaction_id;
+	int in_progress_schedule_delay;
 };
 
 struct flash {
@@ -80,13 +91,63 @@  static u32 nvram_offset, nvram_size;
 static bool flash_reserve(struct flash *flash)
 {
 	bool rc = false;
+	int lock_try_counter = 10;
+	uint64_t now;
+	uint64_t start_time;
+	uint64_t wait_time;
+	uint64_t flash_reserve_ticks = 10;
+	uint64_t timeout_counter;
+
+	start_time = mftb();
+	now = mftb();
+	wait_time = tb_to_msecs(now - start_time);
+	timeout_counter = 0;
+
+
+	while (wait_time < flash_reserve_ticks) {
+		++timeout_counter;
+		if (timeout_counter % 4 == 0) {
+			now = mftb();
+			wait_time = tb_to_msecs(now - start_time);
+		}
+		if (flash->busy == false) {
+			break;
+		}
+	}
 
-	if (!try_lock(&flash_lock))
+	while (!try_lock(&flash_lock)) {
+		--lock_try_counter;
+		if (lock_try_counter == 0) {
+			break;
+		}
+	}
+
+	if (lock_try_counter == 0) {
 		return false;
+	}
+
+	/* we have the lock if we got here */
 
 	if (!flash->busy) {
 		flash->busy = true;
 		rc = true;
+	} else {
+		/* probably beat a flash_release and grabbed the lock */
+		unlock(&flash_lock);
+		while (!try_lock(&flash_lock)) {
+			--lock_try_counter;
+			if (lock_try_counter == 0) {
+				break;
+			}
+		}
+		if (lock_try_counter == 0) {
+			return false;
+		}
+		/* we have the lock if we are here */
+		if (!flash->busy) {
+			flash->busy = true;
+			rc = true;
+		}
 	}
 	unlock(&flash_lock);
 
@@ -279,12 +340,31 @@  static void flash_poll(struct timer *t __unused, void *data, uint64_t now __unus
 		assert(0);
 	}
 
-	if (rc)
-		rc = OPAL_HARDWARE;
+	if (rc == 0) {
+		/*
+		 * if we are good to proceed forward
+		 * otherwise we may have to try again
+		 */
+		flash->async.pos += len;
+		flash->async.buf += len;
+		flash->async.len -= len;
+		/* if we good clear */
+		flash->async.retry_counter = 0;
+		/*
+		 * clear the IN_PROGRESS flags
+		 * we only need IN_PROGRESS active on missed_cc
+		 */
+		flash->bl->flags &= IN_PROGRESS_MASK;
+		/* reset time for scheduling gap */
+		flash->async.in_progress_schedule_delay = FLASH_SCHEDULE_DELAY;
+	}
+
+	/*
+	 * corner cases if the move window misses and
+	 * the requested window is split (needing adjustment down) problem
+	 * if timing good on move_cb the world is good
+	 */
 
-	flash->async.pos += len;
-	flash->async.buf += len;
-	flash->async.len -= len;
 	if (!rc && flash->async.len) {
 		/*
 		 * We want to get called pretty much straight away, just have
@@ -293,10 +373,38 @@  static void flash_poll(struct timer *t __unused, void *data, uint64_t now __unus
 		 */
 		schedule_timer(&flash->async.poller, 0);
 		return;
+	} else {
+		if (rc == FLASH_ERR_MISSED_CC) {
+			++flash->async.retry_counter;
+			flash->async.in_progress_schedule_delay += FLASH_SCHEDULE_DELAY;
+			if (flash->async.retry_counter >= FLASH_RETRY_LIMIT) {
+				rc = OPAL_HARDWARE;
+				prlog(PR_TRACE, "flash_poll PROBLEM FLASH_RETRY_LIMIT of %i reached on transaction_id=%i\n",
+					FLASH_RETRY_LIMIT,
+					flash->async.transaction_id);
+			} else {
+				/*
+				 * give the BT time to work and receive response
+				 * throttle back to allow for congestion to clear
+				 * cases observed were complete lack of ipmi response until very late
+				 * or cases immediately following an unaligned window read/move (so slow)
+				 */
+				flash->bl->flags |= IN_PROGRESS;
+				schedule_timer(&flash->async.poller, msecs_to_tb(flash->async.in_progress_schedule_delay));
+				return;
+			}
+		} else {
+			if (rc != 0) {
+				rc = OPAL_HARDWARE;
+			}
+		}
 	}
 
-	opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, flash->async.token, rc);
+	flash->bl->flags &= IN_PROGRESS_MASK;
+	flash->bl->flags &= ASYNC_REQUIRED_MASK;
+	/* release the flash before we allow next opal entry */
 	flash_release(flash);
+	opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, flash->async.token, rc);
 }
 
 static struct dt_node *flash_add_dt_node(struct flash *flash, int id)
@@ -454,6 +562,7 @@  int flash_register(struct blocklevel_device *bl)
 	flash->size = size;
 	flash->block_size = block_size;
 	flash->id = num_flashes();
+	flash->async.transaction_id = 0;
 	init_timer(&flash->async.poller, flash_poll, flash);
 
 	rc = ffs_init(0, flash->size, bl, &ffs, 1);
@@ -487,7 +596,7 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 		uint64_t buf, uint64_t size, uint64_t token)
 {
 	struct flash *flash = NULL;
-	uint64_t len;
+	uint64_t len = 0;
 	int rc;
 
 	list_for_each(&flashes, flash, list)
@@ -516,6 +625,10 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 	flash->async.buf = buf + len;
 	flash->async.len = size - len;
 	flash->async.pos = offset + len;
+	flash->async.retry_counter = 0;
+	flash->async.in_progress_schedule_delay = FLASH_SCHEDULE_DELAY;
+	flash->bl->flags |= ASYNC_REQUIRED;
+	++flash->async.transaction_id;
 
 	/*
 	 * These ops intentionally have no smarts (ecc correction or erase
@@ -539,8 +652,27 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 	}
 
 	if (rc) {
-		prlog(PR_ERR, "%s: Op %d failed with rc %d\n", __func__, op, rc);
-		rc = OPAL_HARDWARE;
+		if (rc == FLASH_ERR_MISSED_CC) {
+			++flash->async.retry_counter;
+			flash->async.buf = buf;
+			flash->async.len = size;
+			flash->async.pos = offset;
+			/* for completeness, opal_flash_op is first time pass so unless the retry limit set to 1 */
+			if (flash->async.retry_counter >= FLASH_RETRY_LIMIT) {
+				rc = OPAL_HARDWARE;
+				prlog(PR_TRACE, "opal_flash_op PROBLEM FLASH_RETRY_LIMIT of %i reached on transaction_id=%i\n",
+					FLASH_RETRY_LIMIT,
+					flash->async.transaction_id);
+				goto out;
+			}
+			flash->bl->flags |= IN_PROGRESS;
+			schedule_timer(&flash->async.poller, msecs_to_tb(FLASH_SCHEDULE_DELAY));
+			/* Don't release the flash */
+			return OPAL_ASYNC_COMPLETION;
+		} else {
+			prlog(PR_ERR, "%s: Op %d failed with rc %d\n", __func__, op, rc);
+			rc = OPAL_HARDWARE;
+		}
 		goto out;
 	}
 
@@ -564,6 +696,8 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 	rc = OPAL_ASYNC_COMPLETION;
 out:
 	flash_release(flash);
+	flash->bl->flags &= IN_PROGRESS_MASK;
+	flash->bl->flags &= ASYNC_REQUIRED_MASK;
 	return rc;
 }
 
diff --git a/libflash/blocklevel.h b/libflash/blocklevel.h
index 492918e..63d8690 100644
--- a/libflash/blocklevel.h
+++ b/libflash/blocklevel.h
@@ -18,8 +18,13 @@  struct blocklevel_range {
 	int total_prot;
 };
 
+#define ASYNC_REQUIRED_MASK 0xFFFB
+#define IN_PROGRESS_MASK 0xFFF7
+
 enum blocklevel_flags {
 	WRITE_NEED_ERASE = 1,
+	ASYNC_REQUIRED = 4,
+	IN_PROGRESS = 8,
 };
 
 /*
diff --git a/libflash/errors.h b/libflash/errors.h
index c800ada..c24166d 100644
--- a/libflash/errors.h
+++ b/libflash/errors.h
@@ -21,6 +21,7 @@ 
 #define FLASH_ERR_BAD_READ		15
 #define FLASH_ERR_DEVICE_GONE	16
 #define FLASH_ERR_AGAIN	17
+#define FLASH_ERR_MISSED_CC	18
 
 #ifdef __SKIBOOT__
 #include <skiboot.h>
diff --git a/libflash/ipmi-hiomap.c b/libflash/ipmi-hiomap.c
index 7327b83..08af005 100644
--- a/libflash/ipmi-hiomap.c
+++ b/libflash/ipmi-hiomap.c
@@ -11,6 +11,10 @@ 
 #include <stdbool.h>
 #include <stdint.h>
 #include <string.h>
+#include <lock.h>
+#include <debug_descriptor.h>
+#include <timebase.h>
+#include <cpu.h>
 
 #include <ccan/container_of/container_of.h>
 
@@ -24,7 +28,7 @@  struct ipmi_hiomap_result {
 	int16_t cc;
 };
 
-#define RESULT_INIT(_name, _ctx) struct ipmi_hiomap_result _name = { _ctx, -1 }
+static struct hiomap_v2_create_window *window_parms;
 
 static inline uint32_t blocks_to_bytes(struct ipmi_hiomap *ctx, uint16_t blocks)
 {
@@ -62,9 +66,20 @@  static int hiomap_protocol_ready(struct ipmi_hiomap *ctx)
 	return 0;
 }
 
-static int hiomap_queue_msg_sync(struct ipmi_hiomap *ctx, struct ipmi_msg *msg)
+static int hiomap_queue_msg(struct ipmi_hiomap *ctx, struct ipmi_msg *msg)
 {
 	int rc;
+	int bl_flags;
+
+	lock(&ctx->lock);
+	bl_flags = ctx->bl.flags;
+	unlock(&ctx->lock);
+
+	/*
+	 * during boot caution to stay duration within skiboot/
+	 * no exit re-entry due to poller conflicts with synchronous window moves
+	 * asynchronous usage intended for opal_flash_op and flash_poll paths
+	 */
 
 	/*
 	 * There's an unavoidable TOCTOU race here with the BMC sending an
@@ -73,17 +88,23 @@  static int hiomap_queue_msg_sync(struct ipmi_hiomap *ctx, struct ipmi_msg *msg)
 	 * hiomap_queue_msg_sync() exists to capture the race in a single
 	 * location.
 	 */
-	lock(&ctx->lock);
-	rc = hiomap_protocol_ready(ctx);
-	unlock(&ctx->lock);
-	if (rc) {
-		ipmi_free_msg(msg);
-		return rc;
-	}
 
-	ipmi_queue_msg_sync(msg);
+	if ((opal_booting()) || (!(bl_flags & ASYNC_REQUIRED))) {
+		lock(&ctx->lock);
+		rc = hiomap_protocol_ready(ctx);
+		unlock(&ctx->lock);
+		if (rc) {
+			ipmi_free_msg(msg);
+			return rc;
+		}
+		prlog(PR_TRACE, "%s SENDING SYNC\n", __func__);
+		ipmi_queue_msg_sync(msg);
+	} else {
+		prlog(PR_TRACE, "%s SENDING ASYNC\n", __func__);
+		rc = ipmi_queue_msg(msg);
+	}
 
-	return 0;
+	return rc;
 }
 
 /* Call under ctx->lock */
@@ -100,12 +121,178 @@  static int hiomap_window_valid(struct ipmi_hiomap *ctx, uint64_t pos,
 		return FLASH_ERR_PARM_ERROR;
 	if (pos < ctx->current.cur_pos)
 		return FLASH_ERR_PARM_ERROR;
-	if ((pos + len) > (ctx->current.cur_pos + ctx->current.size))
-		return FLASH_ERR_PARM_ERROR;
+	if ((pos + len) > (ctx->current.cur_pos + ctx->current.size)) {
+		/* we will compensate the proper values in caller */
+		if ((pos + ctx->current.size) <= (ctx->current.cur_pos + ctx->current.size)) {
+			prlog(PR_TRACE, "%s OK pos=%llu "
+				"ctx->current.size=0x%x "
+				"ctx->current.cur_pos=0x%x\n",
+				__func__,
+				pos,
+				ctx->current.size,
+				ctx->current.cur_pos);
+		} else {
+			prlog(PR_TRACE, "%s CHECKING further pos=%llu "
+				"for len=%llu ctx->current.size=0x%x "
+				"ctx->current.cur_pos=0x%x\n",
+				__func__,
+				pos,
+				len,
+				ctx->current.size,
+				ctx->current.cur_pos);
+			if ((pos + ctx->current.adjusted_window_size) <= (ctx->current.cur_pos + ctx->current.size)) {
+				prlog(PR_TRACE, "%s OK use ADJUSTED pos=%llu "
+					"adjusted_len=%i for len=%llu "
+					"ctx->current.size=0x%x "
+					"ctx->current.cur_pos=0x%x\n",
+					__func__,
+					pos,
+					ctx->current.adjusted_window_size,
+					len,
+					ctx->current.size,
+					ctx->current.cur_pos);
+			} else {
+				prlog(PR_TRACE, "%s we need to MOVE the window\n", __func__);
+				return FLASH_ERR_PARM_ERROR;
+			}
+		}
+	}
 
+	prlog(PR_TRACE, "%s ALL GOOD, no move needed\n", __func__);
 	return 0;
 }
 
+static void move_cb(struct ipmi_msg *msg)
+{
+	/*
+	 * we leave the move_cb outside of the ipmi_hiomap_cmd_cb
+	 * based on the command we need to special close the window
+	 */
+
+	struct ipmi_hiomap_result *res = msg->user_data;
+	struct ipmi_hiomap *ctx = res->ctx;
+	/*
+	 * only a few iterations to try for lock/
+	 * contention is probably hiomap_window_move trying to setup again
+	 */
+	int lock_try_counter = 10;
+
+	if ((msg->resp_size != 8) || (msg->cc != IPMI_CC_NO_ERROR) || (msg->data[1] != ctx->inflight_seq)) {
+		prlog(PR_TRACE, "Command %u (4=READ 6=WRITE): move_cb "
+			"Unexpected results to check: response size we "
+			"expect 8 but received %u, ipmi cc=%d "
+			"(should be zero), expected ipmi seq %i but got "
+			"wrong ipmi seq %i\n",
+			msg->data[0],
+			msg->resp_size,
+			msg->cc,
+			ctx->inflight_seq,
+			msg->data[1]);
+		lock(&ctx->lock);
+		ctx->cc = OPAL_HARDWARE;
+		ctx->window_state = closed_window;
+		goto out;
+	} else {
+		prlog(PR_TRACE, "Entered %s for %s window from "
+			"OLD block pos 0x%x for 0x%x bytes at "
+			"lpc_addr 0x%x ipmi seq=%i\n",
+			__func__,
+			(msg->data[0] == HIOMAP_C_CREATE_READ_WINDOW) ? "read" : "write",
+			ctx->current.cur_pos,
+			ctx->current.size,
+			ctx->current.lpc_addr,
+			ctx->inflight_seq);
+	}
+
+	window_parms = (struct hiomap_v2_create_window *)&msg->data[2];
+
+	while (!try_lock(&ctx->lock)) {
+		--lock_try_counter;
+		if (lock_try_counter == 0) {
+			break;
+		}
+	}
+	if (lock_try_counter == 0) {
+		/*
+		 * we cannot get the lock, but update anyway
+		 * because we cannot communicate this completion
+		 * and someone will need to retry
+		 * contention usually with handle_events or window_move
+		 * this code path is the critical path that will open the window
+		 */
+		ctx->window_state = closed_window;
+		ctx->cc = OPAL_PARAMETER;
+		goto out2;
+	}
+
+	/* If here, we got the lock, cc consumed higher up so need in ctx */
+
+	ctx->cc = IPMI_CC_NO_ERROR;
+	ctx->current.lpc_addr =
+		blocks_to_bytes(ctx, le16_to_cpu(window_parms->lpc_addr));
+	ctx->current.size =
+		blocks_to_bytes(ctx, le16_to_cpu(window_parms->size));
+	ctx->current.cur_pos =
+		blocks_to_bytes(ctx, le16_to_cpu(window_parms->offset));
+	/* refresh to current */
+	ctx->current.adjusted_window_size = ctx->current.size;
+
+	/* now that we have moved stuff the values */
+	*ctx->active_size = ctx->requested_len;
+
+	/*
+	 * Is length past the end of the window?
+	 * if this condition happens it can cause the async.retry_counter to fail
+	 */
+	if ((ctx->requested_pos + ctx->requested_len) > (ctx->current.cur_pos + ctx->current.size)) {
+		/*
+		 * Adjust size to meet current window
+		 * active_size goes back to caller,
+		 * but caller may expire and we need to store for future use
+		 */
+		*ctx->active_size = (ctx->current.cur_pos + ctx->current.size) - ctx->requested_pos;
+		ctx->current.adjusted_window_size = (ctx->current.cur_pos + ctx->current.size) - ctx->requested_pos;
+		prlog(PR_TRACE, "%s VALID MOVE ADJUSTMENT "
+			"*ctx->active_size=%llu "
+			"ctx->requested_pos=%llu "
+			"ctx->current.adjusted_window_size=%i\n",
+			__func__,
+			*ctx->active_size,
+			ctx->requested_pos,
+			ctx->current.adjusted_window_size);
+	}
+
+	if (ctx->requested_len != 0 && *ctx->active_size == 0) {
+		prlog(PR_NOTICE, "%s Invalid window properties: len: %llu, size: %llu\n",
+			__func__, ctx->requested_len, *ctx->active_size);
+		ctx->cc = OPAL_PARAMETER;
+		ctx->window_state = closed_window;
+		goto out;
+	}
+
+	if (msg->data[0] == HIOMAP_C_CREATE_READ_WINDOW)
+		ctx->window_state = read_window;
+	else
+		ctx->window_state = write_window;
+
+	prlog(PR_TRACE, "Opened %s window to NEW block pos 0x%x for 0x%x bytes "
+		"at lpc_addr 0x%x ipmi seq=%i active size=%llu "
+		"adjusted_window_size=%i\n",
+		(msg->data[0] == HIOMAP_C_CREATE_READ_WINDOW) ? "read" : "write",
+		ctx->current.cur_pos,
+		ctx->current.size,
+		ctx->current.lpc_addr,
+		ctx->inflight_seq,
+		*ctx->active_size,
+		ctx->current.adjusted_window_size);
+
+out:	prlog(PR_TRACE, "Exiting the move window callback "
+		"transaction ipmi seq=%i\n",
+		ctx->inflight_seq);
+	unlock(&ctx->lock);
+out2:	ipmi_free_msg(msg);
+}
+
 static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 {
 	struct ipmi_hiomap_result *res = msg->user_data;
@@ -125,9 +312,9 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 		return;
 	}
 
-	if (msg->data[1] != ctx->seq) {
+	if (msg->data[1] != ctx->inflight_seq) {
 		prerror("Unmatched sequence number: wanted %u got %u\n",
-			ctx->seq, msg->data[1]);
+			ctx->inflight_seq, msg->data[1]);
 		res->cc = IPMI_ERR_UNSPECIFIED;
 		ipmi_free_msg(msg);
 		return;
@@ -138,6 +325,7 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 	{
 		struct hiomap_v2_info *parms;
 
+		ctx->cc = IPMI_CC_NO_ERROR;
 		if (msg->resp_size != 6) {
 			prerror("%u: Unexpected response size: %u\n", msg->data[0],
 				msg->resp_size);
@@ -162,6 +350,7 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 	{
 		struct hiomap_v2_flash_info *parms;
 
+		ctx->cc = IPMI_CC_NO_ERROR;
 		if (msg->resp_size != 6) {
 			prerror("%u: Unexpected response size: %u\n", msg->data[0],
 				msg->resp_size);
@@ -176,36 +365,6 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 			blocks_to_bytes(ctx, le16_to_cpu(parms->erase_granule));
 		break;
 	}
-	case HIOMAP_C_CREATE_READ_WINDOW:
-	case HIOMAP_C_CREATE_WRITE_WINDOW:
-	{
-		struct hiomap_v2_create_window *parms;
-
-		if (msg->resp_size != 8) {
-			prerror("%u: Unexpected response size: %u\n", msg->data[0],
-				msg->resp_size);
-			res->cc = IPMI_ERR_UNSPECIFIED;
-			break;
-		}
-
-		parms = (struct hiomap_v2_create_window *)&msg->data[2];
-
-		ctx->current.lpc_addr =
-			blocks_to_bytes(ctx, le16_to_cpu(parms->lpc_addr));
-		ctx->current.size =
-			blocks_to_bytes(ctx, le16_to_cpu(parms->size));
-		ctx->current.cur_pos =
-			blocks_to_bytes(ctx, le16_to_cpu(parms->offset));
-
-		lock(&ctx->lock);
-		if (msg->data[0] == HIOMAP_C_CREATE_READ_WINDOW)
-			ctx->window_state = read_window;
-		else
-			ctx->window_state = write_window;
-		unlock(&ctx->lock);
-
-		break;
-	}
 	case HIOMAP_C_MARK_DIRTY:
 	case HIOMAP_C_FLUSH:
 	case HIOMAP_C_ACK:
@@ -215,7 +374,15 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 			prerror("%u: Unexpected response size: %u\n", msg->data[0],
 				msg->resp_size);
 			res->cc = IPMI_ERR_UNSPECIFIED;
+			ctx->cc = OPAL_HARDWARE;
 			break;
+		} else {
+			prlog(PR_TRACE, "%s Command=%u 1=RESET 7=DIRTY 8=FLUSH 9=ACK 10=ERASE ipmi seq=%u ctx->inflight_seq=%u\n",
+				__func__,
+				msg->data[0],
+				msg->data[1],
+				ctx->inflight_seq);
+			ctx->cc = IPMI_CC_NO_ERROR;
 		}
 		break;
 	default:
@@ -237,57 +404,179 @@  static void hiomap_init(struct ipmi_hiomap *ctx)
 	unlock(&ctx->lock);
 }
 
+static int hiomap_wait_for_cc(struct ipmi_hiomap *ctx, int *cc, uint8_t *seq, uint64_t ticks)
+{
+	uint64_t now;
+	uint64_t start_time;
+	uint64_t wait_time;
+	uint64_t ipmi_hiomap_ticks;
+	uint64_t timeout_counter;
+	int rc;
+
+	prlog(PR_TRACE, "Start wait for cc ipmi seq=%i *cc=%i ticks=%llu\n", *seq, *cc, ticks);
+	rc = 0;
+	if (this_cpu()->tb_invalid) {
+		/*
+		 * SYNC paths already have *cc success
+		 * ASYNC will RE-QUEUE and retry
+		 * we just need to skip the tb logic handling
+		 * we need to close the window to have the logic try the move again
+		 */
+		if (*cc != IPMI_CC_NO_ERROR) {
+			lock(&ctx->lock);
+			ctx->window_state = closed_window;
+			++ctx->missed_cc_count;
+			prlog(PR_NOTICE, "%s tb_invalid, CLOSING WINDOW for cc "
+				"ipmi seq=%i ctx->missed_cc_count=%i\n",
+				__func__, *seq, ctx->missed_cc_count);
+			unlock(&ctx->lock);
+			rc = FLASH_ERR_MISSED_CC;
+		}
+		prlog(PR_NOTICE, "%s tb_invalid, hopefully this will "
+			"retry/recover rc=%i\n",
+			__func__, rc);
+		return rc;
+	}
+	start_time = mftb();
+	now = mftb();
+	wait_time = tb_to_msecs(now - start_time);
+	timeout_counter = 0;
+
+	if (ticks != 0) {
+		ipmi_hiomap_ticks = ticks;
+	} else {
+		ipmi_hiomap_ticks = IPMI_HIOMAP_TICKS;
+	}
+
+	prlog(PR_TRACE, "wait_time=%llu ipmi_hiomap_ticks=%llu ipmi seq=%i "
+			"ctx->missed_cc_count=%i\n",
+		wait_time, ticks, *seq, ctx->missed_cc_count);
+	while (wait_time < ipmi_hiomap_ticks) {
+		++timeout_counter;
+		if (timeout_counter % IPMI_SKIP_INC == 0) {
+			now = mftb();
+			wait_time = tb_to_msecs(now - start_time);
+		}
+		if (*cc == IPMI_CC_NO_ERROR) {
+			prlog(PR_TRACE, "Break cc ipmi seq=%i "
+				"ctx->missed_cc_count=%i\n",
+				*seq, ctx->missed_cc_count);
+			break;
+		}
+	}
+	prlog(PR_TRACE, "Status CHECK wait_for_cc wait_time=%llu *cc=%i "
+		"ipmi seq=%i ctx->missed_cc_count=%i\n",
+		wait_time, *cc, *seq, ctx->missed_cc_count);
+	if (*cc != IPMI_CC_NO_ERROR) {
+		lock(&ctx->lock);
+		ctx->window_state = closed_window;
+		++ctx->missed_cc_count;
+		prlog(PR_TRACE, "CLOSING WINDOW for cc ipmi seq=%i "
+			"ctx->missed_cc_count=%i\n",
+			*seq, ctx->missed_cc_count);
+		unlock(&ctx->lock);
+		rc = FLASH_ERR_MISSED_CC;
+	}
+
+	prlog(PR_TRACE, "Stop wait for *cc=%i ipmi seq=%i "
+		"ctx->missed_cc_count=%i\n",
+		*cc, *seq, ctx->missed_cc_count);
+	return rc;
+
+}
+
 static int hiomap_get_info(struct ipmi_hiomap *ctx)
 {
-	RESULT_INIT(res, ctx);
+	static struct ipmi_hiomap_result info_res;
 	unsigned char req[3];
 	struct ipmi_msg *msg;
+	uint8_t info_seq;
+	int orig_flags;
+	int tmp_sync_flags;
 	int rc;
 
+	info_res.ctx = ctx;
+	info_res.cc = -1;
+
+	lock(&ctx->lock);
+	orig_flags = ctx->bl.flags;
+	/* clear out async to always do sync */
+	tmp_sync_flags = ctx->bl.flags &= ASYNC_REQUIRED_MASK;
+	ctx->bl.flags = tmp_sync_flags;
+	ctx->cc = -1;
+	info_seq = ++ctx->seq;
+	ctx->inflight_seq = info_seq;
+	unlock(&ctx->lock);
+
 	/* Negotiate protocol version 2 */
 	req[0] = HIOMAP_C_GET_INFO;
-	req[1] = ++ctx->seq;
+	req[1] = info_seq;
 	req[2] = HIOMAP_V2;
 
 	msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE,
 		         bmc_platform->sw->ipmi_oem_hiomap_cmd,
-			 ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 6);
+			 ipmi_hiomap_cmd_cb, &info_res, req, sizeof(req), 6);
 
-	rc = hiomap_queue_msg_sync(ctx, msg);
+	rc = hiomap_queue_msg(ctx, msg);
+	lock(&ctx->lock);
+	ctx->bl.flags = orig_flags;
+	unlock(&ctx->lock);
 	if (rc)
 		return rc;
 
-	if (res.cc != IPMI_CC_NO_ERROR) {
-		prerror("%s failed: %d\n", __func__, res.cc);
-		return FLASH_ERR_PARM_ERROR; /* XXX: Find something better? */
+	rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_ACK_DEFAULT);
+
+	if (rc) {
+		prlog(PR_TRACE, "%s hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+			__func__, rc, IPMI_ACK_DEFAULT);
 	}
 
-	return 0;
+	return rc;
 }
 
 static int hiomap_get_flash_info(struct ipmi_hiomap *ctx)
 {
-	RESULT_INIT(res, ctx);
+	static struct ipmi_hiomap_result flash_info_res;
 	unsigned char req[2];
 	struct ipmi_msg *msg;
+	uint8_t flash_info_seq;
+	int orig_flags;
+	int tmp_sync_flags;
 	int rc;
 
+	flash_info_res.ctx = ctx;
+	flash_info_res.cc = -1;
+
+	lock(&ctx->lock);
+	orig_flags = ctx->bl.flags;
+	/* clear out async to always do sync */
+	tmp_sync_flags = ctx->bl.flags &= ASYNC_REQUIRED_MASK;
+	ctx->bl.flags = tmp_sync_flags;
+	ctx->cc = -1;
+	flash_info_seq = ++ctx->seq;
+	ctx->inflight_seq = flash_info_seq;
+	unlock(&ctx->lock);
+
 	req[0] = HIOMAP_C_GET_FLASH_INFO;
-	req[1] = ++ctx->seq;
+	req[1] = flash_info_seq;
 	msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE,
 		         bmc_platform->sw->ipmi_oem_hiomap_cmd,
-			 ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2 + 2 + 2);
+			 ipmi_hiomap_cmd_cb, &flash_info_res, req, sizeof(req), 2 + 2 + 2);
 
-	rc = hiomap_queue_msg_sync(ctx, msg);
+	rc = hiomap_queue_msg(ctx, msg);
+	lock(&ctx->lock);
+	ctx->bl.flags = orig_flags;
+	unlock(&ctx->lock);
 	if (rc)
 		return rc;
 
-	if (res.cc != IPMI_CC_NO_ERROR) {
-		prerror("%s failed: %d\n", __func__, res.cc);
-		return FLASH_ERR_PARM_ERROR; /* XXX: Find something better? */
+	rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_ACK_DEFAULT);
+	if (rc) {
+		prlog(PR_TRACE, "%s hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+			__func__, rc, IPMI_ACK_DEFAULT);
 	}
 
-	return 0;
+	return rc;
 }
 
 static int hiomap_window_move(struct ipmi_hiomap *ctx, uint8_t command,
@@ -295,32 +584,65 @@  static int hiomap_window_move(struct ipmi_hiomap *ctx, uint8_t command,
 {
 	enum lpc_window_state want_state;
 	struct hiomap_v2_range *range;
-	RESULT_INIT(res, ctx);
+	static struct ipmi_hiomap_result move_res;
 	unsigned char req[6];
 	struct ipmi_msg *msg;
+	uint8_t move_seq;
 	bool valid_state;
 	bool is_read;
 	int rc;
 
+	move_res.ctx = ctx;
+	move_res.cc = -1;
 	is_read = (command == HIOMAP_C_CREATE_READ_WINDOW);
 	want_state = is_read ? read_window : write_window;
 
+	/* there will be lock contention between hiomap_window_move and move_cb */
+
 	lock(&ctx->lock);
+	ctx->cc = -1;
+
+	if (ctx->bl.flags & IN_PROGRESS) {
+		pos = ctx->tracking_pos;
+		len = ctx->tracking_len;
+	} else {
+		ctx->tracking_pos = pos;
+		ctx->tracking_len = len;
+	}
 
 	valid_state = want_state == ctx->window_state;
 	rc = hiomap_window_valid(ctx, pos, len);
+
 	if (valid_state && !rc) {
+		/* if its valid stuff the proper maybe modified size */
+		if ((pos + len) > (ctx->current.cur_pos + ctx->current.size)) {
+			/* if we had bumped the adjusted_window_size down in move_cb */
+			if ((ctx->current.adjusted_window_size != ctx->current.size)) {
+				*size = ctx->current.adjusted_window_size;
+			} else {
+				*size = (ctx->current.cur_pos + ctx->current.size) - pos;
+			}
+		} else {
+			*size = len;
+		}
+		ctx->cc = IPMI_CC_NO_ERROR;
 		unlock(&ctx->lock);
-		*size = len;
 		return 0;
 	}
 
-	ctx->window_state = closed_window;
+	ctx->window_state = moving_window;
 
-	unlock(&ctx->lock);
+	ctx->active_size = size;
+	ctx->requested_pos = pos;
+	ctx->requested_len = len;
+
+	move_seq = ++ctx->seq;
+	ctx->inflight_seq = move_seq;
 
 	req[0] = command;
-	req[1] = ++ctx->seq;
+	req[1] = move_seq;
+
+	unlock(&ctx->lock);
 
 	range = (struct hiomap_v2_range *)&req[2];
 	range->offset = cpu_to_le16(bytes_to_blocks(ctx, pos));
@@ -328,38 +650,14 @@  static int hiomap_window_move(struct ipmi_hiomap *ctx, uint8_t command,
 
 	msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE,
 		         bmc_platform->sw->ipmi_oem_hiomap_cmd,
-			 ipmi_hiomap_cmd_cb, &res, req, sizeof(req),
+			 move_cb, &move_res, req, sizeof(req),
 			 2 + 2 + 2 + 2);
 
-	rc = hiomap_queue_msg_sync(ctx, msg);
+	rc = hiomap_queue_msg(ctx, msg);
+
 	if (rc)
 		return rc;
 
-	if (res.cc != IPMI_CC_NO_ERROR) {
-		prlog(PR_INFO, "%s failed: %d\n", __func__, res.cc);
-		return FLASH_ERR_PARM_ERROR; /* XXX: Find something better? */
-	}
-
-	lock(&ctx->lock);
-	*size = len;
-	/* Is length past the end of the window? */
-	if ((pos + len) > (ctx->current.cur_pos + ctx->current.size))
-		/* Adjust size to meet current window */
-		*size = (ctx->current.cur_pos + ctx->current.size) - pos;
-
-	if (len != 0 && *size == 0) {
-		unlock(&ctx->lock);
-		prerror("Invalid window properties: len: %"PRIu64", size: %"PRIu64"\n",
-			len, *size);
-		return FLASH_ERR_PARM_ERROR;
-	}
-
-	prlog(PR_DEBUG, "Opened %s window from 0x%x for %u bytes at 0x%x\n",
-	      (command == HIOMAP_C_CREATE_READ_WINDOW) ? "read" : "write",
-	      ctx->current.cur_pos, ctx->current.size, ctx->current.lpc_addr);
-
-	unlock(&ctx->lock);
-
 	return 0;
 }
 
@@ -368,21 +666,27 @@  static int hiomap_mark_dirty(struct ipmi_hiomap *ctx, uint64_t offset,
 {
 	struct hiomap_v2_range *range;
 	enum lpc_window_state state;
-	RESULT_INIT(res, ctx);
+	static struct ipmi_hiomap_result dirty_res;
 	unsigned char req[6];
 	struct ipmi_msg *msg;
+	uint8_t dirty_seq;
 	uint32_t pos;
 	int rc;
 
+	dirty_res.ctx = ctx;
+	dirty_res.cc = -1;
 	lock(&ctx->lock);
 	state = ctx->window_state;
+	dirty_seq = ++ctx->seq;
+	ctx->inflight_seq = dirty_seq;
+	ctx->cc = -1;
 	unlock(&ctx->lock);
 
 	if (state != write_window)
 		return FLASH_ERR_PARM_ERROR;
 
 	req[0] = HIOMAP_C_MARK_DIRTY;
-	req[1] = ++ctx->seq;
+	req[1] = dirty_seq;
 
 	pos = offset - ctx->current.cur_pos;
 	range = (struct hiomap_v2_range *)&req[2];
@@ -391,19 +695,15 @@  static int hiomap_mark_dirty(struct ipmi_hiomap *ctx, uint64_t offset,
 
 	msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE,
 		         bmc_platform->sw->ipmi_oem_hiomap_cmd,
-			 ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2);
+			 ipmi_hiomap_cmd_cb, &dirty_res, req, sizeof(req), 2);
+
+	rc = hiomap_queue_msg(ctx, msg);
 
-	rc = hiomap_queue_msg_sync(ctx, msg);
 	if (rc)
 		return rc;
 
-	if (res.cc != IPMI_CC_NO_ERROR) {
-		prerror("%s failed: %d\n", __func__, res.cc);
-		return FLASH_ERR_PARM_ERROR;
-	}
-
 	prlog(PR_DEBUG, "Marked flash dirty at 0x%" PRIx64 " for %" PRIu64 "\n",
-	      offset, size);
+		offset, size);
 
 	return 0;
 }
@@ -411,34 +711,36 @@  static int hiomap_mark_dirty(struct ipmi_hiomap *ctx, uint64_t offset,
 static int hiomap_flush(struct ipmi_hiomap *ctx)
 {
 	enum lpc_window_state state;
-	RESULT_INIT(res, ctx);
+	static struct ipmi_hiomap_result flush_res;
 	unsigned char req[2];
 	struct ipmi_msg *msg;
+	uint8_t flush_seq;
 	int rc;
 
+	flush_res.ctx = ctx;
+	flush_res.cc = -1;
 	lock(&ctx->lock);
 	state = ctx->window_state;
+	flush_seq = ++ctx->seq;
+	ctx->inflight_seq = flush_seq;
+	ctx->cc = -1;
 	unlock(&ctx->lock);
 
 	if (state != write_window)
 		return FLASH_ERR_PARM_ERROR;
 
 	req[0] = HIOMAP_C_FLUSH;
-	req[1] = ++ctx->seq;
+	req[1] = flush_seq;
 
 	msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE,
 		         bmc_platform->sw->ipmi_oem_hiomap_cmd,
-			 ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2);
+			 ipmi_hiomap_cmd_cb, &flush_res, req, sizeof(req), 2);
+
+	rc = hiomap_queue_msg(ctx, msg);
 
-	rc = hiomap_queue_msg_sync(ctx, msg);
 	if (rc)
 		return rc;
 
-	if (res.cc != IPMI_CC_NO_ERROR) {
-		prerror("%s failed: %d\n", __func__, res.cc);
-		return FLASH_ERR_PARM_ERROR;
-	}
-
 	prlog(PR_DEBUG, "Flushed writes\n");
 
 	return 0;
@@ -446,26 +748,47 @@  static int hiomap_flush(struct ipmi_hiomap *ctx)
 
 static int hiomap_ack(struct ipmi_hiomap *ctx, uint8_t ack)
 {
-	RESULT_INIT(res, ctx);
+	static struct ipmi_hiomap_result ack_res;
 	unsigned char req[3];
 	struct ipmi_msg *msg;
+	uint8_t ack_seq;
+	int orig_flags;
+	int tmp_sync_flags;
 	int rc;
 
+	ack_res.ctx = ctx;
+	ack_res.cc = -1;
+
+	lock(&ctx->lock);
+	orig_flags = ctx->bl.flags;
+	/* clear out async to always do sync */
+	tmp_sync_flags = ctx->bl.flags &= ASYNC_REQUIRED_MASK;
+	ctx->bl.flags = tmp_sync_flags;
+	ctx->cc = -1;
+	ack_seq = ++ctx->seq;
+	ctx->inflight_seq = ack_seq;
+	unlock(&ctx->lock);
+
 	req[0] = HIOMAP_C_ACK;
-	req[1] = ++ctx->seq;
+	req[1] = ack_seq;
 	req[2] = ack;
 
 	msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE,
 		         bmc_platform->sw->ipmi_oem_hiomap_cmd,
-			 ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2);
+			 ipmi_hiomap_cmd_cb, &ack_res, req, sizeof(req), 2);
 
-	rc = hiomap_queue_msg_sync(ctx, msg);
+	rc = hiomap_queue_msg(ctx, msg);
+	lock(&ctx->lock);
+	ctx->bl.flags = orig_flags;
+	unlock(&ctx->lock);
 	if (rc)
 		return rc;
 
-	if (res.cc != IPMI_CC_NO_ERROR) {
-		prlog(PR_DEBUG, "%s failed: %d\n", __func__, res.cc);
-		return FLASH_ERR_PARM_ERROR;
+	rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_ACK_DEFAULT);
+	if (rc) {
+		prlog(PR_TRACE, "%s hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+			__func__, rc, IPMI_ACK_DEFAULT);
+		return rc;
 	}
 
 	prlog(PR_DEBUG, "Acked events: 0x%x\n", ack);
@@ -478,21 +801,27 @@  static int hiomap_erase(struct ipmi_hiomap *ctx, uint64_t offset,
 {
 	struct hiomap_v2_range *range;
 	enum lpc_window_state state;
-	RESULT_INIT(res, ctx);
+	static struct ipmi_hiomap_result erase_res;
 	unsigned char req[6];
 	struct ipmi_msg *msg;
+	uint8_t erase_seq;
 	uint32_t pos;
 	int rc;
 
+	erase_res.ctx = ctx;
+	erase_res.cc = -1;
 	lock(&ctx->lock);
 	state = ctx->window_state;
+	erase_seq = ++ctx->seq;
+	ctx->inflight_seq = erase_seq;
+	ctx->cc = -1;
 	unlock(&ctx->lock);
 
 	if (state != write_window)
 		return FLASH_ERR_PARM_ERROR;
 
 	req[0] = HIOMAP_C_ERASE;
-	req[1] = ++ctx->seq;
+	req[1] = erase_seq;
 
 	pos = offset - ctx->current.cur_pos;
 	range = (struct hiomap_v2_range *)&req[2];
@@ -501,16 +830,13 @@  static int hiomap_erase(struct ipmi_hiomap *ctx, uint64_t offset,
 
 	msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE,
 		         bmc_platform->sw->ipmi_oem_hiomap_cmd,
-			 ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2);
-	rc = hiomap_queue_msg_sync(ctx, msg);
+			 ipmi_hiomap_cmd_cb, &erase_res, req, sizeof(req), 2);
+
+	rc = hiomap_queue_msg(ctx, msg);
+
 	if (rc)
 		return rc;
 
-	if (res.cc != IPMI_CC_NO_ERROR) {
-		prerror("%s failed: %d\n", __func__, res.cc);
-		return FLASH_ERR_PARM_ERROR;
-	}
-
 	prlog(PR_DEBUG, "Erased flash at 0x%" PRIx64 " for %" PRIu64 "\n",
 	      offset, size);
 
@@ -519,24 +845,53 @@  static int hiomap_erase(struct ipmi_hiomap *ctx, uint64_t offset,
 
 static bool hiomap_reset(struct ipmi_hiomap *ctx)
 {
-	RESULT_INIT(res, ctx);
+	static struct ipmi_hiomap_result reset_res;
 	unsigned char req[2];
 	struct ipmi_msg *msg;
+	uint8_t reset_seq;
+	int orig_flags;
+	int tmp_sync_flags;
+	int rc;
 
-	prlog(PR_NOTICE, "Reset\n");
+	prlog(PR_NOTICE, "%s Reset ENTRY\n", __func__);
+	reset_res.ctx = ctx;
+	reset_res.cc = -1;
+
+	lock(&ctx->lock);
+	orig_flags = ctx->bl.flags;
+	/* clear out async to always do sync */
+	tmp_sync_flags = ctx->bl.flags &= ASYNC_REQUIRED_MASK;
+	ctx->bl.flags = tmp_sync_flags;
+	reset_seq = ++ctx->seq;
+	ctx->cc = -1;
+	ctx->inflight_seq = reset_seq;
+	unlock(&ctx->lock);
 
 	req[0] = HIOMAP_C_RESET;
-	req[1] = ++ctx->seq;
+	req[1] = reset_seq;
 	msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE,
 		         bmc_platform->sw->ipmi_oem_hiomap_cmd,
-			 ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2);
-	ipmi_queue_msg_sync(msg);
+			 ipmi_hiomap_cmd_cb, &reset_res, req, sizeof(req), 2);
+
+	rc = hiomap_queue_msg(ctx, msg);
+	lock(&ctx->lock);
+	ctx->bl.flags = orig_flags;
+	unlock(&ctx->lock);
+
+	if (rc) {
+		prlog(PR_NOTICE, "%s reset queue msg failed: rc=%d\n", __func__, rc);
+		return false;
+	}
 
-	if (res.cc != IPMI_CC_NO_ERROR) {
-		prlog(PR_ERR, "%s failed: %d\n", __func__, res.cc);
+	rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_ACK_DEFAULT);
+
+	if (rc) {
+		prlog(PR_NOTICE, "%s hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+			__func__, rc, IPMI_ACK_DEFAULT);
 		return false;
 	}
 
+	prlog(PR_NOTICE, "%s Reset EXIT\n", __func__);
 	return true;
 }
 
@@ -666,6 +1021,7 @@  static int ipmi_hiomap_handle_events(struct ipmi_hiomap *ctx)
 	 * Therefore it is enough to mark the window as closed to consider it
 	 * recovered.
 	 */
+
 	if (status & (HIOMAP_E_PROTOCOL_RESET | HIOMAP_E_WINDOW_RESET))
 		ctx->window_state = closed_window;
 
@@ -737,8 +1093,9 @@  static int ipmi_hiomap_read(struct blocklevel_device *bl, uint64_t pos,
 			    void *buf, uint64_t len)
 {
 	struct ipmi_hiomap *ctx;
-	uint64_t size;
-	int rc = 0;
+	enum lpc_window_state state;
+	static uint64_t size;
+	int rc;
 
 	/* LPC is only 32bit */
 	if (pos > UINT_MAX || len > UINT_MAX)
@@ -746,88 +1103,208 @@  static int ipmi_hiomap_read(struct blocklevel_device *bl, uint64_t pos,
 
 	ctx = container_of(bl, struct ipmi_hiomap, bl);
 
+	lock(&ctx->transaction_lock);
+
 	rc = ipmi_hiomap_handle_events(ctx);
 	if (rc)
-		return rc;
+		goto out;
+
+	lock(&ctx->lock);
+	if (ctx->bl.flags & IN_PROGRESS) {
+		buf = ctx->tracking_buf;
+		pos = ctx->tracking_pos;
+		len = ctx->tracking_len;
+	} else {
+		ctx->tracking_buf = buf;
+		ctx->tracking_pos = 0;
+		ctx->tracking_len = 0;
+	}
+	unlock(&ctx->lock);
 
 	prlog(PR_TRACE, "Flash read at %#" PRIx64 " for %#" PRIx64 "\n", pos,
 	      len);
 	while (len > 0) {
-		/* Move window and get a new size to read */
-		rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_READ_WINDOW, pos,
-				        len, &size);
-		if (rc)
-			return rc;
-
-		/* Perform the read for this window */
-		rc = lpc_window_read(ctx, pos, buf, size);
-		if (rc)
-			return rc;
-
-		/* Check we can trust what we read */
 		lock(&ctx->lock);
-		rc = hiomap_window_valid(ctx, pos, size);
+		state = ctx->window_state;
 		unlock(&ctx->lock);
-		if (rc)
-			return rc;
+		if (state != moving_window) {
+			/* Move window and get a new size to read */
+			rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_READ_WINDOW, pos,
+				len, &size);
+			if (rc) {
+				prlog(PR_NOTICE, "%s hiomap_window_move failed: rc=%d\n",
+					__func__, rc);
+				goto out;
+			}
+		} else {
+			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_HIOMAP_TICKS_DEFAULT);
+			if (rc) {
+				prlog(PR_TRACE, "%s move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					__func__, rc, IPMI_HIOMAP_TICKS_DEFAULT);
+				goto out;
+			}
+		}
 
-		len -= size;
-		pos += size;
-		buf += size;
+		lock(&ctx->lock);
+		state = ctx->window_state;
+		unlock(&ctx->lock);
+		if (state == read_window) {
+			/*
+			 * don't lock in case move_cb in progress
+			 * if we get here the state is good
+			 * just double-checking
+			 */
+			if (ctx->cc != IPMI_CC_NO_ERROR) {
+				prlog(PR_NOTICE, "%s failed: cc=%d\n", __func__, ctx->cc);
+				rc = OPAL_HARDWARE;
+				goto out;
+			}
+			/* Perform the read for this window */
+			rc = lpc_window_read(ctx, pos, buf, size);
+			if (rc) {
+				prlog(PR_NOTICE, "%s lpc_window_read failed: rc=%d\n", __func__, rc);
+				goto out;
+			}
+
+			/* Check we can trust what we read */
+			lock(&ctx->lock);
+			rc = hiomap_window_valid(ctx, pos, size);
+			unlock(&ctx->lock);
+			if (rc) {
+				prlog(PR_NOTICE, "%s hiomap_window_valid failed: rc=%d\n", __func__, rc);
+				goto out;
+			}
+
+			len -= size;
+			pos += size;
+			buf += size;
+			lock(&ctx->lock);
+			ctx->tracking_len = len;
+			ctx->tracking_pos = pos;
+			ctx->tracking_buf = buf;
+			unlock(&ctx->lock);
+		}
 	}
-	return rc;
 
+out:	unlock(&ctx->transaction_lock);
+	return rc;
 }
 
 static int ipmi_hiomap_write(struct blocklevel_device *bl, uint64_t pos,
 			     const void *buf, uint64_t len)
 {
 	struct ipmi_hiomap *ctx;
-	uint64_t size;
-	int rc = 0;
+	enum lpc_window_state state;
+	static uint64_t size;
+	int rc;
 
 	/* LPC is only 32bit */
 	if (pos > UINT_MAX || len > UINT_MAX)
 		return FLASH_ERR_PARM_ERROR;
 
 	ctx = container_of(bl, struct ipmi_hiomap, bl);
+	lock(&ctx->transaction_lock);
 
 	rc = ipmi_hiomap_handle_events(ctx);
 	if (rc)
-		return rc;
+		goto out;
+
+	lock(&ctx->lock);
+	if (ctx->bl.flags & IN_PROGRESS) {
+		buf = ctx->tracking_buf;
+		pos = ctx->tracking_pos;
+		len = ctx->tracking_len;
+	} else {
+		ctx->tracking_buf = (void *) buf;
+		ctx->tracking_pos = 0;
+		ctx->tracking_len = 0;
+	}
+	unlock(&ctx->lock);
 
 	prlog(PR_TRACE, "Flash write at %#" PRIx64 " for %#" PRIx64 "\n", pos,
 	      len);
 	while (len > 0) {
-		/* Move window and get a new size to read */
-		rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_WRITE_WINDOW, pos,
-				        len, &size);
-		if (rc)
-			return rc;
-
-		/* Perform the write for this window */
-		rc = lpc_window_write(ctx, pos, buf, size);
-		if (rc)
-			return rc;
-
-		rc = hiomap_mark_dirty(ctx, pos, size);
-		if (rc)
-			return rc;
+		lock(&ctx->lock);
+		state = ctx->window_state;
+		unlock(&ctx->lock);
+		if (state != moving_window) {
+			/* Move window and get a new size to read */
+			rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_WRITE_WINDOW, pos,
+					        len, &size);
+			if (rc) {
+				prlog(PR_NOTICE, "%s hiomap_window_move failed: rc=%d\n",
+					__func__, rc);
+				goto out;
+			}
+		} else {
+			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
+			if (rc) {
+				prlog(PR_TRACE, "%s move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					__func__, rc, IPMI_LONG_TICKS);
+				goto out;
+			}
+		}
 
-		/*
-		 * The BMC *should* flush if the window is implicitly closed,
-		 * but do an explicit flush here to be sure.
-		 *
-		 * XXX: Removing this could improve performance
-		 */
-		rc = hiomap_flush(ctx);
-		if (rc)
-			return rc;
+		lock(&ctx->lock);
+		state = ctx->window_state;
+		unlock(&ctx->lock);
 
-		len -= size;
-		pos += size;
-		buf += size;
+		if (state == write_window) {
+			if (ctx->cc != IPMI_CC_NO_ERROR) {
+				prlog(PR_NOTICE, "%s failed: cc=%d\n", __func__, ctx->cc);
+				rc = OPAL_HARDWARE;
+				goto out;
+			}
+
+			/* Perform the write for this window */
+			rc = lpc_window_write(ctx, pos, buf, size);
+			if (rc) {
+				prlog(PR_NOTICE, "%s lpc_window_write failed: rc=%d\n", __func__, rc);
+				goto out;
+			}
+
+			rc = hiomap_mark_dirty(ctx, pos, size);
+			if (rc) {
+				prlog(PR_NOTICE, "%s hiomap_mark_dirty failed: rc=%d\n", __func__, rc);
+				goto out;
+			}
+			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
+			if (rc) {
+				prlog(PR_TRACE, "%s dirty hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					__func__, rc, IPMI_LONG_TICKS);
+				goto out;
+			}
+
+			/*
+			 * The BMC *should* flush if the window is implicitly closed,
+			 * but do an explicit flush here to be sure.
+			 *
+			 * XXX: Removing this could improve performance
+			 */
+			rc = hiomap_flush(ctx);
+			if (rc) {
+				prlog(PR_NOTICE, "%s hiomap_flush failed: rc=%d\n", __func__, rc);
+				goto out;
+			}
+			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
+			if (rc) {
+				prlog(PR_TRACE, "%s flush hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					__func__, rc, IPMI_LONG_TICKS);
+				goto out;
+			}
+
+			len -= size;
+			pos += size;
+			buf += size;
+			lock(&ctx->lock);
+			ctx->tracking_len = len;
+			ctx->tracking_pos = pos;
+			ctx->tracking_buf = (void *) buf;
+			unlock(&ctx->lock);
+		}
 	}
+
+out:	unlock(&ctx->transaction_lock);
 	return rc;
 }
 
@@ -835,6 +1312,8 @@  static int ipmi_hiomap_erase(struct blocklevel_device *bl, uint64_t pos,
 			     uint64_t len)
 {
 	struct ipmi_hiomap *ctx;
+	enum lpc_window_state state;
+	static uint64_t size;
 	int rc;
 
 	/* LPC is only 32bit */
@@ -842,39 +1321,94 @@  static int ipmi_hiomap_erase(struct blocklevel_device *bl, uint64_t pos,
 		return FLASH_ERR_PARM_ERROR;
 
 	ctx = container_of(bl, struct ipmi_hiomap, bl);
+	lock(&ctx->transaction_lock);
 
 	rc = ipmi_hiomap_handle_events(ctx);
 	if (rc)
-		return rc;
+		goto out;
+
+	lock(&ctx->lock);
+	if (ctx->bl.flags & IN_PROGRESS) {
+		pos = ctx->tracking_pos;
+		len = ctx->tracking_len;
+	} else {
+		ctx->tracking_pos = 0;
+		ctx->tracking_len = 0;
+	}
+	unlock(&ctx->lock);
 
 	prlog(PR_TRACE, "Flash erase at 0x%08x for 0x%08x\n", (u32) pos,
 	      (u32) len);
 	while (len > 0) {
-		uint64_t size;
-
-		/* Move window and get a new size to erase */
-		rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_WRITE_WINDOW, pos,
-				        len, &size);
-		if (rc)
-			return rc;
-
-		rc = hiomap_erase(ctx, pos, size);
-		if (rc)
-			return rc;
-
-		/*
-		 * Flush directly, don't mark that region dirty otherwise it
-		 * isn't clear if a write happened there or not
-		 */
-		rc = hiomap_flush(ctx);
-		if (rc)
-			return rc;
+		lock(&ctx->lock);
+		state = ctx->window_state;
+		unlock(&ctx->lock);
+		if (state != moving_window) {
+			/* Move window and get a new size to erase */
+			rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_WRITE_WINDOW, pos,
+					        len, &size);
+			if (rc) {
+				prlog(PR_NOTICE, "%s hiomap_window_move failed: rc=%d\n",
+					__func__, rc);
+				goto out;
+			}
+		} else {
+			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
+			if (rc) {
+				prlog(PR_TRACE, "%s move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					__func__, rc, IPMI_LONG_TICKS);
+				goto out;
+			}
+		}
 
-		len -= size;
-		pos += size;
+		lock(&ctx->lock);
+		state = ctx->window_state;
+		unlock(&ctx->lock);
+		if (state == write_window) {
+			if (ctx->cc != IPMI_CC_NO_ERROR) {
+				prlog(PR_NOTICE, "%s failed: cc=%d\n", __func__, ctx->cc);
+				rc = OPAL_HARDWARE;
+				goto out;
+			}
+			rc = hiomap_erase(ctx, pos, size);
+			if (rc) {
+				prlog(PR_NOTICE, "%s hiomap_erase failed: rc=%d\n", __func__, rc);
+				goto out;
+			}
+			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
+			if (rc) {
+				prlog(PR_TRACE, "%s move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					__func__, rc, IPMI_LONG_TICKS);
+				goto out;
+			}
+
+			/*
+			 * Flush directly, don't mark that region dirty otherwise it
+			 * isn't clear if a write happened there or not
+			 */
+			rc = hiomap_flush(ctx);
+			if (rc) {
+				prlog(PR_NOTICE, "%s hiomap_flush failed: rc=%d\n", __func__, rc);
+				goto out;
+			}
+			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
+			if (rc) {
+				prlog(PR_TRACE, "%s move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					__func__, rc, IPMI_LONG_TICKS);
+				goto out;
+			}
+
+			len -= size;
+			pos += size;
+			lock(&ctx->lock);
+			ctx->tracking_len = len;
+			ctx->tracking_pos = pos;
+			unlock(&ctx->lock);
+		}
 	}
 
-	return 0;
+out:	unlock(&ctx->transaction_lock);
+	return rc;
 }
 
 static int ipmi_hiomap_get_flash_info(struct blocklevel_device *bl,
@@ -885,6 +1419,7 @@  static int ipmi_hiomap_get_flash_info(struct blocklevel_device *bl,
 	int rc;
 
 	ctx = container_of(bl, struct ipmi_hiomap, bl);
+	lock(&ctx->transaction_lock);
 
 	rc = ipmi_hiomap_handle_events(ctx);
 	if (rc)
@@ -903,6 +1438,7 @@  static int ipmi_hiomap_get_flash_info(struct blocklevel_device *bl,
 	if (erase_granule)
 		*erase_granule = ctx->erase_granule;
 
+	unlock(&ctx->transaction_lock);
 	return 0;
 }
 
@@ -925,6 +1461,7 @@  int ipmi_hiomap_init(struct blocklevel_device **bl)
 		return FLASH_ERR_MALLOC_FAILED;
 
 	init_lock(&ctx->lock);
+	init_lock(&ctx->transaction_lock);
 
 	ctx->bl.read = &ipmi_hiomap_read;
 	ctx->bl.write = &ipmi_hiomap_write;
diff --git a/libflash/ipmi-hiomap.h b/libflash/ipmi-hiomap.h
index 489d55e..4870fc5 100644
--- a/libflash/ipmi-hiomap.h
+++ b/libflash/ipmi-hiomap.h
@@ -10,12 +10,36 @@ 
 
 #include "blocklevel.h"
 
-enum lpc_window_state { closed_window, read_window, write_window };
+/*
+ * we basically check for a quick response
+ * otherwise we catch the updated window in the next cycle
+ */
+#define IPMI_HIOMAP_TICKS 5
+#define IPMI_HIOMAP_TICKS_DEFAULT 0
+
+/* time to wait for write/erase/dirty ops */
+#define IPMI_LONG_TICKS 500
+
+/*
+ * default for ack'ing typically 1-10 wait_time's
+ * allow upper bounds because if we cannot ack
+ * we make no forward progress post protocol reset
+ * async paths will retry
+ * sync paths always hit with zero wait_time elapsed
+ * with ASYNC_REQUIRED_MASK'd out, this is not used
+ */
+#define IPMI_ACK_DEFAULT 500
+
+/* increment to skip the waiting loop */
+#define IPMI_SKIP_INC 2
+
+enum lpc_window_state { closed_window, read_window, write_window, moving_window };
 
 struct lpc_window {
 	uint32_t lpc_addr; /* Offset into LPC space */
 	uint32_t cur_pos;  /* Current position of the window in the flash */
 	uint32_t size;     /* Size of the window into the flash */
+	uint32_t adjusted_window_size; /* store adjusted window size */
 };
 
 struct ipmi_hiomap {
@@ -35,6 +59,21 @@  struct ipmi_hiomap {
 	 * three variables are protected by lock to avoid conflict.
 	 */
 	struct lock lock;
+	struct lock transaction_lock;
+
+	/* callers transaction info */
+	uint64_t *active_size;
+	uint64_t requested_len;
+	uint64_t requested_pos;
+	uint64_t tracking_len;
+	uint64_t tracking_pos;
+	void *tracking_buf;
+
+	int missed_cc_count;
+	int cc;
+	/* inflight_seq used to aide debug */
+	/* with other OPAL ipmi msg's      */
+	uint8_t inflight_seq;
 	uint8_t bmc_state;
 	enum lpc_window_state window_state;
 };
-- 
2.7.4


From c5c03a2e9d9017f962883f89e98bdda7ff43fd06 Mon Sep 17 00:00:00 2001
From: Deb McLemore <debmc@linux.ibm.com>
Date: Tue, 6 Aug 2019 13:16:27 -0500
Subject: [PATCH 07/13] lpc: Fix lpc probe irqmask handling

Properly handle the irqmask from the callers
of lpc_probe_prepare and lpc_probe_test.

lpc_probe_prepare reads and saves the existing irqmask
before clearing it; lpc_probe_test then restores the
saved irqmask after the probe completes.

Signed-off-by: Deb McLemore <debmc@linux.ibm.com>
---
 hw/lpc.c | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)

diff --git a/hw/lpc.c b/hw/lpc.c
index 354d2b4..fedceb4 100644
--- a/hw/lpc.c
+++ b/hw/lpc.c
@@ -461,30 +461,28 @@  static const struct lpc_error_entry lpc_error_table[] = {
 	LPC_ERROR(LPC_HC_IRQ_SYNC_ABNORM_ERR, OPAL_WRONG_STATE, "Got SYNC abnormal error."),
 };
 
-static int64_t lpc_probe_prepare(struct lpcm *lpc)
+static int64_t lpc_probe_prepare(struct lpcm *lpc, uint32_t *irqmask)
 {
 	const uint32_t irqmask_addr = lpc_reg_opb_base + LPC_HC_IRQMASK;
 	const uint32_t irqstat_addr = lpc_reg_opb_base + LPC_HC_IRQSTAT;
-	uint32_t irqmask;
 	int rc;
 
-	rc = opb_read(lpc, irqmask_addr, &irqmask, 4);
+	rc = opb_read(lpc, irqmask_addr, irqmask, 4);
 	if (rc)
 		return rc;
 
-	irqmask &= ~LPC_HC_IRQ_SYNC_NORESP_ERR;
-	rc = opb_write(lpc, irqmask_addr, irqmask, 4);
+	rc = opb_write(lpc, irqmask_addr, 0, 4);
 	if (rc)
 		return rc;
 
 	return opb_write(lpc, irqstat_addr, LPC_HC_IRQ_SYNC_NORESP_ERR, 4);
 }
 
-static int64_t lpc_probe_test(struct lpcm *lpc)
+static int64_t lpc_probe_test(struct lpcm *lpc, uint32_t irqmask)
 {
 	const uint32_t irqmask_addr = lpc_reg_opb_base + LPC_HC_IRQMASK;
 	const uint32_t irqstat_addr = lpc_reg_opb_base + LPC_HC_IRQSTAT;
-	uint32_t irqmask, irqstat;
+	uint32_t irqstat;
 	int64_t idx;
 	int rc;
 
@@ -496,11 +494,6 @@  static int64_t lpc_probe_test(struct lpcm *lpc)
 	if (rc)
 		return rc;
 
-	rc = opb_read(lpc, irqmask_addr, &irqmask, 4);
-	if (rc)
-		return rc;
-
-	irqmask |= LPC_HC_IRQ_SYNC_NORESP_ERR;
 	rc = opb_write(lpc, irqmask_addr, irqmask, 4);
 	if (rc)
 		return rc;
@@ -525,11 +518,12 @@  static int64_t __lpc_write(struct lpcm *lpc, enum OpalLPCAddressType addr_type,
 			   bool probe)
 {
 	uint32_t opb_base;
+	uint32_t irqmask;
 	int64_t rc;
 
 	lock(&lpc->lock);
 	if (probe) {
-		rc = lpc_probe_prepare(lpc);
+		rc = lpc_probe_prepare(lpc, &irqmask);
 		if (rc)
 			goto bail;
 	}
@@ -548,7 +542,7 @@  static int64_t __lpc_write(struct lpcm *lpc, enum OpalLPCAddressType addr_type,
 		goto bail;
 
 	if (probe)
-		rc = lpc_probe_test(lpc);
+		rc = lpc_probe_test(lpc, irqmask);
  bail:
 	unlock(&lpc->lock);
 	return rc;
@@ -612,11 +606,12 @@  static int64_t __lpc_read(struct lpcm *lpc, enum OpalLPCAddressType addr_type,
 			  bool probe)
 {
 	uint32_t opb_base;
+	uint32_t irqmask;
 	int64_t rc;
 
 	lock(&lpc->lock);
 	if (probe) {
-		rc = lpc_probe_prepare(lpc);
+		rc = lpc_probe_prepare(lpc, &irqmask);
 		if (rc)
 			goto bail;
 	}
@@ -635,7 +630,7 @@  static int64_t __lpc_read(struct lpcm *lpc, enum OpalLPCAddressType addr_type,
 		goto bail;
 
 	if (probe)
-		rc = lpc_probe_test(lpc);
+		rc = lpc_probe_test(lpc, irqmask);
  bail:
 	unlock(&lpc->lock);
 	return rc;
-- 
2.7.4


From 5cf004354e76755303637d00bc0e47768d3e0aa6 Mon Sep 17 00:00:00 2001
From: Deb McLemore <debmc@linux.ibm.com>
Date: Tue, 6 Aug 2019 13:33:40 -0500
Subject: [PATCH 08/13] hw/bt: Add some more queue debug

Enable some more BT_QUEUE_DEBUG.

Signed-off-by: Deb McLemore <debmc@linux.ibm.com>
---
 hw/bt.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/hw/bt.c b/hw/bt.c
index 2f3143f..824a1f2 100644
--- a/hw/bt.c
+++ b/hw/bt.c
@@ -463,8 +463,12 @@  static void bt_send_and_unlock(void)
 	 * Timeouts and retries happen in bt_expire_old_msg()
 	 * called from bt_poll()
 	 */
-	if (bt_idle() && inflight_bt_msg->send_count == 0)
+	if (bt_idle() && inflight_bt_msg->send_count == 0) {
+#if BT_QUEUE_DEBUG
+		BT_Q_DBG(inflight_bt_msg, "Sending inflight_bt_msg NOW");
+#endif
 		bt_send_msg(inflight_bt_msg);
+	}
 
 out_unlock:
 	unlock(&bt.lock);
-- 
2.7.4


From 00ed7a3fb5dfa17d3896d97b6d7266162c17c6ab Mon Sep 17 00:00:00 2001
From: Deb McLemore <debmc@linux.ibm.com>
Date: Wed, 4 Sep 2019 12:46:05 -0500
Subject: [PATCH 09/13] core/flash: Update some logging

Update some logging to help in problem determination.

PR_TRACE is used since PR_DEBUG seems to always trigger;
the cause has not yet been determined.

Signed-off-by: Deb McLemore <debmc@linux.ibm.com>
---
 core/flash.c | 37 ++++++++++++++++++++++++++++++++-----
 1 file changed, 32 insertions(+), 5 deletions(-)

diff --git a/core/flash.c b/core/flash.c
index 98614f7..579af7b 100644
--- a/core/flash.c
+++ b/core/flash.c
@@ -40,8 +40,11 @@  enum flash_op {
 	FLASH_OP_READ,
 	FLASH_OP_WRITE,
 	FLASH_OP_ERASE,
+	FLASH_MAX
 };
 
+const char flash_string[FLASH_MAX][10]={"READ", "WRITE", "ERASE"};
+
 struct flash_async_info {
 	enum flash_op op;
 	struct timer poller;
@@ -324,7 +327,8 @@  static void flash_poll(struct timer *t __unused, void *data, uint64_t now __unus
 	offset = flash->async.pos;
 	buf = flash->async.buf;
 	len = MIN(flash->async.len, flash->block_size*10);
-	printf("Flash poll op %d len %llu\n", flash->async.op, len);
+	prlog(PR_TRACE, "%s flash_poll transaction_id=%i flash->bl->flags=%i Async WORKING chunk len=%llu offset=%llu buf=%p\n",
+		flash_string[flash->async.op], flash->async.transaction_id, flash->bl->flags, len, offset, (void *)buf);
 
 	switch (flash->async.op) {
 	case FLASH_OP_READ:
@@ -371,6 +375,8 @@  static void flash_poll(struct timer *t __unused, void *data, uint64_t now __unus
 		 * to be sure that we jump back out to Linux so that if this
 		 * very long we don't cause RCU or the scheduler to freak
 		 */
+		prlog(PR_TRACE, "%s flash_poll transaction_id=%i Async work REMAINS working chunk len=%llu pos=%llu buf=%p\n",
+			flash_string[flash->async.op], flash->async.transaction_id, flash->async.len, flash->async.pos, (void *)flash->async.buf);
 		schedule_timer(&flash->async.poller, 0);
 		return;
 	} else {
@@ -390,6 +396,8 @@  static void flash_poll(struct timer *t __unused, void *data, uint64_t now __unus
 				 * or cases immediately following an unaligned window read/move (so slow)
 				 */
 				flash->bl->flags |= IN_PROGRESS;
+				prlog(PR_TRACE, "flash_poll RE-QUEUE transaction_id=%i flash->async.retry_counter=%i in_progress_schedule_delay=%i\n",
+					flash->async.transaction_id, flash->async.retry_counter, flash->async.in_progress_schedule_delay);
 				schedule_timer(&flash->async.poller, msecs_to_tb(flash->async.in_progress_schedule_delay));
 				return;
 			}
@@ -400,6 +408,8 @@  static void flash_poll(struct timer *t __unused, void *data, uint64_t now __unus
 		}
 	}
 
+	prlog(PR_TRACE, "%s flash_poll transaction_id=%i END len=%llu pos=%llu buf=%p rc=%i\n",
+		flash_string[flash->async.op], flash->async.transaction_id, flash->async.len, flash->async.pos, (void *)flash->async.buf, rc);
 	flash->bl->flags &= IN_PROGRESS_MASK;
 	flash->bl->flags &= ASYNC_REQUIRED_MASK;
 	/* release the flash before we allow next opal entry */
@@ -612,14 +622,13 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 
 	if (size > flash->size || offset >= flash->size
 			|| offset + size > flash->size) {
-		prlog(PR_DEBUG, "Requested flash op %d beyond flash size %" PRIu64 "\n",
+		prlog(PR_NOTICE, "Requested flash op %d beyond flash size %" PRIu64 "\n",
 				op, flash->size);
 		rc = OPAL_PARAMETER;
 		goto out;
 	}
 
 	len = MIN(size, flash->block_size*10);
-	printf("Flash op %d len %llu\n", op, len);
 	flash->async.op = op;
 	flash->async.token = token;
 	flash->async.buf = buf + len;
@@ -629,6 +638,8 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 	flash->async.in_progress_schedule_delay = FLASH_SCHEDULE_DELAY;
 	flash->bl->flags |= ASYNC_REQUIRED;
 	++flash->async.transaction_id;
+	prlog(PR_TRACE, "%s opal_flash_op transaction_id=%i flash->bl->flags=%i BEGIN total size=%llu Async WORKING chunk len=%llu offset=%llu buf=%p\n",
+		flash_string[op], flash->async.transaction_id, flash->bl->flags, size, len, offset, (void *)buf);
 
 	/*
 	 * These ops intentionally have no smarts (ecc correction or erase
@@ -666,11 +677,15 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 				goto out;
 			}
 			flash->bl->flags |= IN_PROGRESS;
+			prlog(PR_TRACE, "opal_flash_op RE-QUEUE transaction_id=%i flash->async.retry_counter=%i\n",
+				flash->async.transaction_id, flash->async.retry_counter);
 			schedule_timer(&flash->async.poller, msecs_to_tb(FLASH_SCHEDULE_DELAY));
 			/* Don't release the flash */
 			return OPAL_ASYNC_COMPLETION;
 		} else {
-			prlog(PR_ERR, "%s: Op %d failed with rc %d\n", __func__, op, rc);
+			/* PR_NOTICE since invalid requests can produce a problem which is not an ERR */
+			prlog(PR_NOTICE, "%s: %s (%d) failed rc=%d opal_flash_op transaction_id=%i\n", __func__,
+				flash_string[op], op, rc, flash->async.transaction_id);
 			rc = OPAL_HARDWARE;
 		}
 		goto out;
@@ -679,6 +694,8 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 	if (size - len) {
 		/* Work remains */
 		schedule_timer(&flash->async.poller, 0);
+	        prlog(PR_TRACE, "%s opal_flash_op transaction_id=%i Async work REMAINS size=%llu working chunk len=%llu offset=%llu buf=%p\n",
+			flash_string[flash->async.op], flash->async.transaction_id, size, len, offset, (void *)buf);
 		/* Don't release the flash */
 		return OPAL_ASYNC_COMPLETION;
 	} else {
@@ -693,9 +710,19 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 		 */
 		opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, flash->async.token, rc);
 	}
+        prlog(PR_TRACE, "%s opal_flash_op transaction_id=%i Async work COMPLETE size=%llu chunk len=%llu offset=%llu buf=%p\n",
+		flash_string[flash->async.op], flash->async.transaction_id, size, len, offset, (void *)buf);
 	rc = OPAL_ASYNC_COMPLETION;
 out:
 	flash_release(flash);
+	if (rc != OPAL_ASYNC_COMPLETION) {
+		prlog(PR_NOTICE, "%s opal_flash_op transaction_id=%i retry_counter=%i PROBLEM rc=%d size=%llu chunk len=%llu offset=%llu buf=%p\n",
+			flash_string[flash->async.op], flash->async.transaction_id, flash->async.retry_counter, rc, size, len, offset, (void *)buf);
+	} else {
+		prlog(PR_TRACE, "%s opal_flash_op transaction_id=%i END retry_counter=%i size=%llu chunk len=%llu offset=%llu buf=%p\n",
+			flash_string[flash->async.op], flash->async.transaction_id, flash->async.retry_counter, size, len, offset, (void *)buf);
+	}
+
 	flash->bl->flags &= IN_PROGRESS_MASK;
 	flash->bl->flags &= ASYNC_REQUIRED_MASK;
 	return rc;
@@ -898,7 +925,7 @@  static int flash_load_resource(enum resource_id id, uint32_t subid,
 		}
 
 		if (*len > ffs_part_size) {
-			prerror("FLASH: Cannot load %s. Content is larger than the partition\n",
+			prerror("Cannot load %s. Content is larger than the partition\n",
 					name);
 			rc = OPAL_PARAMETER;
 			goto out_free_ffs;
-- 
2.7.4


From 2fc75888d8356430543c22f50b074705901a7b76 Mon Sep 17 00:00:00 2001
From: Deb McLemore <debmc@linux.ibm.com>
Date: Wed, 4 Sep 2019 13:28:19 -0500
Subject: [PATCH 10/13] libflash/ipmi-hiomap: Add some debug logging

Add some additional debug logging for problem determination.

PR_TRACE is used since PR_DEBUG seems to always trigger;
the cause has not yet been determined.

Signed-off-by: Deb McLemore <debmc@linux.ibm.com>
---
 core/flash.c           |   4 +-
 libflash/ipmi-hiomap.c | 177 ++++++++++++++++++++++++++++++++++---------------
 2 files changed, 125 insertions(+), 56 deletions(-)

diff --git a/core/flash.c b/core/flash.c
index 579af7b..43d5818 100644
--- a/core/flash.c
+++ b/core/flash.c
@@ -677,8 +677,8 @@  static int64_t opal_flash_op(enum flash_op op, uint64_t id, uint64_t offset,
 				goto out;
 			}
 			flash->bl->flags |= IN_PROGRESS;
-			prlog(PR_TRACE, "opal_flash_op RE-QUEUE transaction_id=%i flash->async.retry_counter=%i\n",
-				flash->async.transaction_id, flash->async.retry_counter);
+			prlog(PR_TRACE, "opal_flash_op RE-QUEUE transaction_id=%i flash->async.retry_counter=%i in_progress_schedule_delay=%i\n",
+				flash->async.transaction_id, flash->async.retry_counter, FLASH_SCHEDULE_DELAY);
 			schedule_timer(&flash->async.poller, msecs_to_tb(FLASH_SCHEDULE_DELAY));
 			/* Don't release the flash */
 			return OPAL_ASYNC_COMPLETION;
diff --git a/libflash/ipmi-hiomap.c b/libflash/ipmi-hiomap.c
index 08af005..df67173 100644
--- a/libflash/ipmi-hiomap.c
+++ b/libflash/ipmi-hiomap.c
@@ -58,10 +58,15 @@  static inline uint16_t bytes_to_blocks_align_up(struct ipmi_hiomap *ctx,
 /* Call under ctx->lock */
 static int hiomap_protocol_ready(struct ipmi_hiomap *ctx)
 {
-	if (!(ctx->bmc_state & HIOMAP_E_DAEMON_READY))
+	prlog(PR_TRACE, "%s ctx->bmc_state=%i\n", __func__, ctx->bmc_state);
+	if (!(ctx->bmc_state & HIOMAP_E_DAEMON_READY)) {
+		prlog(PR_NOTICE, "%s FLASH_ERR_DEVICE_GONE\n", __func__);
 		return FLASH_ERR_DEVICE_GONE;
-	if (ctx->bmc_state & HIOMAP_E_FLASH_LOST)
+	}
+	if (ctx->bmc_state & HIOMAP_E_FLASH_LOST) {
+		prlog(PR_NOTICE, "%s HIOMAP_E_FLASH_LOST\n", __func__);
 		return FLASH_ERR_AGAIN;
+	}
 
 	return 0;
 }
@@ -111,16 +116,27 @@  static int hiomap_queue_msg(struct ipmi_hiomap *ctx, struct ipmi_msg *msg)
 static int hiomap_window_valid(struct ipmi_hiomap *ctx, uint64_t pos,
 			        uint64_t len)
 {
-	if (ctx->bmc_state & HIOMAP_E_FLASH_LOST)
+	if (ctx->bmc_state & HIOMAP_E_FLASH_LOST) {
+		prlog(PR_NOTICE, "%s HIOMAP_E_FLASH_LOST\n", __func__);
 		return FLASH_ERR_AGAIN;
-	if (ctx->bmc_state & HIOMAP_E_PROTOCOL_RESET)
+	}
+	if (ctx->bmc_state & HIOMAP_E_PROTOCOL_RESET) {
+		prlog(PR_NOTICE, "%s HIOMAP_E_PROTOCOL_RESET\n", __func__);
 		return FLASH_ERR_AGAIN;
-	if (ctx->bmc_state & HIOMAP_E_WINDOW_RESET)
+	}
+	if (ctx->bmc_state & HIOMAP_E_WINDOW_RESET) {
+		prlog(PR_NOTICE, "%s HIOMAP_E_WINDOW_RESET\n", __func__);
 		return FLASH_ERR_AGAIN;
-	if (ctx->window_state == closed_window)
+	}
+	if (ctx->window_state == closed_window) {
+		prlog(PR_TRACE, "%s window_state=closed_window\n", __func__);
 		return FLASH_ERR_PARM_ERROR;
-	if (pos < ctx->current.cur_pos)
+	}
+	if (pos < ctx->current.cur_pos) {
+		prlog(PR_TRACE, "%s we need to move the window pos=%llu < ctx->current.cur_pos=0x%x\n",
+			__func__, pos, ctx->current.cur_pos);
 		return FLASH_ERR_PARM_ERROR;
+	}
 	if ((pos + len) > (ctx->current.cur_pos + ctx->current.size)) {
 		/* we will compensate the proper values in caller */
 		if ((pos + ctx->current.size) <= (ctx->current.cur_pos + ctx->current.size)) {
@@ -313,8 +329,10 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 	}
 
 	if (msg->data[1] != ctx->inflight_seq) {
-		prerror("Unmatched sequence number: wanted %u got %u\n",
-			ctx->inflight_seq, msg->data[1]);
+		prlog(PR_TRACE, "%s Unmatched ipmi sequence number: wanted %u got %u\n",
+			__func__,
+			ctx->inflight_seq,
+			msg->data[1]);
 		res->cc = IPMI_ERR_UNSPECIFIED;
 		ipmi_free_msg(msg);
 		return;
@@ -371,7 +389,9 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 	case HIOMAP_C_ERASE:
 	case HIOMAP_C_RESET:
 		if (msg->resp_size != 2) {
-			prerror("%u: Unexpected response size: %u\n", msg->data[0],
+			prerror("%s %u: Unexpected response size: %u\n",
+				__func__,
+				msg->data[0],
 				msg->resp_size);
 			res->cc = IPMI_ERR_UNSPECIFIED;
 			ctx->cc = OPAL_HARDWARE;
@@ -386,8 +406,8 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 		}
 		break;
 	default:
-		prlog(PR_WARNING, "Unimplemented command handler: %u\n",
-		      msg->data[0]);
+		prlog(PR_WARNING, "%s Unimplemented command handler: %u\n",
+		      __func__, msg->data[0]);
 		break;
 	};
 	ipmi_free_msg(msg);
@@ -655,8 +675,10 @@  static int hiomap_window_move(struct ipmi_hiomap *ctx, uint8_t command,
 
 	rc = hiomap_queue_msg(ctx, msg);
 
-	if (rc)
+	if (rc) {
+		prlog(PR_NOTICE, "%s move queue msg failed: rc=%d\n", __func__, rc);
 		return rc;
+	}
 
 	return 0;
 }
@@ -682,8 +704,10 @@  static int hiomap_mark_dirty(struct ipmi_hiomap *ctx, uint64_t offset,
 	ctx->cc = -1;
 	unlock(&ctx->lock);
 
-	if (state != write_window)
+	if (state != write_window) {
+		prlog(PR_NOTICE, "%s failed: state=%i\n", __func__, state);
 		return FLASH_ERR_PARM_ERROR;
+	}
 
 	req[0] = HIOMAP_C_MARK_DIRTY;
 	req[1] = dirty_seq;
@@ -699,11 +723,13 @@  static int hiomap_mark_dirty(struct ipmi_hiomap *ctx, uint64_t offset,
 
 	rc = hiomap_queue_msg(ctx, msg);
 
-	if (rc)
+	if (rc) {
+		prlog(PR_NOTICE, "%s dirty queue msg failed: rc=%d\n", __func__, rc);
 		return rc;
+	}
 
-	prlog(PR_DEBUG, "Marked flash dirty at 0x%" PRIx64 " for %" PRIu64 "\n",
-		offset, size);
+	prlog(PR_TRACE, "%s Start to mark flash dirty at pos %llu size %llu bytes ipmi seq=%i\n",
+		__func__, offset, size, dirty_seq);
 
 	return 0;
 }
@@ -726,8 +752,10 @@  static int hiomap_flush(struct ipmi_hiomap *ctx)
 	ctx->cc = -1;
 	unlock(&ctx->lock);
 
-	if (state != write_window)
+	if (state != write_window) {
+		prlog(PR_NOTICE, "%s failed: state=%i\n", __func__, state);
 		return FLASH_ERR_PARM_ERROR;
+	}
 
 	req[0] = HIOMAP_C_FLUSH;
 	req[1] = flush_seq;
@@ -738,10 +766,12 @@  static int hiomap_flush(struct ipmi_hiomap *ctx)
 
 	rc = hiomap_queue_msg(ctx, msg);
 
-	if (rc)
+	if (rc) {
+		prlog(PR_NOTICE, "%s flush queue msg failed: rc=%d\n", __func__, rc);
 		return rc;
+	}
 
-	prlog(PR_DEBUG, "Flushed writes\n");
+	prlog(PR_TRACE, "%s Start to flush writes ipmi seq=%i\n", __func__, flush_seq);
 
 	return 0;
 }
@@ -777,12 +807,15 @@  static int hiomap_ack(struct ipmi_hiomap *ctx, uint8_t ack)
 		         bmc_platform->sw->ipmi_oem_hiomap_cmd,
 			 ipmi_hiomap_cmd_cb, &ack_res, req, sizeof(req), 2);
 
+	prlog(PR_TRACE, "%s SENDING req[1]=%i\n", __func__, req[1]);
 	rc = hiomap_queue_msg(ctx, msg);
 	lock(&ctx->lock);
 	ctx->bl.flags = orig_flags;
 	unlock(&ctx->lock);
-	if (rc)
+	if (rc) {
+		prlog(PR_NOTICE, "%s queue msg failed: rc=%d\n", __func__, rc);
 		return rc;
+	}
 
 	rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_ACK_DEFAULT);
 	if (rc) {
@@ -791,7 +824,7 @@  static int hiomap_ack(struct ipmi_hiomap *ctx, uint8_t ack)
 		return rc;
 	}
 
-	prlog(PR_DEBUG, "Acked events: 0x%x\n", ack);
+	prlog(PR_NOTICE, "%s Acked events: 0x%x\n", __func__, ack);
 
 	return 0;
 }
@@ -817,8 +850,10 @@  static int hiomap_erase(struct ipmi_hiomap *ctx, uint64_t offset,
 	ctx->cc = -1;
 	unlock(&ctx->lock);
 
-	if (state != write_window)
+	if (state != write_window) {
+		prlog(PR_NOTICE, "%s failed: state=%i\n", __func__, state);
 		return FLASH_ERR_PARM_ERROR;
+	}
 
 	req[0] = HIOMAP_C_ERASE;
 	req[1] = erase_seq;
@@ -834,11 +869,14 @@  static int hiomap_erase(struct ipmi_hiomap *ctx, uint64_t offset,
 
 	rc = hiomap_queue_msg(ctx, msg);
 
-	if (rc)
+	if (rc) {
+		prlog(PR_NOTICE, "%s erase queue msg failed: rc=%d\n", __func__, rc);
 		return rc;
+	}
 
-	prlog(PR_DEBUG, "Erased flash at 0x%" PRIx64 " for %" PRIu64 "\n",
-	      offset, size);
+	prlog(PR_TRACE, "%s Erasing flash at pos %llu for size %llu\n",
+		__func__,
+		offset, size);
 
 	return 0;
 }
@@ -899,10 +937,17 @@  static void hiomap_event(uint8_t events, void *context)
 {
 	struct ipmi_hiomap *ctx = context;
 
-	prlog(PR_DEBUG, "Received events: 0x%x\n", events);
+	prlog(PR_NOTICE, "%s Received events: 0x%x ctx->bmc_state=%i\n",
+		__func__,
+		events,
+		ctx->bmc_state);
 
 	lock(&ctx->lock);
 	ctx->bmc_state = events | (ctx->bmc_state & HIOMAP_E_ACK_MASK);
+	prlog(PR_NOTICE, "%s Updated bmc_state Received events: 0x%x ctx->bmc_state=%i\n",
+		__func__,
+		events,
+		ctx->bmc_state);
 	unlock(&ctx->lock);
 }
 
@@ -915,7 +960,7 @@  static int lpc_window_read(struct ipmi_hiomap *ctx, uint32_t pos,
 	if ((ctx->current.lpc_addr + ctx->current.size) < (off + len))
 		return FLASH_ERR_PARM_ERROR;
 
-	prlog(PR_TRACE, "Reading at 0x%08x for 0x%08x offset: 0x%08x\n",
+	prlog(PR_TRACE, "LPC Reading at 0x%08x for 0x%08x offset: 0x%08x\n",
 	      pos, len, off);
 
 	while(len) {
@@ -963,7 +1008,7 @@  static int lpc_window_write(struct ipmi_hiomap *ctx, uint32_t pos,
 	if ((ctx->current.lpc_addr + ctx->current.size) < (off + len))
 		return FLASH_ERR_PARM_ERROR;
 
-	prlog(PR_TRACE, "Writing at 0x%08x for 0x%08x offset: 0x%08x\n",
+	prlog(PR_TRACE, "LPC Writing at 0x%08x for 0x%08x offset: 0x%08x\n",
 	      pos, len, off);
 
 	while(len) {
@@ -979,7 +1024,7 @@  static int lpc_window_write(struct ipmi_hiomap *ctx, uint32_t pos,
 			chunk = 1;
 		}
 		if (rc) {
-			prlog(PR_ERR, "lpc_write failure %d to FW 0x%08x\n", rc, off);
+			prlog(PR_ERR, "%s failure %d to FW 0x%08x\n", __func__, rc, off);
 			return rc;
 		}
 		len -= chunk;
@@ -999,6 +1044,7 @@  static int ipmi_hiomap_handle_events(struct ipmi_hiomap *ctx)
 	lock(&ctx->lock);
 
 	status = ctx->bmc_state;
+	prlog(PR_TRACE, "%s status=%i\n", __func__, status);
 
 	/*
 	 * Immediately clear the ackable events to make sure we don't race to
@@ -1021,9 +1067,18 @@  static int ipmi_hiomap_handle_events(struct ipmi_hiomap *ctx)
 	 * Therefore it is enough to mark the window as closed to consider it
 	 * recovered.
 	 */
+	if (status & HIOMAP_E_PROTOCOL_RESET) {
+		prlog(PR_TRACE, "%s status=HIOMAP_E_PROTOCOL_RESET\n", __func__);
+	}
 
-	if (status & (HIOMAP_E_PROTOCOL_RESET | HIOMAP_E_WINDOW_RESET))
+	if (status & HIOMAP_E_WINDOW_RESET) {
+		prlog(PR_TRACE, "%s status=HIOMAP_E_WINDOW_RESET\n", __func__);
+	}
+
+	if (status & (HIOMAP_E_PROTOCOL_RESET | HIOMAP_E_WINDOW_RESET)) {
 		ctx->window_state = closed_window;
+		prlog(PR_TRACE, "%s closed_window\n", __func__);
+	}
 
 	unlock(&ctx->lock);
 
@@ -1041,21 +1096,22 @@  static int ipmi_hiomap_handle_events(struct ipmi_hiomap *ctx)
 	 * the BMC's cache must be valid if opening the window is successful.
 	 */
 	if (status & HIOMAP_E_ACK_MASK) {
+		prlog(PR_TRACE, "%s status=%i HIOMAP_E_ACK_MASK so TRY to ACK\n", __func__, status);
 		/* ACK is unversioned, can send it if the daemon is ready */
 		rc = hiomap_ack(ctx, status & HIOMAP_E_ACK_MASK);
 		if (rc) {
-			prlog(PR_DEBUG, "Failed to ack events: 0x%x\n",
-			      status & HIOMAP_E_ACK_MASK);
+			prlog(PR_NOTICE, "%s Failed to ack events rc=%i: status & HIOMAP_E_ACK_MASK=0x%x status=%i\n",
+			      __func__, rc, (status & HIOMAP_E_ACK_MASK), status);
 			goto restore;
 		}
 	}
 
 	if (status & HIOMAP_E_PROTOCOL_RESET) {
-		prlog(PR_INFO, "Protocol was reset\n");
+		prlog(PR_INFO, "%s Protocol was reset\n", __func__);
 
 		rc = hiomap_get_info(ctx);
 		if (rc) {
-			prerror("Failure to renegotiate after protocol reset\n");
+			prerror("%s Failure to renegotiate after protocol reset\n", __func__);
 			goto restore;
 		}
 
@@ -1065,7 +1121,7 @@  static int ipmi_hiomap_handle_events(struct ipmi_hiomap *ctx)
 			goto restore;
 		}
 
-		prlog(PR_INFO, "Restored state after protocol reset\n");
+		prlog(PR_INFO, "%s Restored state after protocol reset\n", __func__);
 	}
 
 	/*
@@ -1083,7 +1139,9 @@  restore:
 	 * than necessary, but never less than necessary.
 	 */
 	lock(&ctx->lock);
+	prlog(PR_TRACE, "%s PRE restore status=%i PRE ctx->bmc_state=%i rc=%i\n", __func__, status, ctx->bmc_state, rc);
 	ctx->bmc_state |= (status & HIOMAP_E_ACK_MASK);
+	prlog(PR_TRACE, "%s POST restored status=%i POST ctx->bmc_state=%i rc=%i\n", __func__, status, ctx->bmc_state, rc);
 	unlock(&ctx->lock);
 
 	return rc;
@@ -1099,15 +1157,17 @@  static int ipmi_hiomap_read(struct blocklevel_device *bl, uint64_t pos,
 
 	/* LPC is only 32bit */
 	if (pos > UINT_MAX || len > UINT_MAX)
-		return FLASH_ERR_PARM_ERROR;
+		return OPAL_PARAMETER;
 
 	ctx = container_of(bl, struct ipmi_hiomap, bl);
 
 	lock(&ctx->transaction_lock);
 
 	rc = ipmi_hiomap_handle_events(ctx);
-	if (rc)
+	if (rc) {
+		prlog(PR_NOTICE, "%s ipmi_hiomap_handle_events failed: rc=%d\n", __func__, rc);
 		goto out;
+	}
 
 	lock(&ctx->lock);
 	if (ctx->bl.flags & IN_PROGRESS) {
@@ -1121,8 +1181,7 @@  static int ipmi_hiomap_read(struct blocklevel_device *bl, uint64_t pos,
 	}
 	unlock(&ctx->lock);
 
-	prlog(PR_TRACE, "Flash read at %#" PRIx64 " for %#" PRIx64 "\n", pos,
-	      len);
+	prlog(PR_TRACE, "Flash READ at pos %llu for %llu bytes\n", pos, len);
 	while (len > 0) {
 		lock(&ctx->lock);
 		state = ctx->window_state;
@@ -1206,8 +1265,10 @@  static int ipmi_hiomap_write(struct blocklevel_device *bl, uint64_t pos,
 	lock(&ctx->transaction_lock);
 
 	rc = ipmi_hiomap_handle_events(ctx);
-	if (rc)
+	if (rc) {
+		prlog(PR_NOTICE, "%s ipmi_hiomap_handle_events failed: rc=%d\n", __func__, rc);
 		goto out;
+	}
 
 	lock(&ctx->lock);
 	if (ctx->bl.flags & IN_PROGRESS) {
@@ -1221,8 +1282,7 @@  static int ipmi_hiomap_write(struct blocklevel_device *bl, uint64_t pos,
 	}
 	unlock(&ctx->lock);
 
-	prlog(PR_TRACE, "Flash write at %#" PRIx64 " for %#" PRIx64 "\n", pos,
-	      len);
+	prlog(PR_TRACE, "Flash WRITE at pos %llu for %llu bytes\n", pos, len);
 	while (len > 0) {
 		lock(&ctx->lock);
 		state = ctx->window_state;
@@ -1318,14 +1378,17 @@  static int ipmi_hiomap_erase(struct blocklevel_device *bl, uint64_t pos,
 
 	/* LPC is only 32bit */
 	if (pos > UINT_MAX || len > UINT_MAX)
-		return FLASH_ERR_PARM_ERROR;
+		return OPAL_PARAMETER;
 
 	ctx = container_of(bl, struct ipmi_hiomap, bl);
 	lock(&ctx->transaction_lock);
 
 	rc = ipmi_hiomap_handle_events(ctx);
-	if (rc)
+	if (rc) {
+		prlog(PR_NOTICE, "%s ipmi_hiomap_handle_events failed: rc=%d\n", __func__, rc);
 		goto out;
+	}
+
 
 	lock(&ctx->lock);
 	if (ctx->bl.flags & IN_PROGRESS) {
@@ -1337,8 +1400,8 @@  static int ipmi_hiomap_erase(struct blocklevel_device *bl, uint64_t pos,
 	}
 	unlock(&ctx->lock);
 
-	prlog(PR_TRACE, "Flash erase at 0x%08x for 0x%08x\n", (u32) pos,
-	      (u32) len);
+	prlog(PR_TRACE, "Flash ERASE at pos %llu for %llu bytes\n", pos, len);
+
 	while (len > 0) {
 		lock(&ctx->lock);
 		state = ctx->window_state;
@@ -1422,12 +1485,16 @@  static int ipmi_hiomap_get_flash_info(struct blocklevel_device *bl,
 	lock(&ctx->transaction_lock);
 
 	rc = ipmi_hiomap_handle_events(ctx);
-	if (rc)
+	if (rc) {
+		prlog(PR_NOTICE, "%s ipmi_hiomap_handle_events failed: rc=%d\n", __func__, rc);
 		return rc;
+	}
 
 	rc = hiomap_get_flash_info(ctx);
-	if (rc)
+	if (rc) {
+		prlog(PR_NOTICE, "%s hiomap_get_flash failed: rc=%d\n", __func__, rc);
 		return rc;
+	}
 
 	ctx->bl.erase_mask = ctx->erase_granule - 1;
 
@@ -1474,26 +1541,28 @@  int ipmi_hiomap_init(struct blocklevel_device **bl)
 	/* Ack all pending ack-able events to avoid spurious failures */
 	rc = hiomap_ack(ctx, HIOMAP_E_ACK_MASK);
 	if (rc) {
-		prlog(PR_DEBUG, "Failed to ack events: 0x%x\n",
-		      HIOMAP_E_ACK_MASK);
+		prlog(PR_NOTICE, "%s Failed to ack events: 0x%x\n",
+		      __func__, HIOMAP_E_ACK_MASK);
 		goto err;
 	}
 
 	rc = ipmi_sel_register(CMD_OP_HIOMAP_EVENT, hiomap_event, ctx);
-	if (rc < 0)
+	if (rc < 0) {
+		prerror("%s Failed ipmi_sel_register: %d\n", __func__, rc);
 		goto err;
+	}
 
 	/* Negotiate protocol behaviour */
 	rc = hiomap_get_info(ctx);
 	if (rc) {
-		prerror("Failed to get hiomap parameters: %d\n", rc);
+		prerror("%s Failed to get hiomap parameters: %d\n", __func__, rc);
 		goto err;
 	}
 
 	/* Grab the flash parameters */
 	rc = hiomap_get_flash_info(ctx);
 	if (rc) {
-		prerror("Failed to get flash parameters: %d\n", rc);
+		prerror("%s Failed to get flash parameters: %d\n", __func__, rc);
 		goto err;
 	}
 
-- 
2.7.4


From e7a3ac656ac170b36728c9cd2d424e650f354c85 Mon Sep 17 00:00:00 2001
From: Deb McLemore <debmc@linux.ibm.com>
Date: Mon, 28 Oct 2019 08:48:42 -0500
Subject: [PATCH 11/13] lpc: Add logging for hardware diagnostics

Add some logging to help in problem determination for
hardware failure cases, to aid in triage.

Signed-off-by: Deb McLemore <debmc@linux.ibm.com>
---
 hw/lpc.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/hw/lpc.c b/hw/lpc.c
index fedceb4..b835f14 100644
--- a/hw/lpc.c
+++ b/hw/lpc.c
@@ -504,12 +504,16 @@  static int64_t lpc_probe_test(struct lpcm *lpc, uint32_t irqmask)
 	/* Ensure we can perform a valid lookup in the error table */
 	idx = LPC_ERROR_IDX(irqstat);
 	if (idx < 0 || idx >= ARRAY_SIZE(lpc_error_table)) {
-		prerror("LPC bus error translation failed with status 0x%x\n",
-			irqstat);
+		prlog(PR_ERR, "%s bus error translation failed with idx=%llu irqstat=0x%x\n",
+			__func__, idx, irqstat);
 		return OPAL_PARAMETER;
 	}
 
 	rc = lpc_error_table[idx].rc;
+	/* log any anomalies, i.e. anything other than the expected NORESP_ERR */
+	if (idx != LPC_ERROR_IDX(LPC_HC_IRQ_SYNC_NORESP_ERR)) {
+		prlog(PR_NOTICE, "%s LOOKUP rc=0x%x idx=%llu\n", __func__, rc, idx);
+	}
 	return rc;
 }
 
@@ -1032,6 +1036,10 @@  static void lpc_dispatch_err_irqs(struct lpcm *lpc, uint32_t irqs)
 	/* Find and report the error */
 	err = &lpc_error_table[idx];
 	lpc_bus_err_count++;
+	/* log any anomalies, e.g. hardware issues */
+	prlog(PR_NOTICE, "%s lpc_bus_err_count=%i\n",
+		__func__,
+		lpc_bus_err_count);
 	if (manufacturing_mode && (lpc_bus_err_count > LPC_BUS_DEGRADED_PERF_THRESHOLD))
 		info = &e_info(OPAL_RC_LPC_SYNC_PERF);
 	else
-- 
2.7.4


From f127de1c59d445898010618a6e6a18abc070dbf2 Mon Sep 17 00:00:00 2001
From: Deb McLemore <debmc@linux.ibm.com>
Date: Tue, 29 Oct 2019 06:37:45 -0500
Subject: [PATCH 12/13] hw/bt: Allow ipmi regular intervals

Found during investigation of slow ipmi messages in OPAL_FLASH_READ/WRITE
operations.

When BT irqs are operational, perform default timer scheduling.

Signed-off-by: Deb McLemore <debmc@linux.ibm.com>
---
 hw/bt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hw/bt.c b/hw/bt.c
index 824a1f2..72393a2 100644
--- a/hw/bt.c
+++ b/hw/bt.c
@@ -518,7 +518,7 @@  static void bt_poll(struct timer *t __unused, void *data __unused,
 	bt_send_and_unlock();
 
 	schedule_timer(&bt.poller,
-		       bt.irq_ok ? TIMER_POLL : msecs_to_tb(BT_DEFAULT_POLL_MS));
+		       bt.irq_ok ? msecs_to_tb(BT_DEFAULT_POLL_MS) : TIMER_POLL);
 }
 
 static void bt_ipmi_poll(void)
-- 
2.7.4


From 9004e409ed3918955bf3ddcbfe1cf4eb203ea467 Mon Sep 17 00:00:00 2001
From: Deb McLemore <debmc@linux.ibm.com>
Date: Tue, 29 Oct 2019 13:37:36 -0500
Subject: [PATCH 13/13] libflash/ipmi-hiomap: Add helpers for logging

Add some logging helpers.

Signed-off-by: Deb McLemore <debmc@linux.ibm.com>
---
 libflash/ipmi-hiomap.c | 287 ++++++++++++++++++++++++-------------------------
 1 file changed, 140 insertions(+), 147 deletions(-)

diff --git a/libflash/ipmi-hiomap.c b/libflash/ipmi-hiomap.c
index df67173..f4a50e4 100644
--- a/libflash/ipmi-hiomap.c
+++ b/libflash/ipmi-hiomap.c
@@ -1,8 +1,6 @@ 
 // SPDX-License-Identifier: Apache-2.0
 /* Copyright 2018-2019 IBM Corp. */
 
-#define pr_fmt(fmt) "HIOMAP: " fmt
-
 #include <hiomap.h>
 #include <inttypes.h>
 #include <ipmi.h>
@@ -23,6 +21,12 @@ 
 
 #define CMD_OP_HIOMAP_EVENT	0x0f
 
+#define hiomap_trace(fmt, a...)    prlog(PR_TRACE,"HIOMAP: %s " fmt, __func__, ## a)
+#define hiomap_info(fmt, a...)     prlog(PR_INFO,"HIOMAP: %s " fmt, __func__, ## a)
+#define hiomap_notice(fmt, a...)   prlog(PR_NOTICE,"HIOMAP: %s " fmt, __func__, ## a)
+#define hiomap_warn(fmt, a...)     prlog(PR_WARNING,"HIOMAP: %s " fmt, __func__, ## a)
+#define hiomap_error(fmt, a...)    prlog(PR_ERR,"HIOMAP: %s " fmt, __func__, ## a)
+
 struct ipmi_hiomap_result {
 	struct ipmi_hiomap *ctx;
 	int16_t cc;
@@ -58,13 +62,13 @@  static inline uint16_t bytes_to_blocks_align_up(struct ipmi_hiomap *ctx,
 /* Call under ctx->lock */
 static int hiomap_protocol_ready(struct ipmi_hiomap *ctx)
 {
-	prlog(PR_TRACE, "%s ctx->bmc_state=%i\n", __func__, ctx->bmc_state);
+	hiomap_trace("ctx->bmc_state=%i\n", ctx->bmc_state);
 	if (!(ctx->bmc_state & HIOMAP_E_DAEMON_READY)) {
-		prlog(PR_NOTICE, "%s FLASH_ERR_DEVICE_GONE\n", __func__);
+		hiomap_notice("FLASH_ERR_DEVICE_GONE\n");
 		return FLASH_ERR_DEVICE_GONE;
 	}
 	if (ctx->bmc_state & HIOMAP_E_FLASH_LOST) {
-		prlog(PR_NOTICE, "%s HIOMAP_E_FLASH_LOST\n", __func__);
+		hiomap_notice("HIOMAP_E_FLASH_LOST\n");
 		return FLASH_ERR_AGAIN;
 	}
 
@@ -102,10 +106,10 @@  static int hiomap_queue_msg(struct ipmi_hiomap *ctx, struct ipmi_msg *msg)
 			ipmi_free_msg(msg);
 			return rc;
 		}
-		prlog(PR_TRACE, "%s SENDING SYNC\n", __func__);
+		hiomap_trace("SENDING SYNC\n");
 		ipmi_queue_msg_sync(msg);
 	} else {
-		prlog(PR_TRACE, "%s SENDING ASYNC\n", __func__);
+		hiomap_trace("SENDING ASYNC\n");
 		rc = ipmi_queue_msg(msg);
 	}
 
@@ -117,64 +121,61 @@  static int hiomap_window_valid(struct ipmi_hiomap *ctx, uint64_t pos,
 			        uint64_t len)
 {
 	if (ctx->bmc_state & HIOMAP_E_FLASH_LOST) {
-		prlog(PR_NOTICE, "%s HIOMAP_E_FLASH_LOST\n", __func__);
+		hiomap_notice("HIOMAP_E_FLASH_LOST\n");
 		return FLASH_ERR_AGAIN;
 	}
 	if (ctx->bmc_state & HIOMAP_E_PROTOCOL_RESET) {
-		prlog(PR_NOTICE, "%s HIOMAP_E_PROTOCOL_RESET\n", __func__);
+		hiomap_notice("HIOMAP_E_PROTOCOL_RESET\n");
 		return FLASH_ERR_AGAIN;
 	}
 	if (ctx->bmc_state & HIOMAP_E_WINDOW_RESET) {
-		prlog(PR_NOTICE, "%s HIOMAP_E_WINDOW_RESET\n", __func__);
+		hiomap_notice("HIOMAP_E_WINDOW_RESET\n");
 		return FLASH_ERR_AGAIN;
 	}
 	if (ctx->window_state == closed_window) {
-		prlog(PR_TRACE, "%s window_state=closed_window\n", __func__);
+		hiomap_trace("window_state=closed_window\n");
 		return FLASH_ERR_PARM_ERROR;
 	}
 	if (pos < ctx->current.cur_pos) {
-		prlog(PR_TRACE, "%s we need to move the window pos=%llu < ctx->current.cur_pos=0x%x\n",
-			__func__, pos, ctx->current.cur_pos);
+		hiomap_trace("we need to move the window pos=%llu < ctx->current.cur_pos=0x%x\n",
+			pos, ctx->current.cur_pos);
 		return FLASH_ERR_PARM_ERROR;
 	}
 	if ((pos + len) > (ctx->current.cur_pos + ctx->current.size)) {
 		/* we will compensate the proper values in caller */
 		if ((pos + ctx->current.size) <= (ctx->current.cur_pos + ctx->current.size)) {
-			prlog(PR_TRACE, "%s OK pos=%llu "
+			hiomap_trace("OK pos=%llu "
 				"ctx->current.size=0x%x "
 				"ctx->current.cur_pos=0x%x\n",
-				__func__,
 				pos,
 				ctx->current.size,
 				ctx->current.cur_pos);
 		} else {
-			prlog(PR_TRACE, "%s CHECKING further pos=%llu "
+			hiomap_trace("CHECKING further pos=%llu "
 				"for len=%llu ctx->current.size=0x%x "
 				"ctx->current.cur_pos=0x%x\n",
-				__func__,
 				pos,
 				len,
 				ctx->current.size,
 				ctx->current.cur_pos);
 			if ((pos + ctx->current.adjusted_window_size) <= (ctx->current.cur_pos + ctx->current.size)) {
-				prlog(PR_TRACE, "%s OK use ADJUSTED pos=%llu "
+				hiomap_trace("OK use ADJUSTED pos=%llu "
 					"adjusted_len=%i for len=%llu "
 					"ctx->current.size=0x%x "
 					"ctx->current.cur_pos=0x%x\n",
-					__func__,
 					pos,
 					ctx->current.adjusted_window_size,
 					len,
 					ctx->current.size,
 					ctx->current.cur_pos);
 			} else {
-				prlog(PR_TRACE, "%s we need to MOVE the window\n", __func__);
+				hiomap_trace("we need to MOVE the window\n");
 				return FLASH_ERR_PARM_ERROR;
 			}
 		}
 	}
 
-	prlog(PR_TRACE, "%s ALL GOOD, no move needed\n", __func__);
+	hiomap_trace("ALL GOOD, no move needed\n");
 	return 0;
 }
 
@@ -194,7 +195,7 @@  static void move_cb(struct ipmi_msg *msg)
 	int lock_try_counter = 10;
 
 	if ((msg->resp_size != 8) || (msg->cc != IPMI_CC_NO_ERROR) || (msg->data[1] != ctx->inflight_seq)) {
-		prlog(PR_TRACE, "Command %u (4=READ 6=WRITE): move_cb "
+		hiomap_trace("Command %u (4=READ 6=WRITE): "
 			"Unexpected results to check: response size we "
 			"expect 8 but received %u, ipmi cc=%d "
 			"(should be zero), expected ipmi seq %i but got "
@@ -209,10 +210,9 @@  static void move_cb(struct ipmi_msg *msg)
 		ctx->window_state = closed_window;
 		goto out;
 	} else {
-		prlog(PR_TRACE, "Entered %s for %s window from "
+		hiomap_trace("Entered for %s window from "
 			"OLD block pos 0x%x for 0x%x bytes at "
 			"lpc_addr 0x%x ipmi seq=%i\n",
-			__func__,
 			(msg->data[0] == HIOMAP_C_CREATE_READ_WINDOW) ? "read" : "write",
 			ctx->current.cur_pos,
 			ctx->current.size,
@@ -268,19 +268,18 @@  static void move_cb(struct ipmi_msg *msg)
 		 */
 		*ctx->active_size = (ctx->current.cur_pos + ctx->current.size) - ctx->requested_pos;
 		ctx->current.adjusted_window_size = (ctx->current.cur_pos + ctx->current.size) - ctx->requested_pos;
-		prlog(PR_TRACE, "%s VALID MOVE ADJUSTMENT "
+		hiomap_trace("VALID MOVE ADJUSTMENT "
 			"*ctx->active_size=%llu "
 			"ctx->requested_pos=%llu "
 			"ctx->current.adjusted_window_size=%i\n",
-			__func__,
 			*ctx->active_size,
 			ctx->requested_pos,
 			ctx->current.adjusted_window_size);
 	}
 
 	if (ctx->requested_len != 0 && *ctx->active_size == 0) {
-		prlog(PR_NOTICE, "%s Invalid window properties: len: %llu, size: %llu\n",
-			__func__, ctx->requested_len, *ctx->active_size);
+		hiomap_notice("Invalid window properties: len: %llu, size: %llu\n",
+			ctx->requested_len, *ctx->active_size);
 		ctx->cc = OPAL_PARAMETER;
 		ctx->window_state = closed_window;
 		goto out;
@@ -291,7 +290,7 @@  static void move_cb(struct ipmi_msg *msg)
 	else
 		ctx->window_state = write_window;
 
-	prlog(PR_TRACE, "Opened %s window to NEW block pos 0x%x for 0x%x bytes "
+	hiomap_trace("Opened %s window to NEW block pos 0x%x for 0x%x bytes "
 		"at lpc_addr 0x%x ipmi seq=%i active size=%llu "
 		"adjusted_window_size=%i\n",
 		(msg->data[0] == HIOMAP_C_CREATE_READ_WINDOW) ? "read" : "write",
@@ -302,7 +301,7 @@  static void move_cb(struct ipmi_msg *msg)
 		*ctx->active_size,
 		ctx->current.adjusted_window_size);
 
-out:	prlog(PR_TRACE, "Exiting the move window callback "
+out:	hiomap_trace("Exiting the move window callback "
 		"transaction ipmi seq=%i\n",
 		ctx->inflight_seq);
 	unlock(&ctx->lock);
@@ -322,15 +321,14 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 
 	/* We at least need the command and sequence */
 	if (msg->resp_size < 2) {
-		prerror("Illegal response size: %u\n", msg->resp_size);
+		hiomap_error("Illegal response size: %u\n", msg->resp_size);
 		res->cc = IPMI_ERR_UNSPECIFIED;
 		ipmi_free_msg(msg);
 		return;
 	}
 
 	if (msg->data[1] != ctx->inflight_seq) {
-		prlog(PR_TRACE, "%s Unmatched ipmi sequence number: wanted %u got %u\n",
-			__func__,
+		hiomap_trace("Unmatched ipmi sequence number: wanted %u got %u\n",
 			ctx->inflight_seq,
 			msg->data[1]);
 		res->cc = IPMI_ERR_UNSPECIFIED;
@@ -345,7 +343,7 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 
 		ctx->cc = IPMI_CC_NO_ERROR;
 		if (msg->resp_size != 6) {
-			prerror("%u: Unexpected response size: %u\n", msg->data[0],
+			hiomap_error("%u: Unexpected response size: %u\n", msg->data[0],
 				msg->resp_size);
 			res->cc = IPMI_ERR_UNSPECIFIED;
 			break;
@@ -353,7 +351,7 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 
 		ctx->version = msg->data[2];
 		if (ctx->version < 2) {
-			prerror("Failed to negotiate protocol v2 or higher: %d\n",
+			hiomap_error("Failed to negotiate protocol v2 or higher: %d\n",
 				ctx->version);
 			res->cc = IPMI_ERR_UNSPECIFIED;
 			break;
@@ -370,7 +368,7 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 
 		ctx->cc = IPMI_CC_NO_ERROR;
 		if (msg->resp_size != 6) {
-			prerror("%u: Unexpected response size: %u\n", msg->data[0],
+			hiomap_error("%u: Unexpected response size: %u\n", msg->data[0],
 				msg->resp_size);
 			res->cc = IPMI_ERR_UNSPECIFIED;
 			break;
@@ -389,16 +387,14 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 	case HIOMAP_C_ERASE:
 	case HIOMAP_C_RESET:
 		if (msg->resp_size != 2) {
-			prerror("%s %u: Unexpected response size: %u\n",
-				__func__,
+			hiomap_error("%u: Unexpected response size: %u\n",
 				msg->data[0],
 				msg->resp_size);
 			res->cc = IPMI_ERR_UNSPECIFIED;
 			ctx->cc = OPAL_HARDWARE;
 			break;
 		} else {
-			prlog(PR_TRACE, "%s Command=%u 1=RESET 7=DIRTY 8=FLUSH 9=ACK 10=ERASE ipmi seq=%u ctx->inflight_seq=%u\n",
-				__func__,
+			hiomap_trace("Command=%u 1=RESET 7=DIRTY 8=FLUSH 9=ACK 10=ERASE ipmi seq=%u ctx->inflight_seq=%u\n",
 				msg->data[0],
 				msg->data[1],
 				ctx->inflight_seq);
@@ -406,8 +402,8 @@  static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg)
 		}
 		break;
 	default:
-		prlog(PR_WARNING, "%s Unimplemented command handler: %u\n",
-		      __func__, msg->data[0]);
+		hiomap_warn("Unimplemented command handler: %u\n",
+		      msg->data[0]);
 		break;
 	};
 	ipmi_free_msg(msg);
@@ -433,7 +429,7 @@  static int hiomap_wait_for_cc(struct ipmi_hiomap *ctx, int *cc, uint8_t *seq, ui
 	uint64_t timeout_counter;
 	int rc;
 
-	prlog(PR_TRACE, "Start wait for cc ipmi seq=%i *cc=%i ticks=%llu\n", *seq, *cc, ticks);
+	hiomap_trace("Start wait for cc ipmi seq=%i *cc=%i ticks=%llu\n", *seq, *cc, ticks);
 	rc = 0;
 	if (this_cpu()->tb_invalid) {
 		/*
@@ -446,15 +442,15 @@  static int hiomap_wait_for_cc(struct ipmi_hiomap *ctx, int *cc, uint8_t *seq, ui
 			lock(&ctx->lock);
 			ctx->window_state = closed_window;
 			++ctx->missed_cc_count;
-			prlog(PR_NOTICE, "%s tb_invalid, CLOSING WINDOW for cc "
+			hiomap_notice("tb_invalid, CLOSING WINDOW for cc "
 				"ipmi seq=%i ctx->missed_cc_count=%i\n",
-				__func__, *seq, ctx->missed_cc_count);
+				*seq, ctx->missed_cc_count);
 			unlock(&ctx->lock);
 			rc = FLASH_ERR_MISSED_CC;
 		}
-		prlog(PR_NOTICE, "%s tb_invalid, hopefully this will "
+		hiomap_notice("tb_invalid, hopefully this will "
 			"retry/recover rc=%i\n",
-			__func__, rc);
+			rc);
 		return rc;
 	}
 	start_time = mftb();
@@ -468,7 +464,7 @@  static int hiomap_wait_for_cc(struct ipmi_hiomap *ctx, int *cc, uint8_t *seq, ui
 		ipmi_hiomap_ticks = IPMI_HIOMAP_TICKS;
 	}
 
-	prlog(PR_TRACE, "wait_time=%llu ipmi_hiomap_ticks=%llu ipmi seq=%i "
+	hiomap_trace("wait_time=%llu ticks=%llu ipmi seq=%i "
 			"ctx->missed_cc_count=%i\n",
 		wait_time, ticks, *seq, ctx->missed_cc_count);
 	while (wait_time < ipmi_hiomap_ticks) {
@@ -478,27 +474,27 @@  static int hiomap_wait_for_cc(struct ipmi_hiomap *ctx, int *cc, uint8_t *seq, ui
 			wait_time = tb_to_msecs(now - start_time);
 		}
 		if (*cc == IPMI_CC_NO_ERROR) {
-			prlog(PR_TRACE, "Break cc ipmi seq=%i "
+			hiomap_trace("Break cc ipmi seq=%i "
 				"ctx->missed_cc_count=%i\n",
 				*seq, ctx->missed_cc_count);
 			break;
 		}
 	}
-	prlog(PR_TRACE, "Status CHECK wait_for_cc wait_time=%llu *cc=%i "
+	hiomap_trace("Status CHECK wait_time=%llu *cc=%i "
 		"ipmi seq=%i ctx->missed_cc_count=%i\n",
 		wait_time, *cc, *seq, ctx->missed_cc_count);
 	if (*cc != IPMI_CC_NO_ERROR) {
 		lock(&ctx->lock);
 		ctx->window_state = closed_window;
 		++ctx->missed_cc_count;
-		prlog(PR_TRACE, "CLOSING WINDOW for cc ipmi seq=%i "
+		hiomap_trace("CLOSING WINDOW for cc ipmi seq=%i "
 			"ctx->missed_cc_count=%i\n",
 			*seq, ctx->missed_cc_count);
 		unlock(&ctx->lock);
 		rc = FLASH_ERR_MISSED_CC;
 	}
 
-	prlog(PR_TRACE, "Stop wait for *cc=%i ipmi seq=%i "
+	hiomap_trace("Stop wait for *cc=%i ipmi seq=%i "
 		"ctx->missed_cc_count=%i\n",
 		*cc, *seq, ctx->missed_cc_count);
 	return rc;
@@ -547,8 +543,8 @@  static int hiomap_get_info(struct ipmi_hiomap *ctx)
 	rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_ACK_DEFAULT);
 
 	if (rc) {
-		prlog(PR_TRACE, "%s hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-			__func__, rc, IPMI_ACK_DEFAULT);
+		hiomap_trace("hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+			rc, IPMI_ACK_DEFAULT);
 	}
 
 	return rc;
@@ -592,8 +588,8 @@  static int hiomap_get_flash_info(struct ipmi_hiomap *ctx)
 
 	rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_ACK_DEFAULT);
 	if (rc) {
-		prlog(PR_TRACE, "%s hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-			__func__, rc, IPMI_ACK_DEFAULT);
+		hiomap_trace("hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+			rc, IPMI_ACK_DEFAULT);
 	}
 
 	return rc;
@@ -676,7 +672,7 @@  static int hiomap_window_move(struct ipmi_hiomap *ctx, uint8_t command,
 	rc = hiomap_queue_msg(ctx, msg);
 
 	if (rc) {
-		prlog(PR_NOTICE, "%s move queue msg failed: rc=%d\n", __func__, rc);
+		hiomap_notice("move queue msg failed: rc=%d\n", rc);
 		return rc;
 	}
 
@@ -705,7 +701,7 @@  static int hiomap_mark_dirty(struct ipmi_hiomap *ctx, uint64_t offset,
 	unlock(&ctx->lock);
 
 	if (state != write_window) {
-		prlog(PR_NOTICE, "%s failed: state=%i\n", __func__, state);
+		hiomap_notice("failed: state=%i\n", state);
 		return FLASH_ERR_PARM_ERROR;
 	}
 
@@ -724,12 +720,12 @@  static int hiomap_mark_dirty(struct ipmi_hiomap *ctx, uint64_t offset,
 	rc = hiomap_queue_msg(ctx, msg);
 
 	if (rc) {
-		prlog(PR_NOTICE, "%s dirty queue msg failed: rc=%d\n", __func__, rc);
+		hiomap_notice("dirty queue msg failed: rc=%d\n", rc);
 		return rc;
 	}
 
-	prlog(PR_TRACE, "%s Start to mark flash dirty at pos %llu size %llu bytes ipmi seq=%i\n",
-		__func__, offset, size, dirty_seq);
+	hiomap_trace("Start to mark flash dirty at pos %llu size %llu bytes ipmi seq=%i\n",
+		offset, size, dirty_seq);
 
 	return 0;
 }
@@ -753,7 +749,7 @@  static int hiomap_flush(struct ipmi_hiomap *ctx)
 	unlock(&ctx->lock);
 
 	if (state != write_window) {
-		prlog(PR_NOTICE, "%s failed: state=%i\n", __func__, state);
+		hiomap_notice("failed: state=%i\n", state);
 		return FLASH_ERR_PARM_ERROR;
 	}
 
@@ -767,11 +763,11 @@  static int hiomap_flush(struct ipmi_hiomap *ctx)
 	rc = hiomap_queue_msg(ctx, msg);
 
 	if (rc) {
-		prlog(PR_NOTICE, "%s flush queue msg failed: rc=%d\n", __func__, rc);
+		hiomap_notice("flush queue msg failed: rc=%d\n", rc);
 		return rc;
 	}
 
-	prlog(PR_TRACE, "%s Start to flush writes ipmi seq=%i\n", __func__, flush_seq);
+	hiomap_trace("Start to flush writes ipmi seq=%i\n", flush_seq);
 
 	return 0;
 }
@@ -807,24 +803,24 @@  static int hiomap_ack(struct ipmi_hiomap *ctx, uint8_t ack)
 		         bmc_platform->sw->ipmi_oem_hiomap_cmd,
 			 ipmi_hiomap_cmd_cb, &ack_res, req, sizeof(req), 2);
 
-	prlog(PR_TRACE, "%s SENDING req[1]=%i\n", __func__, req[1]);
+	hiomap_trace("SENDING req[1]=%i\n", req[1]);
 	rc = hiomap_queue_msg(ctx, msg);
 	lock(&ctx->lock);
 	ctx->bl.flags = orig_flags;
 	unlock(&ctx->lock);
 	if (rc) {
-		prlog(PR_NOTICE, "%s queue msg failed: rc=%d\n", __func__, rc);
+		hiomap_notice("queue msg failed: rc=%d\n", rc);
 		return rc;
 	}
 
 	rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_ACK_DEFAULT);
 	if (rc) {
-		prlog(PR_TRACE, "%s hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-			__func__, rc, IPMI_ACK_DEFAULT);
+		hiomap_trace("hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+			rc, IPMI_ACK_DEFAULT);
 		return rc;
 	}
 
-	prlog(PR_NOTICE, "%s Acked events: 0x%x\n", __func__, ack);
+	hiomap_notice("Acked events: 0x%x\n", ack);
 
 	return 0;
 }
@@ -851,7 +847,7 @@  static int hiomap_erase(struct ipmi_hiomap *ctx, uint64_t offset,
 	unlock(&ctx->lock);
 
 	if (state != write_window) {
-		prlog(PR_NOTICE, "%s failed: state=%i\n", __func__, state);
+		hiomap_notice("failed: state=%i\n", state);
 		return FLASH_ERR_PARM_ERROR;
 	}
 
@@ -870,12 +866,11 @@  static int hiomap_erase(struct ipmi_hiomap *ctx, uint64_t offset,
 	rc = hiomap_queue_msg(ctx, msg);
 
 	if (rc) {
-		prlog(PR_NOTICE, "%s erase queue msg failed: rc=%d\n", __func__, rc);
+		hiomap_notice("erase queue msg failed: rc=%d\n", rc);
 		return rc;
 	}
 
-	prlog(PR_TRACE, "%s Erasing flash at pos %llu for size %llu\n",
-		__func__,
+	hiomap_trace("Erasing flash at pos %llu for size %llu\n",
 		offset, size);
 
 	return 0;
@@ -891,7 +886,7 @@  static bool hiomap_reset(struct ipmi_hiomap *ctx)
 	int tmp_sync_flags;
 	int rc;
 
-	prlog(PR_NOTICE, "%s Reset ENTRY\n", __func__);
+	hiomap_notice("Reset ENTRY\n");
 	reset_res.ctx = ctx;
 	reset_res.cc = -1;
 
@@ -917,19 +912,19 @@  static bool hiomap_reset(struct ipmi_hiomap *ctx)
 	unlock(&ctx->lock);
 
 	if (rc) {
-		prlog(PR_NOTICE, "%s reset queue msg failed: rc=%d\n", __func__, rc);
+		hiomap_notice("reset queue msg failed: rc=%d\n", rc);
 		return false;
 	}
 
 	rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_ACK_DEFAULT);
 
 	if (rc) {
-		prlog(PR_NOTICE, "%s hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-			__func__, rc, IPMI_ACK_DEFAULT);
+		hiomap_notice("hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+			rc, IPMI_ACK_DEFAULT);
 		return false;
 	}
 
-	prlog(PR_NOTICE, "%s Reset EXIT\n", __func__);
+	hiomap_notice("Reset EXIT\n");
 	return true;
 }
 
@@ -937,15 +932,13 @@  static void hiomap_event(uint8_t events, void *context)
 {
 	struct ipmi_hiomap *ctx = context;
 
-	prlog(PR_NOTICE, "%s Received events: 0x%x ctx->bmc_state=%i\n",
-		__func__,
+	hiomap_notice("Received events: 0x%x ctx->bmc_state=%i\n",
 		events,
 		ctx->bmc_state);
 
 	lock(&ctx->lock);
 	ctx->bmc_state = events | (ctx->bmc_state & HIOMAP_E_ACK_MASK);
-	prlog(PR_NOTICE, "%s Updated bmc_state Received events: 0x%x ctx->bmc_state=%i\n",
-		__func__,
+	hiomap_notice("Updated bmc_state Received events: 0x%x ctx->bmc_state=%i\n",
 		events,
 		ctx->bmc_state);
 	unlock(&ctx->lock);
@@ -960,7 +953,7 @@  static int lpc_window_read(struct ipmi_hiomap *ctx, uint32_t pos,
 	if ((ctx->current.lpc_addr + ctx->current.size) < (off + len))
 		return FLASH_ERR_PARM_ERROR;
 
-	prlog(PR_TRACE, "LPC Reading at 0x%08x for 0x%08x offset: 0x%08x\n",
+	hiomap_trace("LPC Reading at 0x%08x for 0x%08x offset: 0x%08x\n",
 	      pos, len, off);
 
 	while(len) {
@@ -980,7 +973,7 @@  static int lpc_window_read(struct ipmi_hiomap *ctx, uint32_t pos,
 			chunk = 1;
 		}
 		if (rc) {
-			prlog(PR_ERR, "lpc_read failure %d to FW 0x%08x\n", rc, off);
+			hiomap_error("lpc_read failure %d to FW 0x%08x\n", rc, off);
 			return rc;
 		}
 		len -= chunk;
@@ -1008,7 +1001,7 @@  static int lpc_window_write(struct ipmi_hiomap *ctx, uint32_t pos,
 	if ((ctx->current.lpc_addr + ctx->current.size) < (off + len))
 		return FLASH_ERR_PARM_ERROR;
 
-	prlog(PR_TRACE, "LPC Writing at 0x%08x for 0x%08x offset: 0x%08x\n",
+	hiomap_trace("LPC Writing at 0x%08x for 0x%08x offset: 0x%08x\n",
 	      pos, len, off);
 
 	while(len) {
@@ -1024,7 +1017,7 @@  static int lpc_window_write(struct ipmi_hiomap *ctx, uint32_t pos,
 			chunk = 1;
 		}
 		if (rc) {
-			prlog(PR_ERR, "%s failure %d to FW 0x%08x\n", __func__, rc, off);
+			hiomap_error("failure %d to FW 0x%08x\n", rc, off);
 			return rc;
 		}
 		len -= chunk;
@@ -1044,7 +1037,7 @@  static int ipmi_hiomap_handle_events(struct ipmi_hiomap *ctx)
 	lock(&ctx->lock);
 
 	status = ctx->bmc_state;
-	prlog(PR_TRACE, "%s status=%i\n", __func__, status);
+	hiomap_trace("status=%i\n", status);
 
 	/*
 	 * Immediately clear the ackable events to make sure we don't race to
@@ -1068,16 +1061,16 @@  static int ipmi_hiomap_handle_events(struct ipmi_hiomap *ctx)
 	 * recovered.
 	 */
 	if (status & HIOMAP_E_PROTOCOL_RESET) {
-		prlog(PR_TRACE, "%s status=HIOMAP_E_PROTOCOL_RESET\n", __func__);
+		hiomap_trace("status=HIOMAP_E_PROTOCOL_RESET\n");
 	}
 
 	if (status & HIOMAP_E_WINDOW_RESET) {
-		prlog(PR_TRACE, "%s status=HIOMAP_E_WINDOW_RESET\n", __func__);
+		hiomap_trace("status=HIOMAP_E_WINDOW_RESET\n");
 	}
 
 	if (status & (HIOMAP_E_PROTOCOL_RESET | HIOMAP_E_WINDOW_RESET)) {
 		ctx->window_state = closed_window;
-		prlog(PR_TRACE, "%s closed_window\n", __func__);
+		hiomap_trace("closed_window\n");
 	}
 
 	unlock(&ctx->lock);
@@ -1096,32 +1089,32 @@  static int ipmi_hiomap_handle_events(struct ipmi_hiomap *ctx)
 	 * the BMC's cache must be valid if opening the window is successful.
 	 */
 	if (status & HIOMAP_E_ACK_MASK) {
-		prlog(PR_TRACE, "%s status=%i HIOMAP_E_ACK_MASK so TRY to ACK\n", __func__, status);
+		hiomap_trace("status=%i HIOMAP_E_ACK_MASK so TRY to ACK\n", status);
 		/* ACK is unversioned, can send it if the daemon is ready */
 		rc = hiomap_ack(ctx, status & HIOMAP_E_ACK_MASK);
 		if (rc) {
-			prlog(PR_NOTICE, "%s Failed to ack events rc=%i: status & HIOMAP_E_ACK_MASK=0x%x status=%i\n",
-			      __func__, rc, (status & HIOMAP_E_ACK_MASK), status);
+			hiomap_notice("Failed to ack events rc=%i: status & HIOMAP_E_ACK_MASK=0x%x status=%i\n",
+			      rc, (status & HIOMAP_E_ACK_MASK), status);
 			goto restore;
 		}
 	}
 
 	if (status & HIOMAP_E_PROTOCOL_RESET) {
-		prlog(PR_INFO, "%s Protocol was reset\n", __func__);
+		hiomap_info("Protocol was reset\n");
 
 		rc = hiomap_get_info(ctx);
 		if (rc) {
-			prerror("%s Failure to renegotiate after protocol reset\n", __func__);
+			hiomap_error("Failure to renegotiate after protocol reset\n");
 			goto restore;
 		}
 
 		rc = hiomap_get_flash_info(ctx);
 		if (rc) {
-			prerror("Failure to fetch flash info after protocol reset\n");
+			hiomap_error("Failure to fetch flash info after protocol reset\n");
 			goto restore;
 		}
 
-		prlog(PR_INFO, "%s Restored state after protocol reset\n", __func__);
+		hiomap_info("Restored state after protocol reset\n");
 	}
 
 	/*
@@ -1139,9 +1132,9 @@  restore:
 	 * than necessary, but never less than necessary.
 	 */
 	lock(&ctx->lock);
-	prlog(PR_TRACE, "%s PRE restore status=%i PRE ctx->bmc_state=%i rc=%i\n", __func__, status, ctx->bmc_state, rc);
+	hiomap_trace("PRE restore status=%i PRE ctx->bmc_state=%i rc=%i\n", status, ctx->bmc_state, rc);
 	ctx->bmc_state |= (status & HIOMAP_E_ACK_MASK);
-	prlog(PR_TRACE, "%s POST restored status=%i POST ctx->bmc_state=%i rc=%i\n", __func__, status, ctx->bmc_state, rc);
+	hiomap_trace("POST restored status=%i POST ctx->bmc_state=%i rc=%i\n", status, ctx->bmc_state, rc);
 	unlock(&ctx->lock);
 
 	return rc;
@@ -1165,7 +1158,7 @@  static int ipmi_hiomap_read(struct blocklevel_device *bl, uint64_t pos,
 
 	rc = ipmi_hiomap_handle_events(ctx);
 	if (rc) {
-		prlog(PR_NOTICE, "%s ipmi_hiomap_handle_events failed: rc=%d\n", __func__, rc);
+		hiomap_notice("ipmi_hiomap_handle_events failed: rc=%d\n", rc);
 		goto out;
 	}
 
@@ -1181,7 +1174,7 @@  static int ipmi_hiomap_read(struct blocklevel_device *bl, uint64_t pos,
 	}
 	unlock(&ctx->lock);
 
-	prlog(PR_TRACE, "Flash READ at pos %llu for %llu bytes\n", pos, len);
+	hiomap_trace("Flash READ at pos %llu for %llu bytes\n", pos, len);
 	while (len > 0) {
 		lock(&ctx->lock);
 		state = ctx->window_state;
@@ -1191,15 +1184,15 @@  static int ipmi_hiomap_read(struct blocklevel_device *bl, uint64_t pos,
 			rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_READ_WINDOW, pos,
 				len, &size);
 			if (rc) {
-				prlog(PR_NOTICE, "%s hiomap_window_move failed: rc=%d\n",
-					__func__, rc);
+				hiomap_notice("hiomap_window_move failed: rc=%d\n",
+					rc);
 				goto out;
 			}
 		} else {
 			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_HIOMAP_TICKS_DEFAULT);
 			if (rc) {
-				prlog(PR_TRACE, "%s move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-					__func__, rc, IPMI_HIOMAP_TICKS_DEFAULT);
+				hiomap_trace("move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					rc, IPMI_HIOMAP_TICKS_DEFAULT);
 				goto out;
 			}
 		}
@@ -1214,14 +1207,14 @@  static int ipmi_hiomap_read(struct blocklevel_device *bl, uint64_t pos,
 			 * just double-checking
 			 */
 			if (ctx->cc != IPMI_CC_NO_ERROR) {
-				prlog(PR_NOTICE, "%s failed: cc=%d\n", __func__, ctx->cc);
+				hiomap_notice("failed: cc=%d\n", ctx->cc);
 				rc = OPAL_HARDWARE;
 				goto out;
 			}
 			/* Perform the read for this window */
 			rc = lpc_window_read(ctx, pos, buf, size);
 			if (rc) {
-				prlog(PR_NOTICE, "%s lpc_window_read failed: rc=%d\n", __func__, rc);
+				hiomap_notice("lpc_window_read failed: rc=%d\n", rc);
 				goto out;
 			}
 
@@ -1230,7 +1223,7 @@  static int ipmi_hiomap_read(struct blocklevel_device *bl, uint64_t pos,
 			rc = hiomap_window_valid(ctx, pos, size);
 			unlock(&ctx->lock);
 			if (rc) {
-				prlog(PR_NOTICE, "%s hiomap_window_valid failed: rc=%d\n", __func__, rc);
+				hiomap_notice("hiomap_window_valid failed: rc=%d\n", rc);
 				goto out;
 			}
 
@@ -1266,7 +1259,7 @@  static int ipmi_hiomap_write(struct blocklevel_device *bl, uint64_t pos,
 
 	rc = ipmi_hiomap_handle_events(ctx);
 	if (rc) {
-		prlog(PR_NOTICE, "%s ipmi_hiomap_handle_events failed: rc=%d\n", __func__, rc);
+		hiomap_notice("ipmi_hiomap_handle_events failed: rc=%d\n", rc);
 		goto out;
 	}
 
@@ -1282,7 +1275,7 @@  static int ipmi_hiomap_write(struct blocklevel_device *bl, uint64_t pos,
 	}
 	unlock(&ctx->lock);
 
-	prlog(PR_TRACE, "Flash WRITE at pos %llu for %llu bytes\n", pos, len);
+	hiomap_trace("Flash WRITE at pos %llu for %llu bytes\n", pos, len);
 	while (len > 0) {
 		lock(&ctx->lock);
 		state = ctx->window_state;
@@ -1292,15 +1285,15 @@  static int ipmi_hiomap_write(struct blocklevel_device *bl, uint64_t pos,
 			rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_WRITE_WINDOW, pos,
 					        len, &size);
 			if (rc) {
-				prlog(PR_NOTICE, "%s hiomap_window_move failed: rc=%d\n",
-					__func__, rc);
+				hiomap_notice("hiomap_window_move failed: rc=%d\n",
+					rc);
 				goto out;
 			}
 		} else {
 			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
 			if (rc) {
-				prlog(PR_TRACE, "%s move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-					__func__, rc, IPMI_LONG_TICKS);
+				hiomap_trace("move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					rc, IPMI_LONG_TICKS);
 				goto out;
 			}
 		}
@@ -1311,7 +1304,7 @@  static int ipmi_hiomap_write(struct blocklevel_device *bl, uint64_t pos,
 
 		if (state == write_window) {
 			if (ctx->cc != IPMI_CC_NO_ERROR) {
-				prlog(PR_NOTICE, "%s failed: cc=%d\n", __func__, ctx->cc);
+				hiomap_notice("failed: cc=%d\n", ctx->cc);
 				rc = OPAL_HARDWARE;
 				goto out;
 			}
@@ -1319,19 +1312,19 @@  static int ipmi_hiomap_write(struct blocklevel_device *bl, uint64_t pos,
 			/* Perform the write for this window */
 			rc = lpc_window_write(ctx, pos, buf, size);
 			if (rc) {
-				prlog(PR_NOTICE, "%s lpc_window_write failed: rc=%d\n", __func__, rc);
+				hiomap_notice("lpc_window_write failed: rc=%d\n", rc);
 				goto out;
 			}
 
 			rc = hiomap_mark_dirty(ctx, pos, size);
 			if (rc) {
-				prlog(PR_NOTICE, "%s hiomap_mark_dirty failed: rc=%d\n", __func__, rc);
+				hiomap_notice("hiomap_mark_dirty failed: rc=%d\n", rc);
 				goto out;
 			}
 			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
 			if (rc) {
-				prlog(PR_TRACE, "%s dirty hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-					__func__, rc, IPMI_LONG_TICKS);
+				hiomap_trace("dirty hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					rc, IPMI_LONG_TICKS);
 				goto out;
 			}
 
@@ -1343,13 +1336,13 @@  static int ipmi_hiomap_write(struct blocklevel_device *bl, uint64_t pos,
 			 */
 			rc = hiomap_flush(ctx);
 			if (rc) {
-				prlog(PR_NOTICE, "%s hiomap_flush failed: rc=%d\n", __func__, rc);
+				hiomap_notice("hiomap_flush failed: rc=%d\n", rc);
 				goto out;
 			}
 			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
 			if (rc) {
-				prlog(PR_TRACE, "%s flush hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-					__func__, rc, IPMI_LONG_TICKS);
+				hiomap_trace("flush hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					rc, IPMI_LONG_TICKS);
 				goto out;
 			}
 
@@ -1385,7 +1378,7 @@  static int ipmi_hiomap_erase(struct blocklevel_device *bl, uint64_t pos,
 
 	rc = ipmi_hiomap_handle_events(ctx);
 	if (rc) {
-		prlog(PR_NOTICE, "%s ipmi_hiomap_handle_events failed: rc=%d\n", __func__, rc);
+		hiomap_notice("ipmi_hiomap_handle_events failed: rc=%d\n", rc);
 		goto out;
 	}
 
@@ -1400,7 +1393,7 @@  static int ipmi_hiomap_erase(struct blocklevel_device *bl, uint64_t pos,
 	}
 	unlock(&ctx->lock);
 
-	prlog(PR_TRACE, "Flash ERASE at pos %llu for %llu bytes\n", pos, len);
+	hiomap_trace("Flash ERASE at pos %llu for %llu bytes\n", pos, len);
 
 	while (len > 0) {
 		lock(&ctx->lock);
@@ -1411,15 +1404,15 @@  static int ipmi_hiomap_erase(struct blocklevel_device *bl, uint64_t pos,
 			rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_WRITE_WINDOW, pos,
 					        len, &size);
 			if (rc) {
-				prlog(PR_NOTICE, "%s hiomap_window_move failed: rc=%d\n",
-					__func__, rc);
+				hiomap_notice("hiomap_window_move failed: rc=%d\n",
+					rc);
 				goto out;
 			}
 		} else {
 			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
 			if (rc) {
-				prlog(PR_TRACE, "%s move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-					__func__, rc, IPMI_LONG_TICKS);
+				hiomap_trace("move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					rc, IPMI_LONG_TICKS);
 				goto out;
 			}
 		}
@@ -1429,19 +1422,19 @@  static int ipmi_hiomap_erase(struct blocklevel_device *bl, uint64_t pos,
 		unlock(&ctx->lock);
 		if (state == write_window) {
 			if (ctx->cc != IPMI_CC_NO_ERROR) {
-				prlog(PR_NOTICE, "%s failed: cc=%d\n", __func__, ctx->cc);
+				hiomap_notice("failed: cc=%d\n", ctx->cc);
 				rc = OPAL_HARDWARE;
 				goto out;
 			}
 			rc = hiomap_erase(ctx, pos, size);
 			if (rc) {
-				prlog(PR_NOTICE, "%s hiomap_erase failed: rc=%d\n", __func__, rc);
+				hiomap_notice("hiomap_erase failed: rc=%d\n", rc);
 				goto out;
 			}
 			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
 			if (rc) {
-				prlog(PR_TRACE, "%s move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-					__func__, rc, IPMI_LONG_TICKS);
+				hiomap_trace("move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					rc, IPMI_LONG_TICKS);
 				goto out;
 			}
 
@@ -1451,13 +1444,13 @@  static int ipmi_hiomap_erase(struct blocklevel_device *bl, uint64_t pos,
 			 */
 			rc = hiomap_flush(ctx);
 			if (rc) {
-				prlog(PR_NOTICE, "%s hiomap_flush failed: rc=%d\n", __func__, rc);
+				hiomap_notice("hiomap_flush failed: rc=%d\n", rc);
 				goto out;
 			}
 			rc = hiomap_wait_for_cc(ctx, &ctx->cc, &ctx->inflight_seq, IPMI_LONG_TICKS);
 			if (rc) {
-				prlog(PR_TRACE, "%s move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
-					__func__, rc, IPMI_LONG_TICKS);
+				hiomap_trace("move hiomap_wait_for_cc failed: rc=%d ticks=%i\n",
+					rc, IPMI_LONG_TICKS);
 				goto out;
 			}
 
@@ -1486,13 +1479,13 @@  static int ipmi_hiomap_get_flash_info(struct blocklevel_device *bl,
 
 	rc = ipmi_hiomap_handle_events(ctx);
 	if (rc) {
-		prlog(PR_NOTICE, "%s ipmi_hiomap_handle_events failed: rc=%d\n", __func__, rc);
+		hiomap_notice("ipmi_hiomap_handle_events failed: rc=%d\n", rc);
 		return rc;
 	}
 
 	rc = hiomap_get_flash_info(ctx);
 	if (rc) {
-		prlog(PR_NOTICE, "%s hiomap_get_flash failed: rc=%d\n", __func__, rc);
+		hiomap_notice("hiomap_get_flash_info failed: rc=%d\n", rc);
 		return rc;
 	}
 
@@ -1541,37 +1534,37 @@  int ipmi_hiomap_init(struct blocklevel_device **bl)
 	/* Ack all pending ack-able events to avoid spurious failures */
 	rc = hiomap_ack(ctx, HIOMAP_E_ACK_MASK);
 	if (rc) {
-		prlog(PR_NOTICE, "%s Failed to ack events: 0x%x\n",
-		      __func__, HIOMAP_E_ACK_MASK);
+		hiomap_notice("Failed to ack events: 0x%x\n",
+		      HIOMAP_E_ACK_MASK);
 		goto err;
 	}
 
 	rc = ipmi_sel_register(CMD_OP_HIOMAP_EVENT, hiomap_event, ctx);
 	if (rc < 0) {
-		prerror("%s Failed ipmi_sel_register: %d\n", __func__, rc);
+		hiomap_error("Failed ipmi_sel_register: %d\n", rc);
 		goto err;
 	}
 
 	/* Negotiate protocol behaviour */
 	rc = hiomap_get_info(ctx);
 	if (rc) {
-		prerror("%s Failed to get hiomap parameters: %d\n", __func__, rc);
+		hiomap_error("Failed to get hiomap parameters: %d\n", rc);
 		goto err;
 	}
 
 	/* Grab the flash parameters */
 	rc = hiomap_get_flash_info(ctx);
 	if (rc) {
-		prerror("%s Failed to get flash parameters: %d\n", __func__, rc);
+		hiomap_error("Failed to get flash parameters: %d\n", rc);
 		goto err;
 	}
 
-	prlog(PR_NOTICE, "Negotiated hiomap protocol v%u\n", ctx->version);
-	prlog(PR_NOTICE, "Block size is %uKiB\n",
+	hiomap_notice("Negotiated hiomap protocol v%u\n", ctx->version);
+	hiomap_notice("Block size is %uKiB\n",
 	      1 << (ctx->block_size_shift - 10));
-	prlog(PR_NOTICE, "BMC suggested flash timeout of %us\n", ctx->timeout);
-	prlog(PR_NOTICE, "Flash size is %uMiB\n", ctx->total_size >> 20);
-	prlog(PR_NOTICE, "Erase granule size is %uKiB\n",
+	hiomap_notice("BMC suggested flash timeout of %us\n", ctx->timeout);
+	hiomap_notice("Flash size is %uMiB\n", ctx->total_size >> 20);
+	hiomap_notice("Erase granule size is %uKiB\n",
 	      ctx->erase_granule >> 10);
 
 	ctx->bl.keep_alive = 0;