@@ -41,6 +41,9 @@ struct sbi_ipi_event_ops {
* Update callback to save/enqueue data for remote HART
* Note: This is an optional callback and it is called just before
* triggering IPI to remote HART.
+ * @return 0 on success
+ * @return -1 to stop sending this IPI (request already completed on the local hart)
+ * @return -2 if the update could not be enqueued and must be retried for this hart
*/
int (* update)(struct sbi_scratch *scratch,
struct sbi_scratch *remote_scratch,
@@ -53,7 +53,7 @@ static int sbi_ipi_update(struct sbi_scratch *scratch, u32 remote_hartid,
if (ipi_ops->update) {
ret = ipi_ops->update(scratch, remote_scratch,
remote_hartid, data);
- if (ret < 0)
+ if (ret != 0)
return ret;
}
@@ -94,51 +94,50 @@ static int sbi_ipi_sync(struct sbi_scratch *scratch, u32 event)
*/
int sbi_ipi_send_many(ulong hmask, ulong hbase, u32 event, void *data)
{
- int rc;
- ulong i, m, n;
+ int rc, done;
+ ulong i, m;
+ struct sbi_hartmask target_mask = {0};
+ struct sbi_hartmask retry_mask = {0};
struct sbi_domain *dom = sbi_domain_thishart_ptr();
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+ /* find target harts */
if (hbase != -1UL) {
rc = sbi_hsm_hart_interruptible_mask(dom, hbase, &m);
if (rc)
return rc;
m &= hmask;
- n = m;
- /* update IPIs */
for (i = hbase; m; i++, m >>= 1) {
if (m & 1UL)
- sbi_ipi_update(scratch, i, event, data);
- }
-
- /* sync IPIs */
- m = n;
- for (i = hbase; m; i++, m >>= 1) {
- if (m & 1UL)
- sbi_ipi_sync(scratch, event);
+ sbi_hartmask_set_hart(i, &target_mask);
}
} else {
- /* update IPIs */
hbase = 0;
while (!sbi_hsm_hart_interruptible_mask(dom, hbase, &m)) {
for (i = hbase; m; i++, m >>= 1) {
if (m & 1UL)
- sbi_ipi_update(scratch, i, event, data);
+ sbi_hartmask_set_hart(i, &target_mask);
}
hbase += BITS_PER_LONG;
}
+ }
- /* sync IPIs */
- hbase = 0;
- while (!sbi_hsm_hart_interruptible_mask(dom, hbase, &m)) {
- for (i = hbase; m; i++, m >>= 1) {
- if (m & 1UL)
- sbi_ipi_sync(scratch, event);
- }
- hbase += BITS_PER_LONG;
+ /* update IPIs */
+ retry_mask = target_mask;
+ do {
+ done = true;
+ sbi_hartmask_for_each_hart(i, &retry_mask) {
+ rc = sbi_ipi_update(scratch, i, event, data);
+ if (rc == -2)
+ done = false;
+ else
+ sbi_hartmask_clear_hart(i, &retry_mask);
}
- }
+ } while (!done);
+
+ /* sync IPIs */
+ sbi_ipi_sync(scratch, event);
return 0;
}
@@ -364,7 +364,7 @@ static int tlb_update(struct sbi_scratch *scratch,
ret = sbi_fifo_inplace_update(tlb_fifo_r, data, tlb_update_cb);
- while (ret == SBI_FIFO_UNCHANGED && sbi_fifo_enqueue(tlb_fifo_r, data) < 0) {
+ if (ret == SBI_FIFO_UNCHANGED && sbi_fifo_enqueue(tlb_fifo_r, data) < 0) {
/**
* For now, Busy loop until there is space in the fifo.
* There may be case where target hart is also
@@ -376,6 +376,7 @@ static int tlb_update(struct sbi_scratch *scratch,
tlb_process_once(scratch);
sbi_dprintf("hart%d: hart%d tlb fifo full\n",
curr_hartid, remote_hartid);
+ return -2;
}
tlb_sync = sbi_scratch_offset_ptr(scratch, tlb_sync_off);
When the tlb_fifo is full, the sender busy-waits until space becomes available, which delays IPI updates to the remaining harts. Optimize this by letting the update callback return a retry indication, so the other harts are serviced first and the full hart is retried afterwards. Signed-off-by: Xiang W <wxjstz@126.com> --- include/sbi/sbi_ipi.h | 3 +++ lib/sbi/sbi_ipi.c | 47 +++++++++++++++++++++---------------------- lib/sbi/sbi_tlb.c | 3 ++- 3 files changed, 28 insertions(+), 25 deletions(-)