
[v3,06/10] mtd: intel-dg: wake card on operations

Message ID 20241119140112.790720-7-alexander.usyskin@intel.com
State New
Series [v3,01/10] mtd: add driver for intel graphics non-volatile memory device

Commit Message

Usyskin, Alexander Nov. 19, 2024, 2:01 p.m. UTC
Enable runtime PM in the mtd driver to notify the graphics driver
that the whole card should be kept awake while nvm operations are
performed through this driver.

CC: Lucas De Marchi <lucas.demarchi@intel.com>
Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
---
 drivers/mtd/devices/mtd-intel-dg.c | 70 +++++++++++++++++++++++++-----
 1 file changed, 58 insertions(+), 12 deletions(-)

Comments

Rodrigo Vivi Dec. 17, 2024, 10:49 p.m. UTC | #1
On Tue, Nov 19, 2024 at 04:01:08PM +0200, Alexander Usyskin wrote:
> Enable runtime PM in the mtd driver to notify the graphics driver
> that the whole card should be kept awake while nvm operations are
> performed through this driver.
> 
> CC: Lucas De Marchi <lucas.demarchi@intel.com>
> Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
> Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
> ---
>  drivers/mtd/devices/mtd-intel-dg.c | 70 +++++++++++++++++++++++++-----
>  1 file changed, 58 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/mtd/devices/mtd-intel-dg.c b/drivers/mtd/devices/mtd-intel-dg.c
> index 230bf444b7fe..9dd23b11ee95 100644
> --- a/drivers/mtd/devices/mtd-intel-dg.c
> +++ b/drivers/mtd/devices/mtd-intel-dg.c
> @@ -15,11 +15,14 @@
>  #include <linux/module.h>
>  #include <linux/mtd/mtd.h>
>  #include <linux/mtd/partitions.h>
> +#include <linux/pm_runtime.h>
>  #include <linux/string.h>
>  #include <linux/slab.h>
>  #include <linux/sizes.h>
>  #include <linux/types.h>
>  
> +#define INTEL_DG_NVM_RPM_TIMEOUT 500
> +
>  struct intel_dg_nvm {
>  	struct kref refcnt;
>  	struct mtd_info mtd;
> @@ -460,6 +463,7 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
>  	loff_t from;
>  	size_t len;
>  	size_t total_len;
> +	int ret = 0;
>  
>  	if (WARN_ON(!nvm))
>  		return -EINVAL;
> @@ -474,20 +478,28 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
>  	total_len = info->len;
>  	addr = info->addr;
>  
> +	ret = pm_runtime_resume_and_get(mtd->dev.parent);

on this, I really don't believe this is right; we should use
the parent-child relationship in our favor and only have the mtd
device handle its own runtime PM...
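
For illustration, a minimal sketch of that alternative, assuming runtime PM
is enabled on the mtd device itself (intel_dg_nvm_rpm_get() is a hypothetical
helper, not code from this series):

	#include <linux/mtd/mtd.h>
	#include <linux/pm_runtime.h>

	/* Take the reference on the mtd device itself and let the driver
	 * core runtime-resume the graphics parent through the device
	 * hierarchy, instead of grabbing mtd->dev.parent directly. */
	static int intel_dg_nvm_rpm_get(struct mtd_info *mtd)
	{
		int ret;

		ret = pm_runtime_resume_and_get(&mtd->dev);
		if (ret < 0)
			dev_err(&mtd->dev, "rpm: get failed %d\n", ret);
		return ret;
	}

This presumes mtd->dev is a registered device with runtime PM enabled, which
is exactly what the rest of the thread turns out to hinge on.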

> +	if (ret < 0) {
> +		dev_err(&mtd->dev, "rpm: get failed %d\n", ret);
> +		return ret;
> +	}
> +
>  	guard(mutex)(&nvm->lock);
>  
>  	while (total_len > 0) {
>  		if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
>  			dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
>  			info->fail_addr = addr;
> -			return -ERANGE;
> +			ret = -ERANGE;
> +			goto out;
>  		}
>  
>  		idx = idg_nvm_get_region(nvm, addr);
>  		if (idx >= nvm->nregions) {
>  			dev_err(&mtd->dev, "out of range");
>  			info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
> -			return -ERANGE;
> +			ret = -ERANGE;
> +			goto out;
>  		}
>  
>  		from = addr - nvm->regions[idx].offset;
> @@ -503,14 +515,18 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
>  		if (bytes < 0) {
>  			dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
>  			info->fail_addr += nvm->regions[idx].offset;
> -			return bytes;
> +			ret = bytes;
> +			goto out;
>  		}
>  
>  		addr += len;
>  		total_len -= len;
>  	}
>  
> -	return 0;
> +out:
> +	pm_runtime_mark_last_busy(mtd->dev.parent);
> +	pm_runtime_put_autosuspend(mtd->dev.parent);
> +	return ret;
>  }
>  
>  static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
> @@ -539,17 +555,25 @@ static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
>  	if (len > nvm->regions[idx].size - from)
>  		len = nvm->regions[idx].size - from;
>  
> +	ret = pm_runtime_resume_and_get(mtd->dev.parent);
> +	if (ret < 0) {
> +		dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
> +		return ret;
> +	}
> +
>  	guard(mutex)(&nvm->lock);
>  
>  	ret = idg_read(nvm, region, from, len, buf);
>  	if (ret < 0) {
>  		dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
> -		return ret;
> +	} else {
> +		*retlen = ret;
> +		ret = 0;
>  	}
>  
> -	*retlen = ret;
> -
> -	return 0;
> +	pm_runtime_mark_last_busy(mtd->dev.parent);
> +	pm_runtime_put_autosuspend(mtd->dev.parent);
> +	return ret;
>  }
>  
>  static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
> @@ -578,17 +602,25 @@ static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
>  	if (len > nvm->regions[idx].size - to)
>  		len = nvm->regions[idx].size - to;
>  
> +	ret = pm_runtime_resume_and_get(mtd->dev.parent);
> +	if (ret < 0) {
> +		dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
> +		return ret;
> +	}
> +
>  	guard(mutex)(&nvm->lock);
>  
>  	ret = idg_write(nvm, region, to, len, buf);
>  	if (ret < 0) {
>  		dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
> -		return ret;
> +	} else {
> +		*retlen = ret;
> +		ret = 0;
>  	}
>  
> -	*retlen = ret;
> -
> -	return 0;
> +	pm_runtime_mark_last_busy(mtd->dev.parent);
> +	pm_runtime_put_autosuspend(mtd->dev.parent);
> +	return ret;
>  }
>  
>  static void intel_dg_nvm_release(struct kref *kref)
> @@ -720,6 +752,17 @@ static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
>  		n++;
>  	}
>  
> +	devm_pm_runtime_enable(device);
> +
> +	pm_runtime_set_autosuspend_delay(device, INTEL_DG_NVM_RPM_TIMEOUT);
> +	pm_runtime_use_autosuspend(device);
> +
> +	ret = pm_runtime_resume_and_get(device);
> +	if (ret < 0) {
> +		dev_err(device, "rpm: get failed %d\n", ret);
> +		goto err_norpm;
> +	}
> +
>  	nvm->base = devm_ioremap_resource(device, &invm->bar);
>  	if (IS_ERR(nvm->base)) {
>  		dev_err(device, "mmio not mapped\n");
> @@ -742,9 +785,12 @@ static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
>  
>  	dev_set_drvdata(&aux_dev->dev, nvm);
>  
> +	pm_runtime_put(device);
>  	return 0;
>  
>  err:
> +	pm_runtime_put(device);
> +err_norpm:
>  	kref_put(&nvm->refcnt, intel_dg_nvm_release);
>  	return ret;
>  }
> -- 
> 2.43.0
>
Poosa, Karthik Dec. 18, 2024, 5:13 a.m. UTC | #2
On 18-12-2024 04:19, Rodrigo Vivi wrote:
> On Tue, Nov 19, 2024 at 04:01:08PM +0200, Alexander Usyskin wrote:
>> Enable runtime PM in the mtd driver to notify the graphics driver
>> that the whole card should be kept awake while nvm operations are
>> performed through this driver.
>>
>> CC: Lucas De Marchi <lucas.demarchi@intel.com>
>> Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
>> Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
>> ---
>>   drivers/mtd/devices/mtd-intel-dg.c | 70 +++++++++++++++++++++++++-----
>>   1 file changed, 58 insertions(+), 12 deletions(-)
>>
>> diff --git a/drivers/mtd/devices/mtd-intel-dg.c b/drivers/mtd/devices/mtd-intel-dg.c
>> index 230bf444b7fe..9dd23b11ee95 100644
>> --- a/drivers/mtd/devices/mtd-intel-dg.c
>> +++ b/drivers/mtd/devices/mtd-intel-dg.c
>> @@ -15,11 +15,14 @@
>>   #include <linux/module.h>
>>   #include <linux/mtd/mtd.h>
>>   #include <linux/mtd/partitions.h>
>> +#include <linux/pm_runtime.h>
>>   #include <linux/string.h>
>>   #include <linux/slab.h>
>>   #include <linux/sizes.h>
>>   #include <linux/types.h>
>>   
>> +#define INTEL_DG_NVM_RPM_TIMEOUT 500
>> +
>>   struct intel_dg_nvm {
>>   	struct kref refcnt;
>>   	struct mtd_info mtd;
>> @@ -460,6 +463,7 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
>>   	loff_t from;
>>   	size_t len;
>>   	size_t total_len;
>> +	int ret = 0;
>>   
>>   	if (WARN_ON(!nvm))
>>   		return -EINVAL;
>> @@ -474,20 +478,28 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
>>   	total_len = info->len;
>>   	addr = info->addr;
>>   
>> +	ret = pm_runtime_resume_and_get(mtd->dev.parent);
> on this, I really don't believe this is right; we should use
> the parent-child relationship in our favor and only have the mtd
> device handle its own runtime PM...
I concur with Rodrigo. If the parent-child relationship is preserved, 
the parent will resume before the child, eliminating the need to 
explicitly wake the parent.
Please refer to https://docs.kernel.org/driver-api/pm/devices.html

The ordering of the device hierarchy is defined by the order in which
devices get registered: "a child can never be registered, probed or
resumed before its parent; and can’t be removed or suspended after
that parent."
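
For reference, a simplified paraphrase of how the runtime PM core honours
that ordering (loosely based on rpm_resume() in drivers/base/power/runtime.c;
the real code also handles locking, usage counters and async requests):

	/* Sketch: runtime-resuming a device pulls its parent chain up
	 * first, so a child's pm_runtime_resume_and_get() is enough to
	 * wake the whole card. */
	static int rpm_resume_sketch(struct device *dev)
	{
		struct device *parent = dev->parent;
		int ret;

		if (parent) {
			ret = pm_runtime_get_sync(parent); /* recurses upwards */
			if (ret < 0) {
				pm_runtime_put_noidle(parent);
				return ret;
			}
		}
		/* ...then run dev's own ->runtime_resume() callback... */
		return 0;
	}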
>
>> +	if (ret < 0) {
>> +		dev_err(&mtd->dev, "rpm: get failed %d\n", ret);
>> +		return ret;
>> +	}
>> +
>>   	guard(mutex)(&nvm->lock);
>>   
>>   	while (total_len > 0) {
>>   		if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
>>   			dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
>>   			info->fail_addr = addr;
>> -			return -ERANGE;
>> +			ret = -ERANGE;
>> +			goto out;
>>   		}
>>   
>>   		idx = idg_nvm_get_region(nvm, addr);
>>   		if (idx >= nvm->nregions) {
>>   			dev_err(&mtd->dev, "out of range");
>>   			info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
>> -			return -ERANGE;
>> +			ret = -ERANGE;
>> +			goto out;
>>   		}
>>   
>>   		from = addr - nvm->regions[idx].offset;
>> @@ -503,14 +515,18 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
>>   		if (bytes < 0) {
>>   			dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
>>   			info->fail_addr += nvm->regions[idx].offset;
>> -			return bytes;
>> +			ret = bytes;
>> +			goto out;
>>   		}
>>   
>>   		addr += len;
>>   		total_len -= len;
>>   	}
>>   
>> -	return 0;
>> +out:
>> +	pm_runtime_mark_last_busy(mtd->dev.parent);
>> +	pm_runtime_put_autosuspend(mtd->dev.parent);
>> +	return ret;
>>   }
>>   
>>   static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
>> @@ -539,17 +555,25 @@ static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
>>   	if (len > nvm->regions[idx].size - from)
>>   		len = nvm->regions[idx].size - from;
>>   
>> +	ret = pm_runtime_resume_and_get(mtd->dev.parent);
>> +	if (ret < 0) {
>> +		dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
>> +		return ret;
>> +	}
>> +
>>   	guard(mutex)(&nvm->lock);
>>   
>>   	ret = idg_read(nvm, region, from, len, buf);
>>   	if (ret < 0) {
>>   		dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
>> -		return ret;
>> +	} else {
>> +		*retlen = ret;
>> +		ret = 0;
>>   	}
>>   
>> -	*retlen = ret;
>> -
>> -	return 0;
>> +	pm_runtime_mark_last_busy(mtd->dev.parent);
>> +	pm_runtime_put_autosuspend(mtd->dev.parent);
>> +	return ret;
>>   }
>>   
>>   static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
>> @@ -578,17 +602,25 @@ static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
>>   	if (len > nvm->regions[idx].size - to)
>>   		len = nvm->regions[idx].size - to;
>>   
>> +	ret = pm_runtime_resume_and_get(mtd->dev.parent);
>> +	if (ret < 0) {
>> +		dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
>> +		return ret;
>> +	}
>> +
>>   	guard(mutex)(&nvm->lock);
>>   
>>   	ret = idg_write(nvm, region, to, len, buf);
>>   	if (ret < 0) {
>>   		dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
>> -		return ret;
>> +	} else {
>> +		*retlen = ret;
>> +		ret = 0;
>>   	}
>>   
>> -	*retlen = ret;
>> -
>> -	return 0;
>> +	pm_runtime_mark_last_busy(mtd->dev.parent);
>> +	pm_runtime_put_autosuspend(mtd->dev.parent);
>> +	return ret;
>>   }
>>   
>>   static void intel_dg_nvm_release(struct kref *kref)
>> @@ -720,6 +752,17 @@ static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
>>   		n++;
>>   	}
>>   
>> +	devm_pm_runtime_enable(device);
>> +
>> +	pm_runtime_set_autosuspend_delay(device, INTEL_DG_NVM_RPM_TIMEOUT);
>> +	pm_runtime_use_autosuspend(device);
>> +
>> +	ret = pm_runtime_resume_and_get(device);
>> +	if (ret < 0) {
>> +		dev_err(device, "rpm: get failed %d\n", ret);
>> +		goto err_norpm;
>> +	}
>> +
>>   	nvm->base = devm_ioremap_resource(device, &invm->bar);
>>   	if (IS_ERR(nvm->base)) {
>>   		dev_err(device, "mmio not mapped\n");
>> @@ -742,9 +785,12 @@ static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
>>   
>>   	dev_set_drvdata(&aux_dev->dev, nvm);
>>   
>> +	pm_runtime_put(device);
>>   	return 0;
>>   
>>   err:
>> +	pm_runtime_put(device);
>> +err_norpm:
>>   	kref_put(&nvm->refcnt, intel_dg_nvm_release);
>>   	return ret;
>>   }
>> -- 
>> 2.43.0
>>
Usyskin, Alexander Dec. 18, 2024, 7:38 a.m. UTC | #3
> >> @@ -474,20 +478,28 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
> >>   	total_len = info->len;
> >>   	addr = info->addr;
> >>
> >> +	ret = pm_runtime_resume_and_get(mtd->dev.parent);
> > on this, I really don't believe this is right; we should use
> > the parent-child relationship in our favor and only have the mtd
> > device handle its own runtime PM...
> I concur with Rodrigo. If the parent-child relationship is preserved,
> the parent will resume before the child, eliminating the need to
> explicitly wake the parent.
> Please refer to https://docs.kernel.org/driver-api/pm/devices.html
> 
> The ordering of the device hierarchy is defined by the order in which
> devices get registered: "a child can never be registered, probed or
> resumed before its parent; and can’t be removed or suspended after
> that parent."
> >

If so, I have to add a patch to the mtd subsystem to always have the device
for the master initialized regardless of the kernel flag.
Only to initialize the struct device, not to create a full mtd node.

Miquel - do you agree to this?

--
Thanks,
Sasha
Usyskin, Alexander Dec. 18, 2024, 3:58 p.m. UTC | #4
> > >> @@ -474,20 +478,28 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
> > >>   	total_len = info->len;
> > >>   	addr = info->addr;
> > >>
> > >> +	ret = pm_runtime_resume_and_get(mtd->dev.parent);
> > > on this, I really don't believe this is right; we should use
> > > the parent-child relationship in our favor and only have the mtd
> > > device handle its own runtime PM...
> > I concur with Rodrigo. If the parent-child relationship is preserved,
> > the parent will resume before the child, eliminating the need to
> > explicitly wake the parent.
> > Please refer to https://docs.kernel.org/driver-api/pm/devices.html
> >
> > The ordering of the device hierarchy is defined by the order in which
> > devices get registered: "a child can never be registered, probed or
> > resumed before its parent; and can’t be removed or suspended after
> > that parent."
> > >
> 
> If so, I have to add a patch to the mtd subsystem to always have the device
> for the master initialized regardless of the kernel flag.
> Only to initialize the struct device, not to create a full mtd node.
> 
> Miquel - do you agree to this?

I've looked deeply into the mtd code and there is an interesting discrepancy:
- the mtd partition creates a device but, if the master device does not exist, sets its parent pointer to the parent of its parent
- the callbacks, like _write/_read/_erase, receive the master object pointer
Thus, we can't use the proper partition device for power management...

Maybe rewrite these callbacks to receive the actual partition (a huge change all over)?
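
To make the discrepancy concrete, a condensed paraphrase of the core read
path (simplified from drivers/mtd/mtdcore.c, not verbatim; the wrapper name
is made up):

	static int sketch_mtd_read(struct mtd_info *mtd, loff_t from,
				   size_t len, size_t *retlen, u_char *buf)
	{
		/* The core funnels all I/O to the master mtd_info... */
		struct mtd_info *master = mtd_get_master(mtd);

		/* ...so the driver callback only ever sees the master's
		 * mtd->dev, never the partition device that carries the
		 * proper parent chain. */
		return master->_read(master, mtd_get_master_ofs(mtd, from),
				     len, retlen, buf);
	}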

> 
> --
> Thanks,
> Sasha
>
Miquel Raynal Dec. 23, 2024, 7:21 p.m. UTC | #5
Hello Alexander,

>> If so, I have to add a patch to the mtd subsystem to always have the device
>> for the master initialized regardless of the kernel flag.
>> Only to initialize the struct device, not to create a full mtd node.
>> 
>> Miquel - do you agree to this?

Conceptually yes, but please mind one thing: we do not break
userspace. So if you want to keep the master mtd device, fine, but you
need to do it in a consistent way so that people not enabling the kernel
flag won't get a new device in their rootfs, shifting all indexes
upwards.
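
A concrete (hypothetical) example of that index shift:

	without CONFIG_MTD_PARTITIONED_MASTER:   with the master always exposed:
	  /dev/mtd0  <- first partition            /dev/mtd0  <- master (new)
	  /dev/mtd1  <- second partition           /dev/mtd1  <- first partition
	                                           /dev/mtd2  <- second partition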

That being said, you are probably going in the right direction by doing
that.

Thanks,
Miquèl
Usyskin, Alexander Dec. 29, 2024, 3:08 p.m. UTC | #6
> 
> Hello Alexander,
> 
> >> If so, I have to add a patch to the mtd subsystem to always have the
> >> device for the master initialized regardless of the kernel flag.
> >> Only to initialize the struct device, not to create a full mtd node.
> >>
> >> Miquel - do you agree to this?
> 
> Conceptually yes, but please mind one thing: we do not break
> userspace. So if you want to keep the master mtd device, fine, but you
> need to do it in a consistent way so that people not enabling the kernel
> flag won't get a new device in their rootfs, shifting all indexes
> upwards.
> 
> That being said, you are probably going in the right direction by doing
> that.
> 
> Thanks,
> Miquèl

I've looked into this endeavour and it seems there is a need for a special
device class and careful attention in the release flow to use the right class.
It will take time to do this right.
Miquel, Rodrigo, Karthik, Lucas - may the DG NVM code be merged in its current
form and this device be added later?

--
Thanks,
Sasha
Miquel Raynal Dec. 30, 2024, 8:39 a.m. UTC | #7
On 29/12/2024 at 15:08:56 GMT, "Usyskin, Alexander" <alexander.usyskin@intel.com> wrote:

>> 
>> Hello Alexander,
>> 
>> >> If so, I have to add a patch to the mtd subsystem to always have the
>> >> device for the master initialized regardless of the kernel flag.
>> >> Only to initialize the struct device, not to create a full mtd node.
>> >>
>> >> Miquel - do you agree to this?
>> 
>> Conceptually yes, but please mind one thing: we do not break
>> userspace. So if you want to keep the master mtd device, fine, but you
>> need to do it in a consistent way so that people not enabling the kernel
>> flag won't get a new device in their rootfs, shifting all indexes
>> upwards.
>> 
>> That being said, you are probably going in the right direction by doing
>> that.
>> 
>> Thanks,
>> Miquèl
>
> I've looked into this endeavour and it seems there is a need for a special
> device class and careful attention in the release flow to use the right class.
> It will take time to do this right.
> Miquel, Rodrigo, Karthik, Lucas - may the DG NVM code be merged in its current
> form and this device be added later?

In general, yes. But maybe you want to select
CONFIG_MTD_PARTITIONED_MASTER (IIUC your problem).

Thanks,
Miquèl
Usyskin, Alexander Jan. 1, 2025, 3:54 p.m. UTC | #8
> >>
> >> >> If so, I have to add a patch to the mtd subsystem to always have the
> >> >> device for the master initialized regardless of the kernel flag.
> >> >> Only to initialize the struct device, not to create a full mtd node.
> >> >>
> >> >> Miquel - do you agree to this?
> >>
> >> Conceptually yes, but please mind one thing: we do not break
> >> userspace. So if you want to keep the master mtd device, fine, but you
> >> need to do it in a consistent way so that people not enabling the kernel
> >> flag won't get a new device in their rootfs, shifting all indexes
> >> upwards.
> >>
> >> That being said, you are probably going in the right direction by doing
> >> that.
> >>
> >> Thanks,
> >> Miquèl
> >
> > I've looked into this endeavour and it seems there is a need for a special
> > device class and careful attention in the release flow to use the right class.
> > It will take time to do this right.
> > Miquel, Rodrigo, Karthik, Lucas - may the DG NVM code be merged in its
> > current form and this device be added later?
> 
> In general, yes. But maybe you want to select
> CONFIG_MTD_PARTITIONED_MASTER (IIUC your problem).
> 
> Thanks,
> Miquèl

Our target is the usual distribution kernel, which never enables CONFIG_MTD_PARTITIONED_MASTER.
Anyway, I've prepared a patch that always creates the master device and pushed
a new series revision.
Miquel, if you prefer to review and push the master device patch before
the whole series, I can split it out.

--
Thanks,
Sasha

Patch

diff --git a/drivers/mtd/devices/mtd-intel-dg.c b/drivers/mtd/devices/mtd-intel-dg.c
index 230bf444b7fe..9dd23b11ee95 100644
--- a/drivers/mtd/devices/mtd-intel-dg.c
+++ b/drivers/mtd/devices/mtd-intel-dg.c
@@ -15,11 +15,14 @@ 
 #include <linux/module.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/pm_runtime.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/sizes.h>
 #include <linux/types.h>
 
+#define INTEL_DG_NVM_RPM_TIMEOUT 500
+
 struct intel_dg_nvm {
 	struct kref refcnt;
 	struct mtd_info mtd;
@@ -460,6 +463,7 @@  static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
 	loff_t from;
 	size_t len;
 	size_t total_len;
+	int ret = 0;
 
 	if (WARN_ON(!nvm))
 		return -EINVAL;
@@ -474,20 +478,28 @@  static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
 	total_len = info->len;
 	addr = info->addr;
 
+	ret = pm_runtime_resume_and_get(mtd->dev.parent);
+	if (ret < 0) {
+		dev_err(&mtd->dev, "rpm: get failed %d\n", ret);
+		return ret;
+	}
+
 	guard(mutex)(&nvm->lock);
 
 	while (total_len > 0) {
 		if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
 			dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
 			info->fail_addr = addr;
-			return -ERANGE;
+			ret = -ERANGE;
+			goto out;
 		}
 
 		idx = idg_nvm_get_region(nvm, addr);
 		if (idx >= nvm->nregions) {
 			dev_err(&mtd->dev, "out of range");
 			info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
-			return -ERANGE;
+			ret = -ERANGE;
+			goto out;
 		}
 
 		from = addr - nvm->regions[idx].offset;
@@ -503,14 +515,18 @@  static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
 		if (bytes < 0) {
 			dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
 			info->fail_addr += nvm->regions[idx].offset;
-			return bytes;
+			ret = bytes;
+			goto out;
 		}
 
 		addr += len;
 		total_len -= len;
 	}
 
-	return 0;
+out:
+	pm_runtime_mark_last_busy(mtd->dev.parent);
+	pm_runtime_put_autosuspend(mtd->dev.parent);
+	return ret;
 }
 
 static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -539,17 +555,25 @@  static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 	if (len > nvm->regions[idx].size - from)
 		len = nvm->regions[idx].size - from;
 
+	ret = pm_runtime_resume_and_get(mtd->dev.parent);
+	if (ret < 0) {
+		dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+		return ret;
+	}
+
 	guard(mutex)(&nvm->lock);
 
 	ret = idg_read(nvm, region, from, len, buf);
 	if (ret < 0) {
 		dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
-		return ret;
+	} else {
+		*retlen = ret;
+		ret = 0;
 	}
 
-	*retlen = ret;
-
-	return 0;
+	pm_runtime_mark_last_busy(mtd->dev.parent);
+	pm_runtime_put_autosuspend(mtd->dev.parent);
+	return ret;
 }
 
 static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -578,17 +602,25 @@  static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
 	if (len > nvm->regions[idx].size - to)
 		len = nvm->regions[idx].size - to;
 
+	ret = pm_runtime_resume_and_get(mtd->dev.parent);
+	if (ret < 0) {
+		dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+		return ret;
+	}
+
 	guard(mutex)(&nvm->lock);
 
 	ret = idg_write(nvm, region, to, len, buf);
 	if (ret < 0) {
 		dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
-		return ret;
+	} else {
+		*retlen = ret;
+		ret = 0;
 	}
 
-	*retlen = ret;
-
-	return 0;
+	pm_runtime_mark_last_busy(mtd->dev.parent);
+	pm_runtime_put_autosuspend(mtd->dev.parent);
+	return ret;
 }
 
 static void intel_dg_nvm_release(struct kref *kref)
@@ -720,6 +752,17 @@  static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
 		n++;
 	}
 
+	devm_pm_runtime_enable(device);
+
+	pm_runtime_set_autosuspend_delay(device, INTEL_DG_NVM_RPM_TIMEOUT);
+	pm_runtime_use_autosuspend(device);
+
+	ret = pm_runtime_resume_and_get(device);
+	if (ret < 0) {
+		dev_err(device, "rpm: get failed %d\n", ret);
+		goto err_norpm;
+	}
+
 	nvm->base = devm_ioremap_resource(device, &invm->bar);
 	if (IS_ERR(nvm->base)) {
 		dev_err(device, "mmio not mapped\n");
@@ -742,9 +785,12 @@  static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
 
 	dev_set_drvdata(&aux_dev->dev, nvm);
 
+	pm_runtime_put(device);
 	return 0;
 
 err:
+	pm_runtime_put(device);
+err_norpm:
 	kref_put(&nvm->refcnt, intel_dg_nvm_release);
 	return ret;
 }