[linux-pm] [PATCH 04/13] PM: QoS: implement the per-device latency constraints

Jean Pihet jean.pihet at newoldbits.com
Tue Aug 2 03:05:17 PDT 2011


Rafael,

2011/7/31 Rafael J. Wysocki <rjw at sisk.pl>:
> On Thursday, July 28, 2011, jean.pihet at newoldbits.com wrote:
>> From: Jean Pihet <j-pihet at ti.com>
>>
>> Re-design the PM QoS implementation to support the per-device
>> constraints:
>
> Well, I guess I should have reviewed this patch before [03/13].
Hmm indeed. The split of patches into API and implementation patches
makes it rather confusing. I could add a comment in [03/13] about it.

...

>> - Misc fixes to improve code readability:
>>   . rename of fields names (request, list, constraints, class),
>
> Please avoid doing renames along with functional changes.  It makes reviewing
> extremely hard.
Ok I will move the renames in a different patch.

...
>> @@ -97,7 +97,12 @@ void device_pm_add(struct device *dev)
>>                       dev_name(dev->parent));
>>       list_add_tail(&dev->power.entry, &dpm_list);
>>       mutex_unlock(&dpm_list_mtx);
>> -     plist_head_init(&dev->power.latency_constraints, &dev->power.lock);
>> +     plist_head_init(&dev->power.latency_constraints.list, &dev->power.lock);
>> +     dev->power.latency_constraints.target_value =
>> +                                     PM_QOS_DEV_LAT_DEFAULT_VALUE;
>> +     dev->power.latency_constraints.default_value =
>> +                                     PM_QOS_DEV_LAT_DEFAULT_VALUE;
>> +     dev->power.latency_constraints.type = PM_QOS_MIN;
>
> Perhaps add a helper doing these assignments?
This code is later moved into the device insertion and removal
functions. Cf. [05/13].

...
>> @@ -464,7 +465,7 @@ struct dev_pm_info {
>>       unsigned long           accounting_timestamp;
>>       void                    *subsys_data;  /* Owned by the subsystem. */
>>  #endif
>> -     struct plist_head       latency_constraints;
>> +     struct pm_qos_constraints       latency_constraints;
>
> Why don't you simply call it "qos"?  The data type provides the information
> about what it's for now.
Ok

...
>> +struct pm_qos_constraints {
>> +     struct plist_head list;
>> +     /*
>> +      * Do not change target_value to 64 bit in order to guarantee
>> +      * accesses atomicity
>> +      */
>
> The comment doesn't belong here.  Please put it outside of the structure
> definition or after the field name (or both, in which case the "inline"
> one may be shorter, like "must not be 64-bit").
Ok

>
>> +     s32 target_value;
>> +     s32 default_value;
>> +     enum pm_qos_type type;
>> +};
>> +
>> +/*
>> + * Struct that is pre-allocated by the caller.
>> + * The handle is kept for future use (update, removal)
>> + */
>>  struct pm_qos_request {
>> -     struct plist_node list;
>> +     struct plist_node node;
>
> Please avoid doing such things along with functional changes.
Ok

...
>> +enum pm_qos_req_action {
>> +     PM_QOS_ADD_REQ,
>> +     PM_QOS_UPDATE_REQ,
>> +     PM_QOS_REMOVE_REQ
>>  };
>
> A comment describing the meaning of these would be helpful.
Ok

...
>> -static inline int pm_qos_get_value(struct pm_qos_object *o)
>> +static inline int pm_qos_get_value(struct pm_qos_constraints *c)
>
> I'd remove the "inline" if you're at it.  It's only a hint if the kernel
> is not built with "always inline" and the compiler should do the inlining
> anyway if that's a better choice.
Ok

...
>> -static inline s32 pm_qos_read_value(struct pm_qos_object *o)
>> +/*
>> + * pm_qos_read_value atomically reads and returns target_value.
>
> We have a standard for writing kerneldoc comments, please follow it.
Ok

>
>> + * target_value is updated upon update of the constraints list, using
>> + * pm_qos_set_value.
>> + *
>> + * Note: The lockless read path depends on the CPU accessing
>> + * target_value atomically.  Atomic access is only guaranteed on all CPU
>> + * types linux supports for 32 bit quantites
>
> You should say "data types" rather than "quantities" here.
Ok

>
>> + */
>> +static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
>>  {
>> -     return o->target_value;
>> +     if (c)
>> +             return c->target_value;
>> +     else
>> +             return 0;
>>  }
>>
>> -static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
>> +static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
>>  {
>> -     o->target_value = value;
>> +     c->target_value = value;
>>  }
>
> Well, I'm not sure that this function is necessary at all.  You might as well
> simply remove it as far as I'm concerned.
The idea is to provide an efficient and lockless way to get the
aggregated constraint class value. When the constraints are changing,
the new value is calculated and stored using pm_qos_get_value and
pm_qos_set_value. Then pm_qos_read_value is used to retrieve the
value. For example cpuidle calls pm_qos_request which uses
pm_qos_read_value to get the CPU/DMA minimum latency.

>
>> -static void update_target(struct pm_qos_object *o, struct plist_node *node,
>> -                       int del, int value)
>> +static void update_target(struct pm_qos_request *req,
>> +                       enum pm_qos_req_action action, int value)
>>  {
>>       unsigned long flags;
>> -     int prev_value, curr_value;
>> +     int prev_value, curr_value, new_value;
>> +     struct pm_qos_object *o = pm_qos_array[req->class];
>> +     struct pm_qos_constraints *c;
>> +
>> +     switch (req->class) {
>> +     case PM_QOS_DEV_LATENCY:
>> +             if (!req->dev) {
>> +                     WARN(1, KERN_ERR "PM QoS API called with NULL dev\n");
>> +                     return;
>> +             }
>> +             c = &req->dev->power.latency_constraints;
>> +             break;
>> +     case PM_QOS_CPU_DMA_LATENCY:
>> +     case PM_QOS_NETWORK_LATENCY:
>> +     case PM_QOS_NETWORK_THROUGHPUT:
>> +             c = o->constraints;
>> +             break;
>> +     case PM_QOS_RESERVED:
>> +     default:
>> +             WARN(1, KERN_ERR "PM QoS API called with wrong class %d, "
>> +                     "req 0x%p\n", req->class, req);
>> +             return;
>> +     }
>
> Do we _really_ need that switch()?
>
> What about introducing dev_pm_qos_add_request() and friends specifically
> for devices, such that they will take the target device object (dev) as
> their first argument?  Then, you could keep pm_qos_add_request() pretty
> much as is, right?
Yes but in that case I need to duplicate the API functions for devices
(add, update, remove). Those functions will call update_target.
I prefer the way this patch does it: simplify the API functions and
have only 1 place with the constraints management and notification
logic (in update_target).

>
>>
>>       spin_lock_irqsave(&pm_qos_lock, flags);
>> -     prev_value = pm_qos_get_value(o);
>> -     /* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */
>> -     if (value != PM_QOS_DEFAULT_VALUE) {
>> +
>> +     prev_value = pm_qos_get_value(c);
>> +     if (value == PM_QOS_DEFAULT_VALUE)
>> +             new_value = c->default_value;
>> +     else
>> +             new_value = value;
>
> What about writing that as
>
>        new_value = value != PM_QOS_DEFAULT_VALUE ? value : c->default_value;
This is shorter but more difficult to read ;)

>
>> +
>> +     switch (action) {
>> +     case PM_QOS_REMOVE_REQ:
>> +             plist_del(&req->node, &c->list);
>> +             break;
>> +     case PM_QOS_UPDATE_REQ:
>>               /*
>>                * to change the list, we atomically remove, reinit
>>                * with new value and add, then see if the extremal
>>                * changed
>>                */
>> -             plist_del(node, &o->requests);
>> -             plist_node_init(node, value);
>> -             plist_add(node, &o->requests);
>> -     } else if (del) {
>> -             plist_del(node, &o->requests);
>> -     } else {
>> -             plist_add(node, &o->requests);
>> +             plist_del(&req->node, &c->list);
>> +     case PM_QOS_ADD_REQ:
>> +             plist_node_init(&req->node, new_value);
>> +             plist_add(&req->node, &c->list);
>> +             break;
>> +     default:
>> +             /* no action */
>> +             ;
>>       }
>> -     curr_value = pm_qos_get_value(o);
>> -     pm_qos_set_value(o, curr_value);
>> +
>> +     curr_value = pm_qos_get_value(c);
>> +     pm_qos_set_value(c, curr_value);
>>       spin_unlock_irqrestore(&pm_qos_lock, flags);
>>
>>       if (prev_value != curr_value)
>>               blocking_notifier_call_chain(o->notifiers,
>
> That's why I'm thinking that it would be helpful to have a pointer
> to the notifier list from struct pm_qos_constraints .
I think having a per-device notifier list is complicating things. If a
subsystem needs to be notified, it needs to register a notifier
callback for _every_ device in the system.
As a first implementation for OMAP the low-level PM code is using a
single notifier to retrieve all the devices' latency constraints and
apply them to the power domains. I do not see how this could work with
a per-device notifier list.

>
> Besides, you can use "pm_qos_array[req->class]->notifiers" instead of
> "o->notifiers".
Ok

...
>> +static int find_pm_qos_object_by_minor(int minor)
>> +{
>> +     int pm_qos_class;
>> +
>> +     for (pm_qos_class = 0;
>> +             pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
>> +             if (minor ==
>> +                     pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
>> +                     return pm_qos_class;
>> +     }
>> +     return -1;
>> +}
>
> This function doesn't seem to be used anywhere, what's the purpose of it?
It is used by pm_qos_power_open in order to retrieve the class
associated with the MISC device.
BTW this patch is moving the code so that all the MISC related
functions are grouped together.

>
>> +
>>  static int pm_qos_power_open(struct inode *inode, struct file *filp)
>>  {
>>       struct pm_qos_parameters pm_qos_params;
...
>
> Thanks,
> Rafael

Thanks,
Jean


More information about the linux-pm mailing list