[PATCH v3 14/32] xfs: convert buftarg LRU to generic code

Greg Thelen gthelen at google.com
Mon Apr 15 05:38:32 UTC 2013


On Mon, Apr 08 2013, Glauber Costa wrote:

> From: Dave Chinner <dchinner at redhat.com>
>
> Convert the buftarg LRU to use the new generic LRU list and take
> advantage of the functionality it supplies to make the buffer cache
> shrinker node aware.
>
> Signed-off-by: Glauber Costa <glommer at parallels.com>
> Signed-off-by: Dave Chinner <dchinner at redhat.com>
>
> Conflicts with 3b19034d4f:
> 	fs/xfs/xfs_buf.c
> ---
>  fs/xfs/xfs_buf.c | 167 +++++++++++++++++++++++++------------------------------
>  fs/xfs/xfs_buf.h |   5 +-
>  2 files changed, 79 insertions(+), 93 deletions(-)
>
> diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
> index 8459b5d..4cc6632 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -85,20 +85,14 @@ xfs_buf_vmap_len(
>   * The LRU takes a new reference to the buffer so that it will only be freed
>   * once the shrinker takes the buffer off the LRU.
>   */
> -STATIC void
> +static void
>  xfs_buf_lru_add(
>  	struct xfs_buf	*bp)
>  {
> -	struct xfs_buftarg *btp = bp->b_target;
> -
> -	spin_lock(&btp->bt_lru_lock);
> -	if (list_empty(&bp->b_lru)) {
> -		atomic_inc(&bp->b_hold);
> -		list_add_tail(&bp->b_lru, &btp->bt_lru);
> -		btp->bt_lru_nr++;
> +	if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
>  		bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
> +		atomic_inc(&bp->b_hold);
>  	}
> -	spin_unlock(&btp->bt_lru_lock);
>  }
>  
>  /*
> @@ -107,24 +101,13 @@ xfs_buf_lru_add(
>   * The unlocked check is safe here because it only occurs when there are not
>   * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there
>   * to optimise the shrinker removing the buffer from the LRU and calling
> - * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
> - * bt_lru_lock.
> + * xfs_buf_free().
>   */
> -STATIC void
> +static void
>  xfs_buf_lru_del(
>  	struct xfs_buf	*bp)
>  {
> -	struct xfs_buftarg *btp = bp->b_target;
> -
> -	if (list_empty(&bp->b_lru))
> -		return;
> -
> -	spin_lock(&btp->bt_lru_lock);
> -	if (!list_empty(&bp->b_lru)) {
> -		list_del_init(&bp->b_lru);
> -		btp->bt_lru_nr--;
> -	}
> -	spin_unlock(&btp->bt_lru_lock);
> +	list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
>  }
>  
>  /*
> @@ -151,18 +134,10 @@ xfs_buf_stale(
>  	bp->b_flags &= ~_XBF_DELWRI_Q;
>  
>  	atomic_set(&(bp)->b_lru_ref, 0);
> -	if (!list_empty(&bp->b_lru)) {
> -		struct xfs_buftarg *btp = bp->b_target;
> -
> -		spin_lock(&btp->bt_lru_lock);
> -		if (!list_empty(&bp->b_lru) &&
> -		    !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) {
> -			list_del_init(&bp->b_lru);
> -			btp->bt_lru_nr--;
> -			atomic_dec(&bp->b_hold);
> -		}
> -		spin_unlock(&btp->bt_lru_lock);
> -	}
> +	if (!(bp->b_lru_flags & _XBF_LRU_DISPOSE) &&
> +	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
> +		atomic_dec(&bp->b_hold);
> +
>  	ASSERT(atomic_read(&bp->b_hold) >= 1);
>  }
>  
> @@ -1498,83 +1473,95 @@ xfs_buf_iomove(
>   * returned. These buffers will have an elevated hold count, so wait on those
>   * while freeing all the buffers only held by the LRU.
>   */
> -void
> -xfs_wait_buftarg(
> -	struct xfs_buftarg	*btp)
> +static int

The walk callback's return values are consumed as enum lru_status, so declaring
the function with the enum return type would be clearer and type-checked:

static enum lru_status

> +xfs_buftarg_wait_rele(
> +	struct list_head	*item,
> +	spinlock_t		*lru_lock,
> +	void			*arg)
> +
>  {
> -	struct xfs_buf		*bp;
> +	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
>  
> -restart:
> -	spin_lock(&btp->bt_lru_lock);
> -	while (!list_empty(&btp->bt_lru)) {
> -		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
> -		if (atomic_read(&bp->b_hold) > 1) {
> -			trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
> -			list_move_tail(&bp->b_lru, &btp->bt_lru);
> -			spin_unlock(&btp->bt_lru_lock);
> -			delay(100);
> -			goto restart;
> -		}
> +	if (atomic_read(&bp->b_hold) > 1) {
> +		/* need to wait */
> +		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
> +		spin_unlock(lru_lock);
> +		delay(100);
> +	} else {
>  		/*
>  		 * clear the LRU reference count so the buffer doesn't get
>  		 * ignored in xfs_buf_rele().
>  		 */
>  		atomic_set(&bp->b_lru_ref, 0);
> -		spin_unlock(&btp->bt_lru_lock);
> +		spin_unlock(lru_lock);
>  		xfs_buf_rele(bp);
> -		spin_lock(&btp->bt_lru_lock);
>  	}
> -	spin_unlock(&btp->bt_lru_lock);
> +	return 3;

Rather than the magic number 3, use the named constant so the intent is obvious
and the code stays correct if the enum values ever change:

	return LRU_RETRY;

>  }
>  
> -int
> -xfs_buftarg_shrink(
> +void
> +xfs_wait_buftarg(
> +	struct xfs_buftarg	*btp)
> +{
> +	while (list_lru_count(&btp->bt_lru))
> +		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
> +			      NULL, LONG_MAX);
> +}
> +
> +static int

Same comment as above for the isolate callback — declare it with the enum
return type instead of int:

static enum lru_status

> +xfs_buftarg_isolate(
> +	struct list_head	*item,
> +	spinlock_t		*lru_lock,
> +	void			*arg)
> +{
> +	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
> +	struct list_head	*dispose = arg;
> +
> +	/*
> +	 * Decrement the b_lru_ref count unless the value is already
> +	 * zero. If the value is already zero, we need to reclaim the
> +	 * buffer, otherwise it gets another trip through the LRU.
> +	 */
> +	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0))
> +		return 1;

Again, prefer the named constant to the magic number 1:

		return LRU_ROTATE;

> +
> +	bp->b_lru_flags |= _XBF_LRU_DISPOSE;
> +	list_move(item, dispose);
> +	return 0;

And here, the named constant instead of 0:

	return LRU_REMOVED;

> +}
> +
> +static long
> +xfs_buftarg_shrink_scan(
>  	struct shrinker		*shrink,
>  	struct shrink_control	*sc)
>  {
>  	struct xfs_buftarg	*btp = container_of(shrink,
>  					struct xfs_buftarg, bt_shrinker);
> -	struct xfs_buf		*bp;
> -	int nr_to_scan = sc->nr_to_scan;
>  	LIST_HEAD(dispose);
> +	long			freed;
>  
> -	if (!nr_to_scan)
> -		return btp->bt_lru_nr;
> -
> -	spin_lock(&btp->bt_lru_lock);
> -	while (!list_empty(&btp->bt_lru)) {
> -		if (nr_to_scan-- <= 0)
> -			break;
> -
> -		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
> -
> -		/*
> -		 * Decrement the b_lru_ref count unless the value is already
> -		 * zero. If the value is already zero, we need to reclaim the
> -		 * buffer, otherwise it gets another trip through the LRU.
> -		 */
> -		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
> -			list_move_tail(&bp->b_lru, &btp->bt_lru);
> -			continue;
> -		}
> -
> -		/*
> -		 * remove the buffer from the LRU now to avoid needing another
> -		 * lock round trip inside xfs_buf_rele().
> -		 */
> -		list_move(&bp->b_lru, &dispose);
> -		btp->bt_lru_nr--;
> -		bp->b_lru_flags |= _XBF_LRU_DISPOSE;
> -	}
> -	spin_unlock(&btp->bt_lru_lock);
> +	freed = list_lru_walk_nodemask(&btp->bt_lru, xfs_buftarg_isolate,
> +				       &dispose, sc->nr_to_scan,
> +				       &sc->nodes_to_scan);
>  
>  	while (!list_empty(&dispose)) {
> +		struct xfs_buf *bp;
>  		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
>  		list_del_init(&bp->b_lru);
>  		xfs_buf_rele(bp);
>  	}
>  
> -	return btp->bt_lru_nr;
> +	return freed;
> +}
> +
> +static long
> +xfs_buftarg_shrink_count(
> +	struct shrinker		*shrink,
> +	struct shrink_control	*sc)
> +{
> +	struct xfs_buftarg	*btp = container_of(shrink,
> +					struct xfs_buftarg, bt_shrinker);
> +	return list_lru_count_nodemask(&btp->bt_lru, &sc->nodes_to_scan);
>  }
>  
>  void
> @@ -1656,11 +1643,11 @@ xfs_alloc_buftarg(
>  	if (!btp->bt_bdi)
>  		goto error;
>  
> -	INIT_LIST_HEAD(&btp->bt_lru);
> -	spin_lock_init(&btp->bt_lru_lock);
> +	list_lru_init(&btp->bt_lru);
>  	if (xfs_setsize_buftarg_early(btp, bdev))
>  		goto error;
> -	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
> +	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
> +	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
>  	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
>  	register_shrinker(&btp->bt_shrinker);
>  	return btp;
> diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
> index 433a12e..5ec7d35 100644
> --- a/fs/xfs/xfs_buf.h
> +++ b/fs/xfs/xfs_buf.h
> @@ -25,6 +25,7 @@
>  #include <linux/fs.h>
>  #include <linux/buffer_head.h>
>  #include <linux/uio.h>
> +#include <linux/list_lru.h>
>  
>  /*
>   *	Base types
> @@ -92,9 +93,7 @@ typedef struct xfs_buftarg {
>  
>  	/* LRU control structures */
>  	struct shrinker		bt_shrinker;
> -	struct list_head	bt_lru;
> -	spinlock_t		bt_lru_lock;
> -	unsigned int		bt_lru_nr;
> +	struct list_lru		bt_lru;
>  } xfs_buftarg_t;
>  
>  struct xfs_buf;


More information about the Containers mailing list