Documentation/accounting/getdelays.c  +1 −0

@@ -516,6 +516,7 @@ int main(int argc, char *argv[])
 			default:
 				fprintf(stderr, "Unknown nla_type %d\n",
 					na->nla_type);
+			case TASKSTATS_TYPE_NULL:
 				break;
 			}
 			na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);

Documentation/filesystems/Locking  +100 −112

@@ -18,7 +18,6 @@ prototypes:
 	char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen);

 locking rules:
-	none have BKL
 		dcache_lock	rename_lock	->d_lock	may block
 d_revalidate:	no		no		no		yes
 d_hash		no		no		no		yes

@@ -42,18 +41,23 @@ ata *);
 	int (*rename) (struct inode *, struct dentry *,
 			struct inode *, struct dentry *);
 	int (*readlink) (struct dentry *, char __user *,int);
-	int (*follow_link) (struct dentry *, struct nameidata *);
+	void * (*follow_link) (struct dentry *, struct nameidata *);
+	void (*put_link) (struct dentry *, struct nameidata *, void *);
 	void (*truncate) (struct inode *);
 	int (*permission) (struct inode *, int, struct nameidata *);
+	int (*check_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
 	int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
 	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*removexattr) (struct dentry *, const char *);
+	void (*truncate_range)(struct inode *, loff_t, loff_t);
+	long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len);
+	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);

 locking rules:
-	all may block, none have BKL
+	all may block
 		i_mutex(inode)
 lookup:		yes
 create:		yes

@@ -66,19 +70,24 @@ rmdir:		yes (both)	(see below)
 rename:		yes (all)	(see below)
 readlink:	no
 follow_link:	no
+put_link:	no
 truncate:	yes		(see below)
 setattr:	yes
 permission:	no
+check_acl:	no
 getattr:	no
 setxattr:	yes
 getxattr:	no
 listxattr:	no
 removexattr:	yes
+truncate_range:	yes
+fallocate:	no
+fiemap:		no
 	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
 victim.
 	cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.

 	->truncate() is never called directly - it's a callback, not a
-method. It's called by vmtruncate() - library function normally used by
+method. It's called by vmtruncate() - deprecated library function used by
 ->setattr(). Locking information above applies to that call (i.e. is
 inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE had been
 passed).
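As a hedged aside (not part of the patch): the note above says ->truncate() is only reached through vmtruncate() from ->setattr() when ATTR_SIZE is passed, with i_mutex held by the VFS caller. A minimal ->setattr() of that era might look roughly like this; foo_setattr() is a made-up name.

        /*
         * Sketch only: vmtruncate() is what ends up calling ->truncate(),
         * so ->truncate() inherits ->setattr()'s locking (i_mutex held).
         */
        static int foo_setattr(struct dentry *dentry, struct iattr *attr)
        {
                struct inode *inode = dentry->d_inode;
                int error;

                error = inode_change_ok(inode, attr);
                if (error)
                        return error;

                if ((attr->ia_valid & ATTR_SIZE) &&
                    attr->ia_size != i_size_read(inode)) {
                        error = vmtruncate(inode, attr->ia_size); /* calls ->truncate() */
                        if (error)
                                return error;
                }

                setattr_copy(inode, attr);
                mark_inode_dirty(inode);
                return 0;
        }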
@@ -91,7 +100,7 @@ prototypes:
 	struct inode *(*alloc_inode)(struct super_block *sb);
 	void (*destroy_inode)(struct inode *);
 	void (*dirty_inode) (struct inode *);
-	int (*write_inode) (struct inode *, int);
+	int (*write_inode) (struct inode *, struct writeback_control *wbc);
 	int (*drop_inode) (struct inode *);
 	void (*evict_inode) (struct inode *);
 	void (*put_super) (struct super_block *);

@@ -105,10 +114,10 @@ prototypes:
 	int (*show_options)(struct seq_file *, struct vfsmount *);
 	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
 	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
+	int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);

 locking rules:
 	All may block [not true, see below]
-	None have BKL
 			s_umount
 alloc_inode:
 destroy_inode:

@@ -127,6 +136,7 @@ umount_begin:		no
 show_options:		no		(namespace_sem)
 quota_read:		no		(see below)
 quota_write:		no		(see below)
+bdev_try_to_free_page:	no		(see below)

 	->statfs() has s_umount (shared) when called by ustat(2) (native or
 compat), but that's an accident of bad API; s_umount is used to pin

@@ -139,19 +149,25 @@ be the only ones operating on the quota file by the quota code (via
 dqio_sem) (unless an admin really wants to screw up something and
 writes to quota files with quotas on). For other details about locking
 see also dquot_operations section.

+	->bdev_try_to_free_page is called from the ->releasepage handler of
+the block device inode. See there for more details.
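For context on the new ->write_inode() prototype above, a hedged sketch (not from this patch; foo_write_inode() and both helpers are made-up names) of how a filesystem can use the writeback_control argument:

        /*
         * Sketch only: wbc->sync_mode lets the filesystem distinguish
         * integrity writeback (must wait) from background writeback.
         */
        static int foo_write_inode(struct inode *inode, struct writeback_control *wbc)
        {
                if (wbc->sync_mode == WB_SYNC_ALL)
                        return foo_write_inode_and_wait(inode);  /* write and wait for I/O */
                return foo_start_inode_writeout(inode);          /* just queue the I/O */
        }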
 --------------------------- file_system_type ---------------------------
 prototypes:
 	int (*get_sb) (struct file_system_type *, int,
 		       const char *, void *, struct vfsmount *);
+	struct dentry *(*mount) (struct file_system_type *, int,
+		       const char *, void *);
 	void (*kill_sb) (struct super_block *);
 locking rules:
-		may block	BKL
-get_sb		yes		no
-kill_sb		yes		no
+		may block
+get_sb		yes
+mount		yes
+kill_sb		yes

 ->get_sb() returns error or 0 with locked superblock attached to the vfsmount
 (exclusive on ->s_umount).
+->mount() returns ERR_PTR or the root dentry.
+
 ->kill_sb() takes a write-locked superblock, does all shutdown work on it,
 unlocks and drops the reference.
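As a hedged illustration of the new ->mount() entry point documented above (not part of the patch; foo_mount(), foo_fill_super() and "foo" are made-up names), a block-based filesystem typically just wraps mount_bdev():

        /* Sketch only: ->mount() replacing ->get_sb() for a bdev-backed fs. */
        static struct dentry *foo_mount(struct file_system_type *fs_type,
                        int flags, const char *dev_name, void *data)
        {
                return mount_bdev(fs_type, flags, dev_name, data, foo_fill_super);
        }

        static struct file_system_type foo_fs_type = {
                .owner          = THIS_MODULE,
                .name           = "foo",
                .mount          = foo_mount,
                .kill_sb        = kill_block_super,
                .fs_flags       = FS_REQUIRES_DEV,
        };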
@@ -176,27 +192,35 @@ prototypes:
 	void (*freepage)(struct page *);
 	int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
 			loff_t offset, unsigned long nr_segs);
+	int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
+				unsigned long *);
+	int (*migratepage)(struct address_space *, struct page *, struct page *);
 	int (*launder_page)(struct page *);
+	int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
+	int (*error_remove_page)(struct address_space *, struct page *);

 locking rules:
 	All except set_page_dirty and freepage may block

-			BKL	PageLocked(page)	i_mutex
-writepage:		no	yes, unlocks (see below)
-readpage:		no	yes, unlocks
-sync_page:		no	maybe
-writepages:		no
-set_page_dirty		no	no
-readpages:		no
-write_begin:		no	locks the page		yes
-write_end:		no	yes, unlocks		yes
-perform_write:		no	n/a			yes
-bmap:			no
-invalidatepage:		no	yes
-releasepage:		no	yes
-freepage:		no	yes
-direct_IO:		no
-launder_page:		no	yes
+			PageLocked(page)	i_mutex
+writepage:		yes, unlocks (see below)
+readpage:		yes, unlocks
+sync_page:		maybe
+writepages:
+set_page_dirty		no
+readpages:
+write_begin:		locks the page		yes
+write_end:		yes, unlocks		yes
+bmap:
+invalidatepage:		yes
+releasepage:		yes
+freepage:		yes
+direct_IO:
+get_xip_mem:		maybe
+launder_page:		yes
+migratepage:		yes (both)
+is_partially_uptodate:	yes
+error_remove_page:	yes

 	->write_begin(), ->write_end(), ->sync_page() and ->readpage()
 may be called from the request handler (/dev/loop).

@@ -276,9 +300,8 @@ under spinlock (it cannot block) and is sometimes called with the page
 not locked.

 	->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
-filesystems and by the swapper. The latter will eventually go away. All
-instances do not actually need the BKL. Please, keep it that way and don't
-breed new callers.
+filesystems and by the swapper. The latter will eventually go away. Please,
+keep it that way and don't breed new callers.

 	->invalidatepage() is called when the filesystem must attempt to drop
 some or all of the buffers from the page when it is being truncated. It

@@ -299,47 +322,37 @@ cleaned, or an error value if not. Note that in order to prevent the page
 getting mapped back in and redirtied, it needs to be kept locked
 across the entire operation.

-	Note: currently almost all instances of address_space methods are
-using BKL for internal serialization and that's one of the worst sources
-of contention. Normally they are calling library functions (in fs/buffer.c)
-and pass foo_get_block() as a callback (on local block-based filesystems,
-indeed). BKL is not needed for library stuff and is usually taken by
-foo_get_block(). It's an overkill, since block bitmaps can be protected by
-internal fs locking and real critical areas are much smaller than the areas
-filesystems protect now.
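As a hedged aside on the table above: the "writepage: yes, unlocks" rule is usually satisfied by delegating to an fs/buffer.c helper. A sketch, not from this patch, with foo_get_block() being a made-up name for the fs-specific block mapping routine:

        /*
         * Sketch only: the page arrives locked; block_write_full_page()
         * writes it out via the get_block callback and unlocks it,
         * matching the locking table above.
         */
        static int foo_writepage(struct page *page, struct writeback_control *wbc)
        {
                return block_write_full_page(page, foo_get_block, wbc);
        }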
 ----------------------- file_lock_operations ------------------------------
 prototypes:
-	void (*fl_insert)(struct file_lock *);	/* lock insertion callback */
-	void (*fl_remove)(struct file_lock *);	/* lock removal callback */
 	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
 	void (*fl_release_private)(struct file_lock *);

 locking rules:
-			BKL	may block
-fl_insert:		yes	no
-fl_remove:		yes	no
-fl_copy_lock:		yes	no
-fl_release_private:	yes	yes
+			file_lock_lock	may block
+fl_copy_lock:		yes		no
+fl_release_private:	maybe		no

 ----------------------- lock_manager_operations ---------------------------
 prototypes:
 	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
 	void (*fl_notify)(struct file_lock *);  /* unblock callback */
+	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_break)(struct file_lock *); /* break_lease callback */
+	int (*fl_mylease)(struct file_lock *, struct file_lock *);
+	int (*fl_change)(struct file_lock **, int);

 locking rules:
-			BKL	may block
-fl_compare_owner:	yes	no
-fl_notify:		yes	no
-fl_release_private:	yes	yes
-fl_break:		yes	no
+			file_lock_lock	may block
+fl_compare_owner:	yes		no
+fl_notify:		yes		no
+fl_grant:		no		no
+fl_release_private:	maybe		no
+fl_break:		yes		no
+fl_mylease:		yes		no
+fl_change		yes		no

-	Currently only NFSD and NLM provide instances of this class. None of the
-them block. If you have out-of-tree instances - please, show up. Locking
-in that area will change.
-
 --------------------------- buffer_head -----------------------------------
 prototypes:
 	void (*b_end_io)(struct buffer_head *bh, int uptodate);

@@ -364,17 +377,17 @@ prototypes:
 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);

 locking rules:
-			BKL	bd_mutex
-open:			no	yes
-release:		no	yes
-ioctl:			no	no
-compat_ioctl:		no	no
-direct_access:		no	no
-media_changed:		no	no
-unlock_native_capacity:	no	no
-revalidate_disk:	no	no
-getgeo:			no	no
-swap_slot_free_notify:	no	no	(see below)
+			bd_mutex
+open:			yes
+release:		yes
+ioctl:			no
+compat_ioctl:		no
+direct_access:		no
+media_changed:		no
+unlock_native_capacity:	no
+revalidate_disk:	no
+getgeo:			no
+swap_slot_free_notify:	no	(see below)

 media_changed, unlock_native_capacity and revalidate_disk are called only from
 check_disk_change().

@@ -413,34 +426,21 @@ prototypes:
 	unsigned long (*get_unmapped_area)(struct file *, unsigned long,
 			unsigned long, unsigned long, unsigned long);
 	int (*check_flags)(int);
+	int (*flock) (struct file *, int, struct file_lock *);
+	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
+			size_t, unsigned int);
+	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
+			size_t, unsigned int);
+	int (*setlease)(struct file *, long, struct file_lock **);
 };

 locking rules:
-	All may block.
-			BKL
-llseek:			no	(see below)
-read:			no
-aio_read:		no
-write:			no
-aio_write:		no
-readdir:		no
-poll:			no
-unlocked_ioctl:		no
-compat_ioctl:		no
-mmap:			no
-open:			no
-flush:			no
-release:		no
-fsync:			no	(see below)
-aio_fsync:		no
-fasync:			no
-lock:			yes
-readv:			no
-writev:			no
-sendfile:		no
-sendpage:		no
-get_unmapped_area:	no
-check_flags:		no
+	All may block except for ->setlease.
+	No VFS locks held on entry except for ->fsync and ->setlease.
+
+	->fsync() has i_mutex on inode.
+
+	->setlease has the file_list_lock held and must not sleep.

 	->llseek() locking has moved from llseek to the individual llseek
 implementations.  If your fs is not using generic_file_llseek, you

@@ -450,17 +450,10 @@ mutex or just to use i_size_read() instead.
 Note: this does not protect the file->f_pos against concurrent modifications
 since this is something the userspace has to take care about.

-Note: ext2_release() was *the* source of contention on fs-intensive
-loads and dropping BKL on ->release() helps to get rid of that (we still
-grab BKL for cases when we close a file that had been opened r/w, but that
-can and should be done using the internal locking with smaller critical areas).
-Current worst offender is ext2_get_block()...
-
-->fasync() is called without BKL protection, and is responsible for
-maintaining the FASYNC bit in filp->f_flags.  Most instances call
-fasync_helper(), which does that maintenance, so it's not normally
-something one needs to worry about.  Return values > 0 will be mapped to
-zero in the VFS layer.
+->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
+Most instances call fasync_helper(), which does that maintenance, so it's
+not normally something one needs to worry about. Return values > 0 will be
+mapped to zero in the VFS layer.
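As a hedged illustration of the ->fasync() paragraph above (not from this patch; struct foo_dev and foo_fasync() are made-up names), a typical instance just forwards to fasync_helper():

        /* Sketch only: fasync_helper() maintains FASYNC in filp->f_flags. */
        static int foo_fasync(int fd, struct file *filp, int on)
        {
                struct foo_dev *dev = filp->private_data;

                return fasync_helper(fd, filp, on, &dev->async_queue);
        }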
 	->readdir() and ->ioctl() on directories must be changed. Ideally we would
 move ->readdir() to inode_operations and use a separate method for directory

@@ -471,8 +464,6 @@ components. And there are other reasons why the current interface is a mess...

 ->read on directories probably must go away - we should just enforce -EISDIR
 in sys_read() and friends.
-
-->fsync() has i_mutex on inode.

 --------------------------- dquot_operations -------------------------------
 prototypes:
 	int (*write_dquot) (struct dquot *);

@@ -507,12 +498,12 @@ prototypes:
 	int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);

 locking rules:
-		BKL	mmap_sem	PageLocked(page)
-open:		no	yes
-close:		no	yes
-fault:		no	yes		can return with page locked
-page_mkwrite:	no	yes		can return with page locked
-access:		no	yes
+		mmap_sem	PageLocked(page)
+open:		yes
+close:		yes
+fault:		yes		can return with page locked
+page_mkwrite:	yes		can return with page locked
+access:		yes

 	->fault() is called when a previously not present pte is about
 to be faulted in. The filesystem must find and return the page associated

@@ -539,6 +530,3 @@ VM_IO | VM_PFNMAP VMAs.

 (if you break something or notice that it is broken and do not fix it yourself
 - at least put it here)
-
-ipc/shm.c::shm_delete() - may need BKL.
-->read() and ->write() in many drivers are (probably) missing BKL.
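Tied to the ->fault() rules a few hunks up, a hedged sketch (not from this patch; foo_fault() and foo_lookup_page() are made-up names) of the shape of a handler under those rules:

        /*
         * Sketch only: mmap_sem is held by the caller; the handler hands
         * back a referenced page in vmf->page (or returns VM_FAULT_LOCKED
         * if the page is returned locked).
         */
        static int foo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        {
                struct page *page;

                page = foo_lookup_page(vma->vm_file, vmf->pgoff);
                if (!page)
                        return VM_FAULT_SIGBUS;

                get_page(page);
                vmf->page = page;
                return 0;
        }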
Documentation/kernel-parameters.txt  +1 −6

@@ -1759,7 +1759,7 @@ and is between 256 and 4096 characters. It is defined in the file

 	nousb		[USB] Disable the USB subsystem

-	nowatchdog	[KNL] Disable the lockup detector.
+	nowatchdog	[KNL] Disable the lockup detector (NMI watchdog).

 	nowb		[ARM]

@@ -2175,11 +2175,6 @@ and is between 256 and 4096 characters. It is defined in the file
 	reset_devices	[KNL] Force drivers to reset the underlying device
 			during initialization.

-	resource_alloc_from_bottom
-			Allocate new resources from the beginning of available
-			space, not the end.  If you need to use this, please
-			report a bug.
-
 	resume=		[SWSUSP]
 			Specify the partition device for software suspend

Documentation/power/runtime_pm.txt  +2 −2

@@ -379,8 +379,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
     zero)

   bool pm_runtime_suspended(struct device *dev);
-    - return true if the device's runtime PM status is 'suspended', or false
-      otherwise
+    - return true if the device's runtime PM status is 'suspended' and its
+      'power.disable_depth' field is equal to zero, or false otherwise

   void pm_runtime_allow(struct device *dev);
     - set the power.runtime_auto flag for the device and decrease its usage
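As a hedged aside on the clarified pm_runtime_suspended() semantics above (not from this patch; foo_driver_suspend() and foo_save_registers() are made-up names): since it now only reports true when runtime PM is actually enabled for the device, a driver can safely skip hardware access in that case.

        /* Sketch only: skip register save if the device is runtime-suspended. */
        static int foo_driver_suspend(struct device *dev)
        {
                if (pm_runtime_suspended(dev))
                        return 0;       /* hardware is already down */

                return foo_save_registers(dev);
        }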
Normally it will * Notes: This function should be relatively fast. Normally it * not wait for IO to complete. Hence the 'done' callback is invoked * will not wait for IO to complete. Hence the scp->scsi_done * (often directly from an interrupt service routine) some time after * callback is invoked (often directly from an interrupt service * this function has returned. In some cases (e.g. pseudo adapter * routine) some time after this function has returned. In some * drivers that manufacture the response to a SCSI INQUIRY) * cases (e.g. pseudo adapter drivers that manufacture the * the 'done' callback may be invoked before this function returns. * response to a SCSI INQUIRY) the scp->scsi_done callback may be * If the 'done' callback is not invoked within a certain period * invoked before this function returns. If the scp->scsi_done * the SCSI mid level will commence error processing. * callback is not invoked within a certain period the SCSI mid * If a status of CHECK CONDITION is placed in "result" when the * level will commence error processing. If a status of CHECK * 'done' callback is invoked, then the LLD driver should * CONDITION is placed in "result" when the scp->scsi_done * perform autosense and fill in the struct scsi_cmnd::sense_buffer * callback is invoked, then the LLD driver should perform * autosense and fill in the struct scsi_cmnd::sense_buffer * array. The scsi_cmnd::sense_buffer array is zeroed prior to * array. The scsi_cmnd::sense_buffer array is zeroed prior to * the mid level queuing a command to an LLD. * the mid level queuing a command to an LLD. * * * Defined in: LLD * Defined in: LLD **/ **/ int queuecommand(struct scsi_cmnd * scp, int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd * scp) void (*done)(struct scsi_cmnd *)) /** /** Loading Loading