| File: | ufs/ffs/ffs_alloc.c |
| Warning: | line 378, column 7 Division by zero |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
| 1 | /* $OpenBSD: ffs_alloc.c,v 1.114 2021/03/11 13:31:35 jsg Exp $ */ | |||
| 2 | /* $NetBSD: ffs_alloc.c,v 1.11 1996/05/11 18:27:09 mycroft Exp $ */ | |||
| 3 | ||||
| 4 | /* | |||
| 5 | * Copyright (c) 2002 Networks Associates Technology, Inc. | |||
| 6 | * All rights reserved. | |||
| 7 | * | |||
| 8 | * This software was developed for the FreeBSD Project by Marshall | |||
| 9 | * Kirk McKusick and Network Associates Laboratories, the Security | |||
| 10 | * Research Division of Network Associates, Inc. under DARPA/SPAWAR | |||
| 11 | * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS | |||
| 12 | * research program. | |||
| 13 | * | |||
| 14 | * Copyright (c) 1982, 1986, 1989, 1993 | |||
| 15 | * The Regents of the University of California. All rights reserved. | |||
| 16 | * | |||
| 17 | * Redistribution and use in source and binary forms, with or without | |||
| 18 | * modification, are permitted provided that the following conditions | |||
| 19 | * are met: | |||
| 20 | * 1. Redistributions of source code must retain the above copyright | |||
| 21 | * notice, this list of conditions and the following disclaimer. | |||
| 22 | * 2. Redistributions in binary form must reproduce the above copyright | |||
| 23 | * notice, this list of conditions and the following disclaimer in the | |||
| 24 | * documentation and/or other materials provided with the distribution. | |||
| 25 | * 3. Neither the name of the University nor the names of its contributors | |||
| 26 | * may be used to endorse or promote products derived from this software | |||
| 27 | * without specific prior written permission. | |||
| 28 | * | |||
| 29 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |||
| 30 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||
| 31 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |||
| 32 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |||
| 33 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |||
| 34 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |||
| 35 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |||
| 36 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |||
| 37 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |||
| 38 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |||
| 39 | * SUCH DAMAGE. | |||
| 40 | * | |||
| 41 | * @(#)ffs_alloc.c 8.11 (Berkeley) 10/27/94 | |||
| 42 | */ | |||
| 43 | ||||
| 44 | #include <sys/param.h> | |||
| 45 | #include <sys/systm.h> | |||
| 46 | #include <sys/buf.h> | |||
| 47 | #include <sys/vnode.h> | |||
| 48 | #include <sys/mount.h> | |||
| 49 | #include <sys/syslog.h> | |||
| 50 | #include <sys/stdint.h> | |||
| 51 | #include <sys/time.h> | |||
| 52 | ||||
| 53 | #include <ufs/ufs/quota.h> | |||
| 54 | #include <ufs/ufs/inode.h> | |||
| 55 | #include <ufs/ufs/ufsmount.h> | |||
| 56 | #include <ufs/ufs/ufs_extern.h> | |||
| 57 | ||||
| 58 | #include <ufs/ffs/fs.h> | |||
| 59 | #include <ufs/ffs/ffs_extern.h> | |||
| 60 | ||||
/*
 * Log a filesystem-related error to the kernel message buffer,
 * tagged with the offending uid and the mount point.
 */
#define ffs_fserr(fs, uid, cp) do {				\
	log(LOG_ERR, "uid %u on %s: %s\n", (uid),		\
	    (fs)->fs_fsmnt, (cp));				\
} while (0)
| 65 | ||||
| 66 | daddr_t ffs_alloccg(struct inode *, u_int, daddr_t, int); | |||
| 67 | struct buf * ffs_cgread(struct fs *, struct inode *, u_int); | |||
| 68 | daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t); | |||
| 69 | ufsino_t ffs_dirpref(struct inode *); | |||
| 70 | daddr_t ffs_fragextend(struct inode *, u_int, daddr_t, int, int); | |||
| 71 | daddr_t ffs_hashalloc(struct inode *, u_int, daddr_t, int, | |||
| 72 | daddr_t (*)(struct inode *, u_int, daddr_t, int)); | |||
| 73 | daddr_t ffs_nodealloccg(struct inode *, u_int, daddr_t, int); | |||
| 74 | daddr_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int); | |||
| 75 | ||||
| 76 | static const struct timeval fserr_interval = { 2, 0 }; | |||
| 77 | ||||
| 78 | ||||
| 79 | /* | |||
| 80 | * Allocate a block in the file system. | |||
| 81 | * | |||
| 82 | * The size of the requested block is given, which must be some | |||
| 83 | * multiple of fs_fsize and <= fs_bsize. | |||
| 84 | * A preference may be optionally specified. If a preference is given | |||
| 85 | * the following hierarchy is used to allocate a block: | |||
| 86 | * 1) allocate the requested block. | |||
| 87 | * 2) allocate a rotationally optimal block in the same cylinder. | |||
| 88 | * 3) allocate a block in the same cylinder group. | |||
| 89 | * 4) quadratically rehash into other cylinder groups, until an | |||
| 90 | * available block is located. | |||
| 91 | * If no block preference is given the following hierarchy is used | |||
| 92 | * to allocate a block: | |||
| 93 | * 1) allocate a block in the cylinder group that contains the | |||
| 94 | * inode for the file. | |||
| 95 | * 2) quadratically rehash into other cylinder groups, until an | |||
| 96 | * available block is located. | |||
| 97 | */ | |||
| 98 | int | |||
| 99 | ffs_alloc(struct inode *ip, daddr_t lbn, daddr_t bpref, int size, | |||
| 100 | struct ucred *cred, daddr_t *bnp) | |||
| 101 | { | |||
| 102 | static struct timeval fsfull_last; | |||
| 103 | struct fs *fs; | |||
| 104 | daddr_t bno; | |||
| 105 | u_int cg; | |||
| 106 | int error; | |||
| 107 | ||||
| 108 | *bnp = 0; | |||
| 109 | fs = ip->i_fsinode_u.fs; | |||
| 110 | #ifdef DIAGNOSTIC1 | |||
| 111 | if ((u_int)size > fs->fs_bsize || fragoff(fs, size)((size) & (fs)->fs_qfmask) != 0) { | |||
| 112 | printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n", | |||
| 113 | ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt); | |||
| 114 | panic("ffs_alloc: bad size"); | |||
| 115 | } | |||
| 116 | if (cred == NOCRED((struct ucred *)-1)) | |||
| 117 | panic("ffs_alloc: missing credential"); | |||
| 118 | #endif /* DIAGNOSTIC */ | |||
| 119 | if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0) | |||
| 120 | goto nospace; | |||
| 121 | if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree)((((fs)->fs_cstotal.cs_nbfree) << ((fs))->fs_fragshift ) + (fs)->fs_cstotal.cs_nffree - ((fs)->fs_dsize * (fs-> fs_minfree) / 100)) <= 0) | |||
| 122 | goto nospace; | |||
| 123 | ||||
| 124 | if ((error = ufs_quota_alloc_blocks(ip, btodb(size), cred)ufs_quota_alloc_blocks2(ip, ((size) >> 9), cred, 0)) != 0) | |||
| 125 | return (error); | |||
| 126 | ||||
| 127 | /* | |||
| 128 | * Start allocation in the preferred block's cylinder group or | |||
| 129 | * the file's inode's cylinder group if no preferred block was | |||
| 130 | * specified. | |||
| 131 | */ | |||
| 132 | if (bpref >= fs->fs_size) | |||
| 133 | bpref = 0; | |||
| 134 | if (bpref == 0) | |||
| 135 | cg = ino_to_cg(fs, ip->i_number)((ip->i_number) / (fs)->fs_ipg); | |||
| 136 | else | |||
| 137 | cg = dtog(fs, bpref)((bpref) / (fs)->fs_fpg); | |||
| 138 | ||||
| 139 | /* Try allocating a block. */ | |||
| 140 | bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg); | |||
| 141 | if (bno > 0) { | |||
| 142 | /* allocation successful, update inode data */ | |||
| 143 | DIP_ADD(ip, blocks, btodb(size))do { if ((ip)->i_ump->um_fstype == 1) (ip)->dinode_u .ffs1_din->di_blocks += (((size) >> 9)); else (ip)-> dinode_u.ffs2_din->di_blocks += (((size) >> 9)); } while (0); | |||
| 144 | ip->i_flag |= IN_CHANGE0x0002 | IN_UPDATE0x0004; | |||
| 145 | *bnp = bno; | |||
| 146 | return (0); | |||
| 147 | } | |||
| 148 | ||||
| 149 | /* Restore user's disk quota because allocation failed. */ | |||
| 150 | (void) ufs_quota_free_blocks(ip, btodb(size), cred)ufs_quota_free_blocks2(ip, ((size) >> 9), cred, 0); | |||
| 151 | ||||
| 152 | nospace: | |||
| 153 | if (ratecheck(&fsfull_last, &fserr_interval)) { | |||
| 154 | ffs_fserr(fs, cred->cr_uid, "file system full")do { log(3, "uid %u on %s: %s\n", (cred->cr_uid), (fs)-> fs_fsmnt, ("file system full")); } while (0); | |||
| 155 | uprintf("\n%s: write failed, file system is full\n", | |||
| 156 | fs->fs_fsmnt); | |||
| 157 | } | |||
| 158 | return (ENOSPC28); | |||
| 159 | } | |||
| 160 | ||||
| 161 | /* | |||
| 162 | * Reallocate a fragment to a bigger size | |||
| 163 | * | |||
| 164 | * The number and size of the old block is given, and a preference | |||
| 165 | * and new size is also specified. The allocator attempts to extend | |||
| 166 | * the original block. Failing that, the regular block allocator is | |||
| 167 | * invoked to get an appropriate block. | |||
| 168 | */ | |||
| 169 | int | |||
| 170 | ffs_realloccg(struct inode *ip, daddr_t lbprev, daddr_t bpref, int osize, | |||
| 171 | int nsize, struct ucred *cred, struct buf **bpp, daddr_t *blknop) | |||
| 172 | { | |||
| 173 | static struct timeval fsfull_last; | |||
| 174 | struct fs *fs; | |||
| 175 | struct buf *bp = NULL((void *)0); | |||
| 176 | daddr_t quota_updated = 0; | |||
| 177 | int request, error; | |||
| 178 | u_int cg; | |||
| 179 | daddr_t bprev, bno; | |||
| 180 | ||||
| 181 | if (bpp != NULL((void *)0)) | |||
| 182 | *bpp = NULL((void *)0); | |||
| 183 | fs = ip->i_fsinode_u.fs; | |||
| 184 | #ifdef DIAGNOSTIC1 | |||
| 185 | if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize)((osize) & (fs)->fs_qfmask) != 0 || | |||
| 186 | (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize)((nsize) & (fs)->fs_qfmask) != 0) { | |||
| 187 | printf( | |||
| 188 | "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n", | |||
| 189 | ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt); | |||
| 190 | panic("ffs_realloccg: bad size"); | |||
| 191 | } | |||
| 192 | if (cred == NOCRED((struct ucred *)-1)) | |||
| 193 | panic("ffs_realloccg: missing credential"); | |||
| 194 | #endif /* DIAGNOSTIC */ | |||
| 195 | if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree)((((fs)->fs_cstotal.cs_nbfree) << ((fs))->fs_fragshift ) + (fs)->fs_cstotal.cs_nffree - ((fs)->fs_dsize * (fs-> fs_minfree) / 100)) <= 0) | |||
| 196 | goto nospace; | |||
| 197 | ||||
| 198 | bprev = DIP(ip, db[lbprev])(((ip)->i_ump->um_fstype == 1) ? (ip)->dinode_u.ffs1_din ->di_db[lbprev] : (ip)->dinode_u.ffs2_din->di_db[lbprev ]); | |||
| 199 | ||||
| 200 | if (bprev == 0) { | |||
| 201 | printf("dev = 0x%x, bsize = %d, bprev = %lld, fs = %s\n", | |||
| 202 | ip->i_dev, fs->fs_bsize, (long long)bprev, fs->fs_fsmnt); | |||
| 203 | panic("ffs_realloccg: bad bprev"); | |||
| 204 | } | |||
| 205 | ||||
| 206 | /* | |||
| 207 | * Allocate the extra space in the buffer. | |||
| 208 | */ | |||
| 209 | if (bpp != NULL((void *)0)) { | |||
| 210 | if ((error = bread(ITOV(ip)((ip)->i_vnode), lbprev, fs->fs_bsize, &bp)) != 0) | |||
| 211 | goto error; | |||
| 212 | buf_adjcnt(bp, osize); | |||
| 213 | } | |||
| 214 | ||||
| 215 | if ((error = ufs_quota_alloc_blocks(ip, btodb(nsize - osize), cred)ufs_quota_alloc_blocks2(ip, ((nsize - osize) >> 9), cred , 0)) | |||
| 216 | != 0) | |||
| 217 | goto error; | |||
| 218 | ||||
| 219 | quota_updated = btodb(nsize - osize)((nsize - osize) >> 9); | |||
| 220 | ||||
| 221 | /* | |||
| 222 | * Check for extension in the existing location. | |||
| 223 | */ | |||
| 224 | cg = dtog(fs, bprev)((bprev) / (fs)->fs_fpg); | |||
| 225 | if ((bno = ffs_fragextend(ip, cg, bprev, osize, nsize)) != 0) { | |||
| 226 | DIP_ADD(ip, blocks, btodb(nsize - osize))do { if ((ip)->i_ump->um_fstype == 1) (ip)->dinode_u .ffs1_din->di_blocks += (((nsize - osize) >> 9)); else (ip)->dinode_u.ffs2_din->di_blocks += (((nsize - osize ) >> 9)); } while (0); | |||
| 227 | ip->i_flag |= IN_CHANGE0x0002 | IN_UPDATE0x0004; | |||
| 228 | if (bpp != NULL((void *)0)) { | |||
| 229 | if (bp->b_blkno != fsbtodb(fs, bno)((bno) << (fs)->fs_fsbtodb)) | |||
| 230 | panic("ffs_realloccg: bad blockno"); | |||
| 231 | #ifdef DIAGNOSTIC1 | |||
| 232 | if (nsize > bp->b_bufsize) | |||
| 233 | panic("ffs_realloccg: small buf"); | |||
| 234 | #endif | |||
| 235 | buf_adjcnt(bp, nsize); | |||
| 236 | bp->b_flags |= B_DONE0x00000100; | |||
| 237 | memset(bp->b_data + osize, 0, nsize - osize)__builtin_memset((bp->b_data + osize), (0), (nsize - osize )); | |||
| 238 | *bpp = bp; | |||
| 239 | } | |||
| 240 | if (blknop != NULL((void *)0)) { | |||
| 241 | *blknop = bno; | |||
| 242 | } | |||
| 243 | return (0); | |||
| 244 | } | |||
| 245 | /* | |||
| 246 | * Allocate a new disk location. | |||
| 247 | */ | |||
| 248 | if (bpref >= fs->fs_size) | |||
| 249 | bpref = 0; | |||
| 250 | switch (fs->fs_optim) { | |||
| 251 | case FS_OPTSPACE1: | |||
| 252 | /* | |||
| 253 | * Allocate an exact sized fragment. Although this makes | |||
| 254 | * best use of space, we will waste time relocating it if | |||
| 255 | * the file continues to grow. If the fragmentation is | |||
| 256 | * less than half of the minimum free reserve, we choose | |||
| 257 | * to begin optimizing for time. | |||
| 258 | */ | |||
| 259 | request = nsize; | |||
| 260 | if (fs->fs_minfree < 5 || | |||
| 261 | fs->fs_cstotal.cs_nffree > | |||
| 262 | fs->fs_dsize * fs->fs_minfree / (2 * 100)) | |||
| 263 | break; | |||
| 264 | fs->fs_optim = FS_OPTTIME0; | |||
| 265 | break; | |||
| 266 | case FS_OPTTIME0: | |||
| 267 | /* | |||
| 268 | * At this point we have discovered a file that is trying to | |||
| 269 | * grow a small fragment to a larger fragment. To save time, | |||
| 270 | * we allocate a full sized block, then free the unused portion. | |||
| 271 | * If the file continues to grow, the `ffs_fragextend' call | |||
| 272 | * above will be able to grow it in place without further | |||
| 273 | * copying. If aberrant programs cause disk fragmentation to | |||
| 274 | * grow within 2% of the free reserve, we choose to begin | |||
| 275 | * optimizing for space. | |||
| 276 | */ | |||
| 277 | request = fs->fs_bsize; | |||
| 278 | if (fs->fs_cstotal.cs_nffree < | |||
| 279 | fs->fs_dsize * (fs->fs_minfree - 2) / 100) | |||
| 280 | break; | |||
| 281 | fs->fs_optim = FS_OPTSPACE1; | |||
| 282 | break; | |||
| 283 | default: | |||
| 284 | printf("dev = 0x%x, optim = %d, fs = %s\n", | |||
| 285 | ip->i_dev, fs->fs_optim, fs->fs_fsmnt); | |||
| 286 | panic("ffs_realloccg: bad optim"); | |||
| 287 | /* NOTREACHED */ | |||
| 288 | } | |||
| 289 | bno = ffs_hashalloc(ip, cg, bpref, request, ffs_alloccg); | |||
| 290 | if (bno <= 0) | |||
| 291 | goto nospace; | |||
| 292 | ||||
| 293 | (void) uvm_vnp_uncache(ITOV(ip)((ip)->i_vnode)); | |||
| 294 | if (!DOINGSOFTDEP(ITOV(ip))((((ip)->i_vnode))->v_mount->mnt_flag & 0x04000000 )) | |||
| 295 | ffs_blkfree(ip, bprev, (long)osize); | |||
| 296 | if (nsize < request) | |||
| 297 | ffs_blkfree(ip, bno + numfrags(fs, nsize)((nsize) >> (fs)->fs_fshift), | |||
| 298 | (long)(request - nsize)); | |||
| 299 | DIP_ADD(ip, blocks, btodb(nsize - osize))do { if ((ip)->i_ump->um_fstype == 1) (ip)->dinode_u .ffs1_din->di_blocks += (((nsize - osize) >> 9)); else (ip)->dinode_u.ffs2_din->di_blocks += (((nsize - osize ) >> 9)); } while (0); | |||
| 300 | ip->i_flag |= IN_CHANGE0x0002 | IN_UPDATE0x0004; | |||
| 301 | if (bpp != NULL((void *)0)) { | |||
| 302 | bp->b_blkno = fsbtodb(fs, bno)((bno) << (fs)->fs_fsbtodb); | |||
| 303 | #ifdef DIAGNOSTIC1 | |||
| 304 | if (nsize > bp->b_bufsize) | |||
| 305 | panic("ffs_realloccg: small buf 2"); | |||
| 306 | #endif | |||
| 307 | buf_adjcnt(bp, nsize); | |||
| 308 | bp->b_flags |= B_DONE0x00000100; | |||
| 309 | memset(bp->b_data + osize, 0, nsize - osize)__builtin_memset((bp->b_data + osize), (0), (nsize - osize )); | |||
| 310 | *bpp = bp; | |||
| 311 | } | |||
| 312 | if (blknop != NULL((void *)0)) { | |||
| 313 | *blknop = bno; | |||
| 314 | } | |||
| 315 | return (0); | |||
| 316 | ||||
| 317 | nospace: | |||
| 318 | if (ratecheck(&fsfull_last, &fserr_interval)) { | |||
| 319 | ffs_fserr(fs, cred->cr_uid, "file system full")do { log(3, "uid %u on %s: %s\n", (cred->cr_uid), (fs)-> fs_fsmnt, ("file system full")); } while (0); | |||
| 320 | uprintf("\n%s: write failed, file system is full\n", | |||
| 321 | fs->fs_fsmnt); | |||
| 322 | } | |||
| 323 | error = ENOSPC28; | |||
| 324 | ||||
| 325 | error: | |||
| 326 | if (bp != NULL((void *)0)) { | |||
| 327 | brelse(bp); | |||
| 328 | bp = NULL((void *)0); | |||
| 329 | } | |||
| 330 | ||||
| 331 | /* | |||
| 332 | * Restore user's disk quota because allocation failed. | |||
| 333 | */ | |||
| 334 | if (quota_updated != 0) | |||
| 335 | (void)ufs_quota_free_blocks(ip, quota_updated, cred)ufs_quota_free_blocks2(ip, quota_updated, cred, 0); | |||
| 336 | ||||
| 337 | return error; | |||
| 338 | } | |||
| 339 | ||||
| 340 | /* | |||
| 341 | * Allocate an inode in the file system. | |||
| 342 | * | |||
| 343 | * If allocating a directory, use ffs_dirpref to select the inode. | |||
| 344 | * If allocating in a directory, the following hierarchy is followed: | |||
| 345 | * 1) allocate the preferred inode. | |||
| 346 | * 2) allocate an inode in the same cylinder group. | |||
| 347 | * 3) quadratically rehash into other cylinder groups, until an | |||
| 348 | * available inode is located. | |||
| 349 | * If no inode preference is given the following hierarchy is used | |||
| 350 | * to allocate an inode: | |||
| 351 | * 1) allocate an inode in cylinder group 0. | |||
| 352 | * 2) quadratically rehash into other cylinder groups, until an | |||
| 353 | * available inode is located. | |||
| 354 | */ | |||
| 355 | int | |||
| 356 | ffs_inode_alloc(struct inode *pip, mode_t mode, struct ucred *cred, | |||
| 357 | struct vnode **vpp) | |||
| 358 | { | |||
| 359 | static struct timeval fsnoinodes_last; | |||
| 360 | struct vnode *pvp = ITOV(pip)((pip)->i_vnode); | |||
| 361 | struct fs *fs; | |||
| 362 | struct inode *ip; | |||
| 363 | ufsino_t ino, ipref; | |||
| 364 | u_int cg; | |||
| 365 | int error; | |||
| 366 | ||||
| 367 | *vpp = NULL((void *)0); | |||
| 368 | fs = pip->i_fsinode_u.fs; | |||
| 369 | if (fs->fs_cstotal.cs_nifree == 0) | |||
| ||||
| 370 | goto noinodes; | |||
| 371 | ||||
| 372 | if ((mode & IFMT0170000) == IFDIR0040000) | |||
| 373 | ipref = ffs_dirpref(pip); | |||
| 374 | else | |||
| 375 | ipref = pip->i_number; | |||
| 376 | if (ipref >= fs->fs_ncg * fs->fs_ipg) | |||
| 377 | ipref = 0; | |||
| 378 | cg = ino_to_cg(fs, ipref)((ipref) / (fs)->fs_ipg); | |||
| ||||
| 379 | ||||
| 380 | /* | |||
| 381 | * Track number of dirs created one after another | |||
| 382 | * in a same cg without intervening by files. | |||
| 383 | */ | |||
| 384 | if ((mode & IFMT0170000) == IFDIR0040000) { | |||
| 385 | if (fs->fs_contigdirs[cg] < 255) | |||
| 386 | fs->fs_contigdirs[cg]++; | |||
| 387 | } else { | |||
| 388 | if (fs->fs_contigdirs[cg] > 0) | |||
| 389 | fs->fs_contigdirs[cg]--; | |||
| 390 | } | |||
| 391 | ino = (ufsino_t)ffs_hashalloc(pip, cg, ipref, mode, ffs_nodealloccg); | |||
| 392 | if (ino == 0) | |||
| 393 | goto noinodes; | |||
| 394 | error = VFS_VGET(pvp->v_mount, ino, vpp)(*(pvp->v_mount)->mnt_op->vfs_vget)(pvp->v_mount, ino, vpp); | |||
| 395 | if (error) { | |||
| 396 | ffs_inode_free(pip, ino, mode); | |||
| 397 | return (error); | |||
| 398 | } | |||
| 399 | ||||
| 400 | ip = VTOI(*vpp)((struct inode *)(*vpp)->v_data); | |||
| 401 | ||||
| 402 | if (DIP(ip, mode)(((ip)->i_ump->um_fstype == 1) ? (ip)->dinode_u.ffs1_din ->di_mode : (ip)->dinode_u.ffs2_din->di_mode)) { | |||
| 403 | printf("mode = 0%o, inum = %u, fs = %s\n", | |||
| 404 | DIP(ip, mode)(((ip)->i_ump->um_fstype == 1) ? (ip)->dinode_u.ffs1_din ->di_mode : (ip)->dinode_u.ffs2_din->di_mode), ip->i_number, fs->fs_fsmnt); | |||
| 405 | panic("ffs_valloc: dup alloc"); | |||
| 406 | } | |||
| 407 | ||||
| 408 | if (DIP(ip, blocks)(((ip)->i_ump->um_fstype == 1) ? (ip)->dinode_u.ffs1_din ->di_blocks : (ip)->dinode_u.ffs2_din->di_blocks)) { | |||
| 409 | printf("free inode %s/%d had %lld blocks\n", | |||
| 410 | fs->fs_fsmnt, ino, (long long)DIP(ip, blocks)(((ip)->i_ump->um_fstype == 1) ? (ip)->dinode_u.ffs1_din ->di_blocks : (ip)->dinode_u.ffs2_din->di_blocks)); | |||
| 411 | DIP_ASSIGN(ip, blocks, 0)do { if ((ip)->i_ump->um_fstype == 1) (ip)->dinode_u .ffs1_din->di_blocks = (0); else (ip)->dinode_u.ffs2_din ->di_blocks = (0); } while (0); | |||
| 412 | } | |||
| 413 | ||||
| 414 | DIP_ASSIGN(ip, flags, 0)do { if ((ip)->i_ump->um_fstype == 1) (ip)->dinode_u .ffs1_din->di_flags = (0); else (ip)->dinode_u.ffs2_din ->di_flags = (0); } while (0); | |||
| 415 | ||||
| 416 | /* | |||
| 417 | * Set up a new generation number for this inode. | |||
| 418 | * On wrap, we make sure to assign a number != 0 and != UINT_MAX | |||
| 419 | * (the original value). | |||
| 420 | */ | |||
| 421 | if (DIP(ip, gen)(((ip)->i_ump->um_fstype == 1) ? (ip)->dinode_u.ffs1_din ->di_gen : (ip)->dinode_u.ffs2_din->di_gen) != 0) | |||
| 422 | DIP_ADD(ip, gen, 1)do { if ((ip)->i_ump->um_fstype == 1) (ip)->dinode_u .ffs1_din->di_gen += (1); else (ip)->dinode_u.ffs2_din-> di_gen += (1); } while (0); | |||
| 423 | while (DIP(ip, gen)(((ip)->i_ump->um_fstype == 1) ? (ip)->dinode_u.ffs1_din ->di_gen : (ip)->dinode_u.ffs2_din->di_gen) == 0) | |||
| 424 | DIP_ASSIGN(ip, gen, arc4random_uniform(UINT_MAX))do { if ((ip)->i_ump->um_fstype == 1) (ip)->dinode_u .ffs1_din->di_gen = (arc4random_uniform(0xffffffffU)); else (ip)->dinode_u.ffs2_din->di_gen = (arc4random_uniform( 0xffffffffU)); } while (0); | |||
| 425 | ||||
| 426 | return (0); | |||
| 427 | ||||
| 428 | noinodes: | |||
| 429 | if (ratecheck(&fsnoinodes_last, &fserr_interval)) { | |||
| 430 | ffs_fserr(fs, cred->cr_uid, "out of inodes")do { log(3, "uid %u on %s: %s\n", (cred->cr_uid), (fs)-> fs_fsmnt, ("out of inodes")); } while (0); | |||
| 431 | uprintf("\n%s: create/symlink failed, no inodes free\n", | |||
| 432 | fs->fs_fsmnt); | |||
| 433 | } | |||
| 434 | return (ENOSPC28); | |||
| 435 | } | |||
| 436 | ||||
| 437 | /* | |||
| 438 | * Find a cylinder group to place a directory. | |||
| 439 | * | |||
| 440 | * The policy implemented by this algorithm is to allocate a | |||
| 441 | * directory inode in the same cylinder group as its parent | |||
| 442 | * directory, but also to reserve space for its files inodes | |||
| 443 | * and data. Restrict the number of directories which may be | |||
| 444 | * allocated one after another in the same cylinder group | |||
| 445 | * without intervening allocation of files. | |||
| 446 | * | |||
| 447 | * If we allocate a first level directory then force allocation | |||
| 448 | * in another cylinder group. | |||
| 449 | */ | |||
| 450 | ufsino_t | |||
| 451 | ffs_dirpref(struct inode *pip) | |||
| 452 | { | |||
| 453 | struct fs *fs; | |||
| 454 | u_int cg, prefcg; | |||
| 455 | u_int dirsize, cgsize; | |||
| 456 | u_int avgifree, avgbfree, avgndir, curdirsize; | |||
| 457 | u_int minifree, minbfree, maxndir; | |||
| 458 | u_int mincg, minndir; | |||
| 459 | u_int maxcontigdirs; | |||
| 460 | ||||
| 461 | fs = pip->i_fsinode_u.fs; | |||
| 462 | ||||
| 463 | avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; | |||
| 464 | avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; | |||
| 465 | avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg; | |||
| 466 | ||||
| 467 | /* | |||
| 468 | * Force allocation in another cg if creating a first level dir. | |||
| 469 | */ | |||
| 470 | if (ITOV(pip)((pip)->i_vnode)->v_flag & VROOT0x0001) { | |||
| 471 | prefcg = arc4random_uniform(fs->fs_ncg); | |||
| 472 | mincg = prefcg; | |||
| 473 | minndir = fs->fs_ipg; | |||
| 474 | for (cg = prefcg; cg < fs->fs_ncg; cg++) | |||
| 475 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_ndir < minndir && | |||
| 476 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nifree >= avgifree && | |||
| 477 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nbfree >= avgbfree) { | |||
| 478 | mincg = cg; | |||
| 479 | minndir = fs->fs_cs(fs, cg)fs_csp[cg].cs_ndir; | |||
| 480 | } | |||
| 481 | for (cg = 0; cg < prefcg; cg++) | |||
| 482 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_ndir < minndir && | |||
| 483 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nifree >= avgifree && | |||
| 484 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nbfree >= avgbfree) { | |||
| 485 | mincg = cg; | |||
| 486 | minndir = fs->fs_cs(fs, cg)fs_csp[cg].cs_ndir; | |||
| 487 | } | |||
| 488 | cg = mincg; | |||
| 489 | goto end; | |||
| 490 | } else | |||
| 491 | prefcg = ino_to_cg(fs, pip->i_number)((pip->i_number) / (fs)->fs_ipg); | |||
| 492 | ||||
| 493 | /* | |||
| 494 | * Count various limits which used for | |||
| 495 | * optimal allocation of a directory inode. | |||
| 496 | */ | |||
| 497 | maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg); | |||
| 498 | minifree = avgifree - (avgifree / 4); | |||
| 499 | if (minifree < 1) | |||
| 500 | minifree = 1; | |||
| 501 | minbfree = avgbfree - (avgbfree / 4); | |||
| 502 | if (minbfree < 1) | |||
| 503 | minbfree = 1; | |||
| 504 | ||||
| 505 | cgsize = fs->fs_fsize * fs->fs_fpg; | |||
| 506 | dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir; | |||
| 507 | curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0; | |||
| 508 | if (dirsize < curdirsize) | |||
| 509 | dirsize = curdirsize; | |||
| 510 | if (dirsize <= 0) | |||
| 511 | maxcontigdirs = 0; /* dirsize overflowed */ | |||
| 512 | else | |||
| 513 | maxcontigdirs = min(avgbfree * fs->fs_bsize / dirsize, 255); | |||
| 514 | if (fs->fs_avgfpdir > 0) | |||
| 515 | maxcontigdirs = min(maxcontigdirs, | |||
| 516 | fs->fs_ipg / fs->fs_avgfpdir); | |||
| 517 | if (maxcontigdirs == 0) | |||
| 518 | maxcontigdirs = 1; | |||
| 519 | ||||
| 520 | /* | |||
| 521 | * Limit number of dirs in one cg and reserve space for | |||
| 522 | * regular files, but only if we have no deficit in | |||
| 523 | * inodes or space. | |||
| 524 | * | |||
| 525 | * We are trying to find a suitable cylinder group nearby | |||
| 526 | * our preferred cylinder group to place a new directory. | |||
| 527 | * We scan from our preferred cylinder group forward looking | |||
| 528 | * for a cylinder group that meets our criterion. If we get | |||
| 529 | * to the final cylinder group and do not find anything, | |||
| 530 | * we start scanning forwards from the beginning of the | |||
| 531 | * filesystem. While it might seem sensible to start scanning | |||
| 532 | * backwards or even to alternate looking forward and backward, | |||
| 533 | * this approach fails badly when the filesystem is nearly full. | |||
| 534 | * Specifically, we first search all the areas that have no space | |||
| 535 | * and finally try the one preceding that. We repeat this on | |||
| 536 | * every request and in the case of the final block end up | |||
| 537 | * searching the entire filesystem. By jumping to the front | |||
| 538 | * of the filesystem, our future forward searches always look | |||
| 539 | * in new cylinder groups so finds every possible block after | |||
| 540 | * one pass over the filesystem. | |||
| 541 | */ | |||
| 542 | for (cg = prefcg; cg < fs->fs_ncg; cg++) | |||
| 543 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_ndir < maxndir && | |||
| 544 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nifree >= minifree && | |||
| 545 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nbfree >= minbfree) { | |||
| 546 | if (fs->fs_contigdirs[cg] < maxcontigdirs) | |||
| 547 | goto end; | |||
| 548 | } | |||
| 549 | for (cg = 0; cg < prefcg; cg++) | |||
| 550 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_ndir < maxndir && | |||
| 551 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nifree >= minifree && | |||
| 552 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nbfree >= minbfree) { | |||
| 553 | if (fs->fs_contigdirs[cg] < maxcontigdirs) | |||
| 554 | goto end; | |||
| 555 | } | |||
| 556 | /* | |||
| 557 | * This is a backstop when we have deficit in space. | |||
| 558 | */ | |||
| 559 | for (cg = prefcg; cg < fs->fs_ncg; cg++) | |||
| 560 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_nifree >= avgifree) | |||
| 561 | goto end; | |||
| 562 | for (cg = 0; cg < prefcg; cg++) | |||
| 563 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_nifree >= avgifree) | |||
| 564 | goto end; | |||
| 565 | end: | |||
| 566 | return ((ufsino_t)(fs->fs_ipg * cg)); | |||
| 567 | } | |||
| 568 | ||||
| 569 | /* | |||
| 570 | * Select the desired position for the next block in a file. The file is | |||
| 571 | * logically divided into sections. The first section is composed of the | |||
| 572 | * direct blocks. Each additional section contains fs_maxbpg blocks. | |||
| 573 | * | |||
| 574 | * If no blocks have been allocated in the first section, the policy is to | |||
| 575 | * request a block in the same cylinder group as the inode that describes | |||
| 576 | * the file. The first indirect is allocated immediately following the last | |||
| 577 | * direct block and the data blocks for the first indirect immediately | |||
| 578 | * follow it. | |||
| 579 | * | |||
| 580 | * If no blocks have been allocated in any other section, the indirect | |||
| 581 | * block(s) are allocated in the same cylinder group as its inode in an | |||
| 582 | * area reserved immediately following the inode blocks. The policy for | |||
| 583 | * the data blocks is to place them in a cylinder group with a greater than | |||
| 584 | * average number of free blocks. An appropriate cylinder group is found | |||
| 585 | * by using a rotor that sweeps the cylinder groups. When a new group of | |||
| 586 | * blocks is needed, the sweep begins in the cylinder group following the | |||
| 587 | * cylinder group from which the previous allocation was made. The sweep | |||
| 588 | * continues until a cylinder group with greater than the average number | |||
| 589 | * of free blocks is found. If the allocation is for the first block in an | |||
| 590 | * indirect block, the information on the previous allocation is unavailable; | |||
| 591 | * here a best guess is made based upon the logical block number being | |||
| 592 | * allocated. | |||
| 593 | */ | |||
| 594 | int32_t | |||
| 595 | ffs1_blkpref(struct inode *ip, daddr_t lbn, int indx, int32_t *bap) | |||
| 596 | { | |||
| 597 | struct fs *fs; | |||
| 598 | u_int cg, inocg; | |||
| 599 | u_int avgbfree, startcg; | |||
| 600 | uint32_t pref; | |||
| 601 | ||||
| 602 | KASSERT(indx <= 0 || bap != NULL)((indx <= 0 || bap != ((void *)0)) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/ufs/ffs/ffs_alloc.c", 602, "indx <= 0 || bap != NULL" )); | |||
| 603 | fs = ip->i_fsinode_u.fs; | |||
| 604 | /* | |||
| 605 | * Allocation of indirect blocks is indicated by passing negative | |||
| 606 | * values in indx: -1 for single indirect, -2 for double indirect, | |||
| 607 | * -3 for triple indirect. As noted below, we attempt to allocate | |||
| 608 | * the first indirect inline with the file data. For all later | |||
| 609 | * indirect blocks, the data is often allocated in other cylinder | |||
| 610 | * groups. However to speed random file access and to speed up | |||
| 611 | * fsck, the filesystem reserves the first fs_metaspace blocks | |||
| 612 | * (typically half of fs_minfree) of the data area of each cylinder | |||
| 613 | * group to hold these later indirect blocks. | |||
| 614 | */ | |||
| 615 | inocg = ino_to_cg(fs, ip->i_number)((ip->i_number) / (fs)->fs_ipg); | |||
| 616 | if (indx < 0) { | |||
| 617 | /* | |||
| 618 | * Our preference for indirect blocks is the zone at the | |||
| 619 | * beginning of the inode's cylinder group data area that | |||
| 620 | * we try to reserve for indirect blocks. | |||
| 621 | */ | |||
| 622 | pref = cgmeta(fs, inocg)(((((daddr_t)(fs)->fs_fpg * (inocg)) + (fs)->fs_cgoffset * ((inocg) & ~((fs)->fs_cgmask))) + (fs)->fs_dblkno )); | |||
| 623 | /* | |||
| 624 | * If we are allocating the first indirect block, try to | |||
| 625 | * place it immediately following the last direct block. | |||
| 626 | */ | |||
| 627 | if (indx == -1 && lbn < NDADDR12 + NINDIR(fs)((fs)->fs_nindir) && | |||
| 628 | ip->i_din1dinode_u.ffs1_din->di_db[NDADDR12 - 1] != 0) | |||
| 629 | pref = ip->i_din1dinode_u.ffs1_din->di_db[NDADDR12 - 1] + fs->fs_frag; | |||
| 630 | return (pref); | |||
| 631 | } | |||
| 632 | /* | |||
| 633 | * If we are allocating the first data block in the first indirect | |||
| 634 | * block and the indirect has been allocated in the data block area, | |||
| 635 | * try to place it immediately following the indirect block. | |||
| 636 | */ | |||
| 637 | if (lbn == NDADDR12) { | |||
| 638 | pref = ip->i_din1dinode_u.ffs1_din->di_ib[0]; | |||
| 639 | if (pref != 0 && pref >= cgdata(fs, inocg)(((((daddr_t)(fs)->fs_fpg * (inocg)) + (fs)->fs_cgoffset * ((inocg) & ~((fs)->fs_cgmask))) + (fs)->fs_dblkno ) + (fs)->fs_minfree) && | |||
| 640 | pref < cgbase(fs, inocg + 1)((daddr_t)(fs)->fs_fpg * (inocg + 1))) | |||
| 641 | return (pref + fs->fs_frag); | |||
| 642 | } | |||
| 643 | /* | |||
| 644 | * If we are the beginning of a file, or we have already allocated | |||
| 645 | * the maximum number of blocks per cylinder group, or we do not | |||
| 646 | * have a block allocated immediately preceding us, then we need | |||
| 647 | * to decide where to start allocating new blocks. | |||
| 648 | */ | |||
| 649 | if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) { | |||
| 650 | /* | |||
| 651 | * If we are allocating a directory data block, we want | |||
| 652 | * to place it in the metadata area. | |||
| 653 | */ | |||
| 654 | if ((DIP(ip, mode)(((ip)->i_ump->um_fstype == 1) ? (ip)->dinode_u.ffs1_din ->di_mode : (ip)->dinode_u.ffs2_din->di_mode) & IFMT0170000) == IFDIR0040000) | |||
| 655 | return (cgmeta(fs, inocg)(((((daddr_t)(fs)->fs_fpg * (inocg)) + (fs)->fs_cgoffset * ((inocg) & ~((fs)->fs_cgmask))) + (fs)->fs_dblkno ))); | |||
| 656 | /* | |||
| 657 | * Until we fill all the direct and all the first indirect's | |||
| 658 | * blocks, we try to allocate in the data area of the inode's | |||
| 659 | * cylinder group. | |||
| 660 | */ | |||
| 661 | if (lbn < NDADDR12 + NINDIR(fs)((fs)->fs_nindir)) | |||
| 662 | return (cgdata(fs, inocg)(((((daddr_t)(fs)->fs_fpg * (inocg)) + (fs)->fs_cgoffset * ((inocg) & ~((fs)->fs_cgmask))) + (fs)->fs_dblkno ) + (fs)->fs_minfree)); | |||
| 663 | /* | |||
| 664 | * Find a cylinder with greater than average number of | |||
| 665 | * unused data blocks. | |||
| 666 | */ | |||
| 667 | if (indx == 0 || bap[indx - 1] == 0) | |||
| 668 | startcg = inocg + lbn / fs->fs_maxbpg; | |||
| 669 | else | |||
| 670 | startcg = dtog(fs, bap[indx - 1])((bap[indx - 1]) / (fs)->fs_fpg) + 1; | |||
| 671 | startcg %= fs->fs_ncg; | |||
| 672 | avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; | |||
| 673 | for (cg = startcg; cg < fs->fs_ncg; cg++) | |||
| 674 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_nbfree >= avgbfree) { | |||
| 675 | fs->fs_cgrotor = cg; | |||
| 676 | return (cgdata(fs, cg)(((((daddr_t)(fs)->fs_fpg * (cg)) + (fs)->fs_cgoffset * ((cg) & ~((fs)->fs_cgmask))) + (fs)->fs_dblkno) + ( fs)->fs_minfree)); | |||
| 677 | } | |||
| 678 | for (cg = 0; cg <= startcg; cg++) | |||
| 679 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_nbfree >= avgbfree) { | |||
| 680 | fs->fs_cgrotor = cg; | |||
| 681 | return (cgdata(fs, cg)(((((daddr_t)(fs)->fs_fpg * (cg)) + (fs)->fs_cgoffset * ((cg) & ~((fs)->fs_cgmask))) + (fs)->fs_dblkno) + ( fs)->fs_minfree)); | |||
| 682 | } | |||
| 683 | return (0); | |||
| 684 | } | |||
| 685 | /* | |||
| 686 | * Otherwise, we just always try to lay things out contiguously. | |||
| 687 | */ | |||
| 688 | return (bap[indx - 1] + fs->fs_frag); | |||
| 689 | } | |||
| 690 | ||||
/*
 * Same as above, for UFS2.
 *
 * Select a preferred physical block number (fragment address) for the
 * logical block 'lbn' of the inode 'ip'.  'indx' is the index of this
 * block within the indirect block 'bap' (negative values -1/-2/-3 mean
 * we are allocating the single/double/triple indirect block itself).
 * Returns 0 when there is no particular preference.
 */
#ifdef FFS2
int64_t
ffs2_blkpref(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	uint64_t pref;

	KASSERT(indx <= 0 || bap != NULL);
	fs = ip->i_fs;

	/*
	 * Defend against a corrupted (or hostile) superblock: fs_ipg,
	 * fs_maxbpg and fs_ncg are all used as divisors below, so a
	 * zero value would cause a division-by-zero fault in the
	 * kernel (the class of defect flagged by the static analyzer).
	 * Returning 0 simply means "no preference" to our callers.
	 */
	if (fs->fs_ipg == 0 || fs->fs_maxbpg == 0 || fs->fs_ncg == 0)
		return (0);

	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
		    ip->i_din2->di_db[NDADDR - 1] != 0)
			pref = ip->i_din2->di_db[NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din2->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((DIP(ip, mode) & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 *
		 * NOTE(review): this scan differs slightly from the FFS1
		 * version above (cgbase()+fs_frag vs cgdata(), and the
		 * second loop bound); kept as-is to preserve behavior.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1] + 1);

		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;

		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (cgbase(fs, cg) + fs->fs_frag);

		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (cgbase(fs, cg) + fs->fs_frag);

		return (0);
	}

	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}
#endif /* FFS2 */
| 794 | ||||
| 795 | /* | |||
| 796 | * Implement the cylinder overflow algorithm. | |||
| 797 | * | |||
| 798 | * The policy implemented by this algorithm is: | |||
| 799 | * 1) allocate the block in its requested cylinder group. | |||
| 800 | * 2) quadratically rehash on the cylinder group number. | |||
| 801 | * 3) brute force search for a free block. | |||
| 802 | */ | |||
| 803 | daddr_t | |||
| 804 | ffs_hashalloc(struct inode *ip, u_int cg, daddr_t pref, int size, | |||
| 805 | daddr_t (*allocator)(struct inode *, u_int, daddr_t, int)) | |||
| 806 | { | |||
| 807 | struct fs *fs; | |||
| 808 | daddr_t result; | |||
| 809 | u_int i, icg = cg; | |||
| 810 | ||||
| 811 | fs = ip->i_fsinode_u.fs; | |||
| 812 | /* | |||
| 813 | * 1: preferred cylinder group | |||
| 814 | */ | |||
| 815 | result = (*allocator)(ip, cg, pref, size); | |||
| 816 | if (result) | |||
| 817 | return (result); | |||
| 818 | /* | |||
| 819 | * 2: quadratic rehash | |||
| 820 | */ | |||
| 821 | for (i = 1; i < fs->fs_ncg; i *= 2) { | |||
| 822 | cg += i; | |||
| 823 | if (cg >= fs->fs_ncg) | |||
| 824 | cg -= fs->fs_ncg; | |||
| 825 | result = (*allocator)(ip, cg, 0, size); | |||
| 826 | if (result) | |||
| 827 | return (result); | |||
| 828 | } | |||
| 829 | /* | |||
| 830 | * 3: brute force search | |||
| 831 | * Note that we start at i == 2, since 0 was checked initially, | |||
| 832 | * and 1 is always checked in the quadratic rehash. | |||
| 833 | */ | |||
| 834 | cg = (icg + 2) % fs->fs_ncg; | |||
| 835 | for (i = 2; i < fs->fs_ncg; i++) { | |||
| 836 | result = (*allocator)(ip, cg, 0, size); | |||
| 837 | if (result) | |||
| 838 | return (result); | |||
| 839 | cg++; | |||
| 840 | if (cg == fs->fs_ncg) | |||
| 841 | cg = 0; | |||
| 842 | } | |||
| 843 | return (0); | |||
| 844 | } | |||
| 845 | ||||
| 846 | struct buf * | |||
| 847 | ffs_cgread(struct fs *fs, struct inode *ip, u_int cg) | |||
| 848 | { | |||
| 849 | struct buf *bp; | |||
| 850 | ||||
| 851 | if (bread(ip->i_devvpi_ump->um_devvp, fsbtodb(fs, cgtod(fs, cg))((((((daddr_t)(fs)->fs_fpg * (cg)) + (fs)->fs_cgoffset * ((cg) & ~((fs)->fs_cgmask))) + (fs)->fs_cblkno)) << (fs)->fs_fsbtodb), | |||
| 852 | (int)fs->fs_cgsize, &bp)) { | |||
| 853 | brelse(bp); | |||
| 854 | return (NULL((void *)0)); | |||
| 855 | } | |||
| 856 | ||||
| 857 | if (!cg_chkmagic((struct cg *)bp->b_data)(((struct cg *)bp->b_data)->cg_magic == 0x090255 || ((struct ocg *)((struct cg *)bp->b_data))->cg_magic == 0x090255 )) { | |||
| 858 | brelse(bp); | |||
| 859 | return (NULL((void *)0)); | |||
| 860 | } | |||
| 861 | ||||
| 862 | return bp; | |||
| 863 | } | |||
| 864 | ||||
| 865 | /* | |||
| 866 | * Determine whether a fragment can be extended. | |||
| 867 | * | |||
| 868 | * Check to see if the necessary fragments are available, and | |||
| 869 | * if they are, allocate them. | |||
| 870 | */ | |||
| 871 | daddr_t | |||
| 872 | ffs_fragextend(struct inode *ip, u_int cg, daddr_t bprev, int osize, int nsize) | |||
| 873 | { | |||
| 874 | struct fs *fs; | |||
| 875 | struct cg *cgp; | |||
| 876 | struct buf *bp; | |||
| 877 | struct timespec now; | |||
| 878 | daddr_t bno; | |||
| 879 | int i, frags, bbase; | |||
| 880 | ||||
| 881 | fs = ip->i_fsinode_u.fs; | |||
| 882 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_nffree < numfrags(fs, nsize - osize)((nsize - osize) >> (fs)->fs_fshift)) | |||
| 883 | return (0); | |||
| 884 | frags = numfrags(fs, nsize)((nsize) >> (fs)->fs_fshift); | |||
| 885 | bbase = fragnum(fs, bprev)((bprev) & ((fs)->fs_frag - 1)); | |||
| 886 | if (bbase > fragnum(fs, (bprev + frags - 1))(((bprev + frags - 1)) & ((fs)->fs_frag - 1))) { | |||
| 887 | /* cannot extend across a block boundary */ | |||
| 888 | return (0); | |||
| 889 | } | |||
| 890 | ||||
| 891 | if (!(bp = ffs_cgread(fs, ip, cg))) | |||
| 892 | return (0); | |||
| 893 | ||||
| 894 | cgp = (struct cg *)bp->b_data; | |||
| 895 | nanotime(&now); | |||
| 896 | cgp->cg_ffs2_time = now.tv_sec; | |||
| 897 | cgp->cg_time = now.tv_sec; | |||
| 898 | ||||
| 899 | bno = dtogd(fs, bprev)((bprev) % (fs)->fs_fpg); | |||
| 900 | for (i = numfrags(fs, osize)((osize) >> (fs)->fs_fshift); i < frags; i++) | |||
| 901 | if (isclr(cg_blksfree(cgp), bno + i)((((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp)) ->cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))))[(bno + i)>>3] & (1<<((bno + i)&(8 -1 )))) == 0)) { | |||
| 902 | brelse(bp); | |||
| 903 | return (0); | |||
| 904 | } | |||
| 905 | /* | |||
| 906 | * the current fragment can be extended | |||
| 907 | * deduct the count on fragment being extended into | |||
| 908 | * increase the count on the remaining fragment (if any) | |||
| 909 | * allocate the extended piece | |||
| 910 | */ | |||
| 911 | for (i = frags; i < fs->fs_frag - bbase; i++) | |||
| 912 | if (isclr(cg_blksfree(cgp), bno + i)((((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp)) ->cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))))[(bno + i)>>3] & (1<<((bno + i)&(8 -1 )))) == 0)) | |||
| 913 | break; | |||
| 914 | cgp->cg_frsum[i - numfrags(fs, osize)((osize) >> (fs)->fs_fshift)]--; | |||
| 915 | if (i != frags) | |||
| 916 | cgp->cg_frsum[i - frags]++; | |||
| 917 | for (i = numfrags(fs, osize)((osize) >> (fs)->fs_fshift); i < frags; i++) { | |||
| 918 | clrbit(cg_blksfree(cgp), bno + i)(((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))))[(bno + i)>>3] &= ~(1<<((bno + i)&(8 - 1)))); | |||
| 919 | cgp->cg_cs.cs_nffree--; | |||
| 920 | fs->fs_cstotal.cs_nffree--; | |||
| 921 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nffree--; | |||
| 922 | } | |||
| 923 | fs->fs_fmod = 1; | |||
| 924 | if (DOINGSOFTDEP(ITOV(ip))((((ip)->i_vnode))->v_mount->mnt_flag & 0x04000000 )) | |||
| 925 | softdep_setup_blkmapdep(bp, fs, bprev); | |||
| 926 | ||||
| 927 | bdwrite(bp); | |||
| 928 | return (bprev); | |||
| 929 | } | |||
| 930 | ||||
| 931 | /* | |||
| 932 | * Determine whether a block can be allocated. | |||
| 933 | * | |||
| 934 | * Check to see if a block of the appropriate size is available, | |||
| 935 | * and if it is, allocate it. | |||
| 936 | */ | |||
| 937 | daddr_t | |||
| 938 | ffs_alloccg(struct inode *ip, u_int cg, daddr_t bpref, int size) | |||
| 939 | { | |||
| 940 | struct fs *fs; | |||
| 941 | struct cg *cgp; | |||
| 942 | struct buf *bp; | |||
| 943 | struct timespec now; | |||
| 944 | daddr_t bno, blkno; | |||
| 945 | int i, frags, allocsiz; | |||
| 946 | ||||
| 947 | fs = ip->i_fsinode_u.fs; | |||
| 948 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_nbfree == 0 && size == fs->fs_bsize) | |||
| 949 | return (0); | |||
| 950 | ||||
| 951 | if (!(bp = ffs_cgread(fs, ip, cg))) | |||
| 952 | return (0); | |||
| 953 | ||||
| 954 | cgp = (struct cg *)bp->b_data; | |||
| 955 | if (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize) { | |||
| 956 | brelse(bp); | |||
| 957 | return (0); | |||
| 958 | } | |||
| 959 | ||||
| 960 | nanotime(&now); | |||
| 961 | cgp->cg_ffs2_time = now.tv_sec; | |||
| 962 | cgp->cg_time = now.tv_sec; | |||
| 963 | ||||
| 964 | if (size == fs->fs_bsize) { | |||
| 965 | /* allocate and return a complete data block */ | |||
| 966 | bno = ffs_alloccgblk(ip, bp, bpref); | |||
| 967 | bdwrite(bp); | |||
| 968 | return (bno); | |||
| 969 | } | |||
| 970 | /* | |||
| 971 | * check to see if any fragments are already available | |||
| 972 | * allocsiz is the size which will be allocated, hacking | |||
| 973 | * it down to a smaller size if necessary | |||
| 974 | */ | |||
| 975 | frags = numfrags(fs, size)((size) >> (fs)->fs_fshift); | |||
| 976 | for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) | |||
| 977 | if (cgp->cg_frsum[allocsiz] != 0) | |||
| 978 | break; | |||
| 979 | if (allocsiz == fs->fs_frag) { | |||
| 980 | /* | |||
| 981 | * no fragments were available, so a block will be | |||
| 982 | * allocated, and hacked up | |||
| 983 | */ | |||
| 984 | if (cgp->cg_cs.cs_nbfree == 0) { | |||
| 985 | brelse(bp); | |||
| 986 | return (0); | |||
| 987 | } | |||
| 988 | bno = ffs_alloccgblk(ip, bp, bpref); | |||
| 989 | bpref = dtogd(fs, bno)((bno) % (fs)->fs_fpg); | |||
| 990 | for (i = frags; i < fs->fs_frag; i++) | |||
| 991 | setbit(cg_blksfree(cgp), bpref + i)(((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))))[(bpref + i)>>3] |= 1<<((bpref + i)&(8 -1 ))); | |||
| 992 | i = fs->fs_frag - frags; | |||
| 993 | cgp->cg_cs.cs_nffree += i; | |||
| 994 | fs->fs_cstotal.cs_nffree += i; | |||
| 995 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nffree += i; | |||
| 996 | fs->fs_fmod = 1; | |||
| 997 | cgp->cg_frsum[i]++; | |||
| 998 | bdwrite(bp); | |||
| 999 | return (bno); | |||
| 1000 | } | |||
| 1001 | bno = ffs_mapsearch(fs, cgp, bpref, allocsiz); | |||
| 1002 | if (bno < 0) { | |||
| 1003 | brelse(bp); | |||
| 1004 | return (0); | |||
| 1005 | } | |||
| 1006 | ||||
| 1007 | for (i = 0; i < frags; i++) | |||
| 1008 | clrbit(cg_blksfree(cgp), bno + i)(((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))))[(bno + i)>>3] &= ~(1<<((bno + i)&(8 - 1)))); | |||
| 1009 | cgp->cg_cs.cs_nffree -= frags; | |||
| 1010 | fs->fs_cstotal.cs_nffree -= frags; | |||
| 1011 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nffree -= frags; | |||
| 1012 | fs->fs_fmod = 1; | |||
| 1013 | cgp->cg_frsum[allocsiz]--; | |||
| 1014 | if (frags != allocsiz) | |||
| 1015 | cgp->cg_frsum[allocsiz - frags]++; | |||
| 1016 | ||||
| 1017 | blkno = cgbase(fs, cg)((daddr_t)(fs)->fs_fpg * (cg)) + bno; | |||
| 1018 | if (DOINGSOFTDEP(ITOV(ip))((((ip)->i_vnode))->v_mount->mnt_flag & 0x04000000 )) | |||
| 1019 | softdep_setup_blkmapdep(bp, fs, blkno); | |||
| 1020 | bdwrite(bp); | |||
| 1021 | return (blkno); | |||
| 1022 | } | |||
| 1023 | ||||
| 1024 | /* | |||
| 1025 | * Allocate a block in a cylinder group. | |||
| 1026 | * Note that this routine only allocates fs_bsize blocks; these | |||
| 1027 | * blocks may be fragmented by the routine that allocates them. | |||
| 1028 | */ | |||
| 1029 | daddr_t | |||
| 1030 | ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref) | |||
| 1031 | { | |||
| 1032 | struct fs *fs; | |||
| 1033 | struct cg *cgp; | |||
| 1034 | daddr_t bno, blkno; | |||
| 1035 | u_int8_t *blksfree; | |||
| 1036 | int cylno, cgbpref; | |||
| 1037 | ||||
| 1038 | fs = ip->i_fsinode_u.fs; | |||
| 1039 | cgp = (struct cg *) bp->b_data; | |||
| 1040 | blksfree = cg_blksfree(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))); | |||
| 1041 | ||||
| 1042 | if (bpref == 0) { | |||
| 1043 | bpref = cgp->cg_rotor; | |||
| 1044 | } else if ((cgbpref = dtog(fs, bpref)((bpref) / (fs)->fs_fpg)) != cgp->cg_cgx) { | |||
| 1045 | /* map bpref to correct zone in this cg */ | |||
| 1046 | if (bpref < cgdata(fs, cgbpref)(((((daddr_t)(fs)->fs_fpg * (cgbpref)) + (fs)->fs_cgoffset * ((cgbpref) & ~((fs)->fs_cgmask))) + (fs)->fs_dblkno ) + (fs)->fs_minfree)) | |||
| 1047 | bpref = cgmeta(fs, cgp->cg_cgx)(((((daddr_t)(fs)->fs_fpg * (cgp->cg_cgx)) + (fs)->fs_cgoffset * ((cgp->cg_cgx) & ~((fs)->fs_cgmask))) + (fs)-> fs_dblkno)); | |||
| 1048 | else | |||
| 1049 | bpref = cgdata(fs, cgp->cg_cgx)(((((daddr_t)(fs)->fs_fpg * (cgp->cg_cgx)) + (fs)->fs_cgoffset * ((cgp->cg_cgx) & ~((fs)->fs_cgmask))) + (fs)-> fs_dblkno) + (fs)->fs_minfree); | |||
| 1050 | } | |||
| 1051 | /* | |||
| 1052 | * If the requested block is available, use it. | |||
| 1053 | */ | |||
| 1054 | bno = dtogd(fs, blknum(fs, bpref))((((bpref) &~ ((fs)->fs_frag - 1))) % (fs)->fs_fpg); | |||
| 1055 | if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)((bno) >> (fs)->fs_fragshift))) | |||
| 1056 | goto gotit; | |||
| 1057 | /* | |||
| 1058 | * Take the next available block in this cylinder group. | |||
| 1059 | */ | |||
| 1060 | bno = ffs_mapsearch(fs, cgp, bpref, (int) fs->fs_frag); | |||
| 1061 | if (bno < 0) | |||
| 1062 | return (0); | |||
| 1063 | ||||
| 1064 | /* Update cg_rotor only if allocated from the data zone */ | |||
| 1065 | if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx))(((((((daddr_t)(fs)->fs_fpg * (cgp->cg_cgx)) + (fs)-> fs_cgoffset * ((cgp->cg_cgx) & ~((fs)->fs_cgmask))) + (fs)->fs_dblkno) + (fs)->fs_minfree)) % (fs)->fs_fpg )) | |||
| 1066 | cgp->cg_rotor = bno; | |||
| 1067 | ||||
| 1068 | gotit: | |||
| 1069 | blkno = fragstoblks(fs, bno)((bno) >> (fs)->fs_fragshift); | |||
| 1070 | ffs_clrblock(fs, blksfree, blkno); | |||
| 1071 | ffs_clusteracct(fs, cgp, blkno, -1); | |||
| 1072 | cgp->cg_cs.cs_nbfree--; | |||
| 1073 | fs->fs_cstotal.cs_nbfree--; | |||
| 1074 | fs->fs_cs(fs, cgp->cg_cgx)fs_csp[cgp->cg_cgx].cs_nbfree--; | |||
| 1075 | ||||
| 1076 | if (fs->fs_magic != FS_UFS2_MAGIC0x19540119) { | |||
| 1077 | cylno = cbtocylno(fs, bno)(((bno) << (fs)->fs_fsbtodb) / (fs)->fs_spc); | |||
| 1078 | cg_blks(fs, cgp, cylno)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_b[cylno]) : ((int16_t *)((u_int8_t *)(cgp) + (cgp)->cg_boff ) + (cylno) * (fs)->fs_nrpos))[cbtorpos(fs, bno)((fs)->fs_nrpos <= 1 ? 0 : (((bno) << (fs)->fs_fsbtodb ) % (fs)->fs_spc / (fs)->fs_nsect * (fs)->fs_trackskew + ((bno) << (fs)->fs_fsbtodb) % (fs)->fs_spc % ( fs)->fs_nsect * (fs)->fs_interleave) % (fs)->fs_nsect * (fs)->fs_nrpos / (fs)->fs_npsect)]--; | |||
| 1079 | cg_blktot(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_btot) : ((int32_t *)((u_int8_t *)(cgp) + (cgp)->cg_btotoff )))[cylno]--; | |||
| 1080 | } | |||
| 1081 | ||||
| 1082 | fs->fs_fmod = 1; | |||
| 1083 | blkno = cgbase(fs, cgp->cg_cgx)((daddr_t)(fs)->fs_fpg * (cgp->cg_cgx)) + bno; | |||
| 1084 | ||||
| 1085 | if (DOINGSOFTDEP(ITOV(ip))((((ip)->i_vnode))->v_mount->mnt_flag & 0x04000000 )) | |||
| 1086 | softdep_setup_blkmapdep(bp, fs, blkno); | |||
| 1087 | ||||
| 1088 | return (blkno); | |||
| 1089 | } | |||
| 1090 | ||||
| 1091 | /* inode allocation routine */ | |||
| 1092 | daddr_t | |||
| 1093 | ffs_nodealloccg(struct inode *ip, u_int cg, daddr_t ipref, int mode) | |||
| 1094 | { | |||
| 1095 | struct fs *fs; | |||
| 1096 | struct cg *cgp; | |||
| 1097 | struct buf *bp; | |||
| 1098 | struct timespec now; | |||
| 1099 | int start, len, loc, map, i; | |||
| 1100 | #ifdef FFS21 | |||
| 1101 | struct buf *ibp = NULL((void *)0); | |||
| 1102 | struct ufs2_dinode *dp2; | |||
| 1103 | #endif | |||
| 1104 | ||||
| 1105 | /* | |||
| 1106 | * For efficiency, before looking at the bitmaps for free inodes, | |||
| 1107 | * check the counters kept in the superblock cylinder group summaries, | |||
| 1108 | * and in the cylinder group itself. | |||
| 1109 | */ | |||
| 1110 | fs = ip->i_fsinode_u.fs; | |||
| 1111 | if (fs->fs_cs(fs, cg)fs_csp[cg].cs_nifree == 0) | |||
| 1112 | return (0); | |||
| 1113 | ||||
| 1114 | if (!(bp = ffs_cgread(fs, ip, cg))) | |||
| 1115 | return (0); | |||
| 1116 | ||||
| 1117 | cgp = (struct cg *)bp->b_data; | |||
| 1118 | if (cgp->cg_cs.cs_nifree == 0) { | |||
| 1119 | brelse(bp); | |||
| 1120 | return (0); | |||
| 1121 | } | |||
| 1122 | ||||
| 1123 | /* | |||
| 1124 | * We are committed to the allocation from now on, so update the time | |||
| 1125 | * on the cylinder group. | |||
| 1126 | */ | |||
| 1127 | nanotime(&now); | |||
| 1128 | cgp->cg_ffs2_time = now.tv_sec; | |||
| 1129 | cgp->cg_time = now.tv_sec; | |||
| 1130 | ||||
| 1131 | /* | |||
| 1132 | * If there was a preferred location for the new inode, try to find it. | |||
| 1133 | */ | |||
| 1134 | if (ipref) { | |||
| 1135 | ipref %= fs->fs_ipg; | |||
| 1136 | if (isclr(cg_inosused(cgp), ipref)((((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp)) ->cg_iused) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)-> cg_iusedoff))))[(ipref)>>3] & (1<<((ipref)& (8 -1)))) == 0)) | |||
| 1137 | goto gotit; /* inode is free, grab it. */ | |||
| 1138 | } | |||
| 1139 | ||||
| 1140 | /* | |||
| 1141 | * Otherwise, look for the next available inode, starting at cg_irotor | |||
| 1142 | * (the position in the bitmap of the last used inode). | |||
| 1143 | */ | |||
| 1144 | start = cgp->cg_irotor / NBBY8; | |||
| 1145 | len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY)(((fs->fs_ipg - cgp->cg_irotor) + ((8) - 1)) / (8)); | |||
| 1146 | loc = skpc(0xff, len, &cg_inosused(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_iused) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_iusedoff )))[start]); | |||
| 1147 | if (loc == 0) { | |||
| 1148 | /* | |||
| 1149 | * If we didn't find a free inode in the upper part of the | |||
| 1150 | * bitmap (from cg_irotor to the end), then look at the bottom | |||
| 1151 | * part (from 0 to cg_irotor). | |||
| 1152 | */ | |||
| 1153 | len = start + 1; | |||
| 1154 | start = 0; | |||
| 1155 | loc = skpc(0xff, len, &cg_inosused(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_iused) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_iusedoff )))[0]); | |||
| 1156 | if (loc == 0) { | |||
| 1157 | /* | |||
| 1158 | * If we failed again, then either the bitmap or the | |||
| 1159 | * counters kept for the cylinder group are wrong. | |||
| 1160 | */ | |||
| 1161 | printf("cg = %d, irotor = %d, fs = %s\n", | |||
| 1162 | cg, cgp->cg_irotor, fs->fs_fsmnt); | |||
| 1163 | panic("ffs_nodealloccg: map corrupted"); | |||
| 1164 | /* NOTREACHED */ | |||
| 1165 | } | |||
| 1166 | } | |||
| 1167 | ||||
| 1168 | /* skpc() returns the position relative to the end */ | |||
| 1169 | i = start + len - loc; | |||
| 1170 | ||||
| 1171 | /* | |||
| 1172 | * Okay, so now in 'i' we have the location in the bitmap of a byte | |||
| 1173 | * holding a free inode. Find the corresponding bit and set it, | |||
| 1174 | * updating cg_irotor as well, accordingly. | |||
| 1175 | */ | |||
| 1176 | map = cg_inosused(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_iused) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_iusedoff )))[i]; | |||
| 1177 | ipref = i * NBBY8; | |||
| 1178 | for (i = 1; i < (1 << NBBY8); i <<= 1, ipref++) { | |||
| 1179 | if ((map & i) == 0) { | |||
| 1180 | cgp->cg_irotor = ipref; | |||
| 1181 | goto gotit; | |||
| 1182 | } | |||
| 1183 | } | |||
| 1184 | ||||
| 1185 | printf("fs = %s\n", fs->fs_fsmnt); | |||
| 1186 | panic("ffs_nodealloccg: block not in map"); | |||
| 1187 | /* NOTREACHED */ | |||
| 1188 | ||||
| 1189 | gotit: | |||
| 1190 | ||||
| 1191 | #ifdef FFS21 | |||
| 1192 | /* | |||
| 1193 | * For FFS2, check if all inodes in this cylinder group have been used | |||
| 1194 | * at least once. If they haven't, and we are allocating an inode past | |||
| 1195 | * the last allocated block of inodes, read in a block and initialize | |||
| 1196 | * all inodes in it. | |||
| 1197 | */ | |||
| 1198 | if (fs->fs_magic == FS_UFS2_MAGIC0x19540119 && | |||
| 1199 | /* Inode is beyond last initialized block of inodes? */ | |||
| 1200 | ipref + INOPB(fs)((fs)->fs_inopb) > cgp->cg_initediblk && | |||
| 1201 | /* Has any inode not been used at least once? */ | |||
| 1202 | cgp->cg_initediblk < cgp->cg_ffs2_niblk) { | |||
| 1203 | ||||
| 1204 | ibp = getblk(ip->i_devvpi_ump->um_devvp, fsbtodb(fs,((((daddr_t)(((((daddr_t)(fs)->fs_fpg * (((cg * fs->fs_ipg + cgp->cg_initediblk) / (fs)->fs_ipg))) + (fs)->fs_cgoffset * ((((cg * fs->fs_ipg + cgp->cg_initediblk) / (fs)-> fs_ipg)) & ~((fs)->fs_cgmask))) + (fs)->fs_iblkno) + ((((((cg * fs->fs_ipg + cgp->cg_initediblk) % (fs)-> fs_ipg) / ((fs)->fs_inopb))) << ((fs))->fs_fragshift ))))) << (fs)->fs_fsbtodb) | |||
| 1205 | ino_to_fsba(fs, cg * fs->fs_ipg + cgp->cg_initediblk))((((daddr_t)(((((daddr_t)(fs)->fs_fpg * (((cg * fs->fs_ipg + cgp->cg_initediblk) / (fs)->fs_ipg))) + (fs)->fs_cgoffset * ((((cg * fs->fs_ipg + cgp->cg_initediblk) / (fs)-> fs_ipg)) & ~((fs)->fs_cgmask))) + (fs)->fs_iblkno) + ((((((cg * fs->fs_ipg + cgp->cg_initediblk) % (fs)-> fs_ipg) / ((fs)->fs_inopb))) << ((fs))->fs_fragshift ))))) << (fs)->fs_fsbtodb), | |||
| 1206 | (int)fs->fs_bsize, 0, INFSLP0xffffffffffffffffULL); | |||
| 1207 | ||||
| 1208 | memset(ibp->b_data, 0, fs->fs_bsize)__builtin_memset((ibp->b_data), (0), (fs->fs_bsize)); | |||
| 1209 | dp2 = (struct ufs2_dinode *)(ibp->b_data); | |||
| 1210 | ||||
| 1211 | /* Give each inode a generation number */ | |||
| 1212 | for (i = 0; i < INOPB(fs)((fs)->fs_inopb); i++) { | |||
| 1213 | while (dp2->di_gen == 0) | |||
| 1214 | dp2->di_gen = arc4random(); | |||
| 1215 | dp2++; | |||
| 1216 | } | |||
| 1217 | ||||
| 1218 | /* Update the counter of initialized inodes */ | |||
| 1219 | cgp->cg_initediblk += INOPB(fs)((fs)->fs_inopb); | |||
| 1220 | } | |||
| 1221 | #endif /* FFS2 */ | |||
| 1222 | ||||
| 1223 | if (DOINGSOFTDEP(ITOV(ip))((((ip)->i_vnode))->v_mount->mnt_flag & 0x04000000 )) | |||
| 1224 | softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref); | |||
| 1225 | ||||
| 1226 | setbit(cg_inosused(cgp), ipref)(((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_iused) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_iusedoff ))))[(ipref)>>3] |= 1<<((ipref)&(8 -1))); | |||
| 1227 | ||||
| 1228 | /* Update the counters we keep on free inodes */ | |||
| 1229 | cgp->cg_cs.cs_nifree--; | |||
| 1230 | fs->fs_cstotal.cs_nifree--; | |||
| 1231 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nifree--; | |||
| 1232 | fs->fs_fmod = 1; /* file system was modified */ | |||
| 1233 | ||||
| 1234 | /* Update the counters we keep on allocated directories */ | |||
| 1235 | if ((mode & IFMT0170000) == IFDIR0040000) { | |||
| 1236 | cgp->cg_cs.cs_ndir++; | |||
| 1237 | fs->fs_cstotal.cs_ndir++; | |||
| 1238 | fs->fs_cs(fs, cg)fs_csp[cg].cs_ndir++; | |||
| 1239 | } | |||
| 1240 | ||||
| 1241 | bdwrite(bp); | |||
| 1242 | ||||
| 1243 | #ifdef FFS21 | |||
| 1244 | if (ibp != NULL((void *)0)) | |||
| 1245 | bawrite(ibp); | |||
| 1246 | #endif | |||
| 1247 | ||||
| 1248 | /* Return the allocated inode number */ | |||
| 1249 | return (cg * fs->fs_ipg + ipref); | |||
| 1250 | } | |||
| 1251 | ||||
| 1252 | /* | |||
| 1253 | * Free a block or fragment. | |||
| 1254 | * | |||
| 1255 | * The specified block or fragment is placed back in the | |||
| 1256 | * free map. If a fragment is deallocated, a possible | |||
| 1257 | * block reassembly is checked. | |||
| 1258 | */ | |||
| 1259 | void | |||
| 1260 | ffs_blkfree(struct inode *ip, daddr_t bno, long size) | |||
| 1261 | { | |||
| 1262 | struct fs *fs; | |||
| 1263 | struct cg *cgp; | |||
| 1264 | struct buf *bp; | |||
| 1265 | struct timespec now; | |||
| 1266 | daddr_t blkno; | |||
| 1267 | int i, cg, blk, frags, bbase; | |||
| 1268 | ||||
| 1269 | fs = ip->i_fsinode_u.fs; | |||
| 1270 | if ((u_int)size > fs->fs_bsize || fragoff(fs, size)((size) & (fs)->fs_qfmask) != 0 || | |||
| 1271 | fragnum(fs, bno)((bno) & ((fs)->fs_frag - 1)) + numfrags(fs, size)((size) >> (fs)->fs_fshift) > fs->fs_frag) { | |||
| 1272 | printf("dev = 0x%x, bsize = %d, size = %ld, fs = %s\n", | |||
| 1273 | ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt); | |||
| 1274 | panic("ffs_blkfree: bad size"); | |||
| 1275 | } | |||
| 1276 | cg = dtog(fs, bno)((bno) / (fs)->fs_fpg); | |||
| 1277 | if ((u_int)bno >= fs->fs_size) { | |||
| 1278 | printf("bad block %lld, ino %u\n", (long long)bno, | |||
| 1279 | ip->i_number); | |||
| 1280 | ffs_fserr(fs, DIP(ip, uid), "bad block")do { log(3, "uid %u on %s: %s\n", ((((ip)->i_ump->um_fstype == 1) ? (ip)->dinode_u.ffs1_din->di_uid : (ip)->dinode_u .ffs2_din->di_uid)), (fs)->fs_fsmnt, ("bad block")); } while (0); | |||
| 1281 | return; | |||
| 1282 | } | |||
| 1283 | if (!(bp = ffs_cgread(fs, ip, cg))) | |||
| 1284 | return; | |||
| 1285 | ||||
| 1286 | cgp = (struct cg *)bp->b_data; | |||
| 1287 | nanotime(&now); | |||
| 1288 | cgp->cg_ffs2_time = now.tv_sec; | |||
| 1289 | cgp->cg_time = now.tv_sec; | |||
| 1290 | ||||
| 1291 | bno = dtogd(fs, bno)((bno) % (fs)->fs_fpg); | |||
| 1292 | if (size == fs->fs_bsize) { | |||
| 1293 | blkno = fragstoblks(fs, bno)((bno) >> (fs)->fs_fragshift); | |||
| 1294 | if (!ffs_isfreeblock(fs, cg_blksfree(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))), blkno)) { | |||
| 1295 | printf("dev = 0x%x, block = %lld, fs = %s\n", | |||
| 1296 | ip->i_dev, (long long)bno, fs->fs_fsmnt); | |||
| 1297 | panic("ffs_blkfree: freeing free block"); | |||
| 1298 | } | |||
| 1299 | ffs_setblock(fs, cg_blksfree(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))), blkno); | |||
| 1300 | ffs_clusteracct(fs, cgp, blkno, 1); | |||
| 1301 | cgp->cg_cs.cs_nbfree++; | |||
| 1302 | fs->fs_cstotal.cs_nbfree++; | |||
| 1303 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nbfree++; | |||
| 1304 | ||||
| 1305 | if (fs->fs_magic != FS_UFS2_MAGIC0x19540119) { | |||
| 1306 | i = cbtocylno(fs, bno)(((bno) << (fs)->fs_fsbtodb) / (fs)->fs_spc); | |||
| 1307 | cg_blks(fs, cgp, i)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_b[i]) : ((int16_t *)((u_int8_t *)(cgp) + (cgp)->cg_boff ) + (i) * (fs)->fs_nrpos))[cbtorpos(fs, bno)((fs)->fs_nrpos <= 1 ? 0 : (((bno) << (fs)->fs_fsbtodb ) % (fs)->fs_spc / (fs)->fs_nsect * (fs)->fs_trackskew + ((bno) << (fs)->fs_fsbtodb) % (fs)->fs_spc % ( fs)->fs_nsect * (fs)->fs_interleave) % (fs)->fs_nsect * (fs)->fs_nrpos / (fs)->fs_npsect)]++; | |||
| 1308 | cg_blktot(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_btot) : ((int32_t *)((u_int8_t *)(cgp) + (cgp)->cg_btotoff )))[i]++; | |||
| 1309 | } | |||
| 1310 | ||||
| 1311 | } else { | |||
| 1312 | bbase = bno - fragnum(fs, bno)((bno) & ((fs)->fs_frag - 1)); | |||
| 1313 | /* | |||
| 1314 | * decrement the counts associated with the old frags | |||
| 1315 | */ | |||
| 1316 | blk = blkmap(fs, cg_blksfree(cgp), bbase)((((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp)) ->cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))))[(bbase) / 8] >> ((bbase) % 8)) & (0xff >> (8 - (fs)->fs_frag))); | |||
| 1317 | ffs_fragacct(fs, blk, cgp->cg_frsum, -1); | |||
| 1318 | /* | |||
| 1319 | * deallocate the fragment | |||
| 1320 | */ | |||
| 1321 | frags = numfrags(fs, size)((size) >> (fs)->fs_fshift); | |||
| 1322 | for (i = 0; i < frags; i++) { | |||
| 1323 | if (isset(cg_blksfree(cgp), bno + i)(((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))))[(bno + i)>>3] & (1<<((bno + i)&(8 -1 ))))) { | |||
| 1324 | printf("dev = 0x%x, block = %lld, fs = %s\n", | |||
| 1325 | ip->i_dev, (long long)(bno + i), | |||
| 1326 | fs->fs_fsmnt); | |||
| 1327 | panic("ffs_blkfree: freeing free frag"); | |||
| 1328 | } | |||
| 1329 | setbit(cg_blksfree(cgp), bno + i)(((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))))[(bno + i)>>3] |= 1<<((bno + i)&(8 -1))); | |||
| 1330 | } | |||
| 1331 | cgp->cg_cs.cs_nffree += i; | |||
| 1332 | fs->fs_cstotal.cs_nffree += i; | |||
| 1333 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nffree += i; | |||
| 1334 | /* | |||
| 1335 | * add back in counts associated with the new frags | |||
| 1336 | */ | |||
| 1337 | blk = blkmap(fs, cg_blksfree(cgp), bbase)((((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp)) ->cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))))[(bbase) / 8] >> ((bbase) % 8)) & (0xff >> (8 - (fs)->fs_frag))); | |||
| 1338 | ffs_fragacct(fs, blk, cgp->cg_frsum, 1); | |||
| 1339 | /* | |||
| 1340 | * if a complete block has been reassembled, account for it | |||
| 1341 | */ | |||
| 1342 | blkno = fragstoblks(fs, bbase)((bbase) >> (fs)->fs_fragshift); | |||
| 1343 | if (ffs_isblock(fs, cg_blksfree(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))), blkno)) { | |||
| 1344 | cgp->cg_cs.cs_nffree -= fs->fs_frag; | |||
| 1345 | fs->fs_cstotal.cs_nffree -= fs->fs_frag; | |||
| 1346 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nffree -= fs->fs_frag; | |||
| 1347 | ffs_clusteracct(fs, cgp, blkno, 1); | |||
| 1348 | cgp->cg_cs.cs_nbfree++; | |||
| 1349 | fs->fs_cstotal.cs_nbfree++; | |||
| 1350 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nbfree++; | |||
| 1351 | ||||
| 1352 | if (fs->fs_magic != FS_UFS2_MAGIC0x19540119) { | |||
| 1353 | i = cbtocylno(fs, bbase)(((bbase) << (fs)->fs_fsbtodb) / (fs)->fs_spc); | |||
| 1354 | cg_blks(fs, cgp, i)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_b[i]) : ((int16_t *)((u_int8_t *)(cgp) + (cgp)->cg_boff ) + (i) * (fs)->fs_nrpos))[cbtorpos(fs, bbase)((fs)->fs_nrpos <= 1 ? 0 : (((bbase) << (fs)-> fs_fsbtodb) % (fs)->fs_spc / (fs)->fs_nsect * (fs)-> fs_trackskew + ((bbase) << (fs)->fs_fsbtodb) % (fs)-> fs_spc % (fs)->fs_nsect * (fs)->fs_interleave) % (fs)-> fs_nsect * (fs)->fs_nrpos / (fs)->fs_npsect)]++; | |||
| 1355 | cg_blktot(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_btot) : ((int32_t *)((u_int8_t *)(cgp) + (cgp)->cg_btotoff )))[i]++; | |||
| 1356 | } | |||
| 1357 | } | |||
| 1358 | } | |||
| 1359 | fs->fs_fmod = 1; | |||
| 1360 | bdwrite(bp); | |||
| 1361 | } | |||
| 1362 | ||||
| 1363 | int | |||
| 1364 | ffs_inode_free(struct inode *pip, ufsino_t ino, mode_t mode) | |||
| 1365 | { | |||
| 1366 | struct vnode *pvp = ITOV(pip)((pip)->i_vnode); | |||
| 1367 | ||||
| 1368 | if (DOINGSOFTDEP(pvp)((pvp)->v_mount->mnt_flag & 0x04000000)) { | |||
| 1369 | softdep_freefile(pvp, ino, mode); | |||
| 1370 | return (0); | |||
| 1371 | } | |||
| 1372 | ||||
| 1373 | return (ffs_freefile(pip, ino, mode)); | |||
| 1374 | } | |||
| 1375 | ||||
| 1376 | /* | |||
| 1377 | * Do the actual free operation. | |||
| 1378 | * The specified inode is placed back in the free map. | |||
| 1379 | */ | |||
| 1380 | int | |||
| 1381 | ffs_freefile(struct inode *pip, ufsino_t ino, mode_t mode) | |||
| 1382 | { | |||
| 1383 | struct fs *fs; | |||
| 1384 | struct cg *cgp; | |||
| 1385 | struct buf *bp; | |||
| 1386 | struct timespec now; | |||
| 1387 | u_int cg; | |||
| 1388 | ||||
| 1389 | fs = pip->i_fsinode_u.fs; | |||
| 1390 | if (ino >= fs->fs_ipg * fs->fs_ncg) | |||
| 1391 | panic("ffs_freefile: range: dev = 0x%x, ino = %d, fs = %s", | |||
| 1392 | pip->i_dev, ino, fs->fs_fsmnt); | |||
| 1393 | ||||
| 1394 | cg = ino_to_cg(fs, ino)((ino) / (fs)->fs_ipg); | |||
| 1395 | if (!(bp = ffs_cgread(fs, pip, cg))) | |||
| 1396 | return (0); | |||
| 1397 | ||||
| 1398 | cgp = (struct cg *)bp->b_data; | |||
| 1399 | nanotime(&now); | |||
| 1400 | cgp->cg_ffs2_time = now.tv_sec; | |||
| 1401 | cgp->cg_time = now.tv_sec; | |||
| 1402 | ||||
| 1403 | ino %= fs->fs_ipg; | |||
| 1404 | if (isclr(cg_inosused(cgp), ino)((((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp)) ->cg_iused) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)-> cg_iusedoff))))[(ino)>>3] & (1<<((ino)&(8 -1)))) == 0)) { | |||
| 1405 | printf("dev = 0x%x, ino = %u, fs = %s\n", | |||
| 1406 | pip->i_dev, ino, fs->fs_fsmnt); | |||
| 1407 | if (fs->fs_ronly == 0) | |||
| 1408 | panic("ffs_freefile: freeing free inode"); | |||
| 1409 | } | |||
| 1410 | clrbit(cg_inosused(cgp), ino)(((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_iused) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_iusedoff ))))[(ino)>>3] &= ~(1<<((ino)&(8 -1)))); | |||
| 1411 | if (ino < cgp->cg_irotor) | |||
| 1412 | cgp->cg_irotor = ino; | |||
| 1413 | cgp->cg_cs.cs_nifree++; | |||
| 1414 | fs->fs_cstotal.cs_nifree++; | |||
| 1415 | fs->fs_cs(fs, cg)fs_csp[cg].cs_nifree++; | |||
| 1416 | if ((mode & IFMT0170000) == IFDIR0040000) { | |||
| 1417 | cgp->cg_cs.cs_ndir--; | |||
| 1418 | fs->fs_cstotal.cs_ndir--; | |||
| 1419 | fs->fs_cs(fs, cg)fs_csp[cg].cs_ndir--; | |||
| 1420 | } | |||
| 1421 | fs->fs_fmod = 1; | |||
| 1422 | bdwrite(bp); | |||
| 1423 | return (0); | |||
| 1424 | } | |||
| 1425 | ||||
| 1426 | ||||
| 1427 | /* | |||
| 1428 | * Find a block of the specified size in the specified cylinder group. | |||
| 1429 | * | |||
| 1430 | * It is a panic if a request is made to find a block if none are | |||
| 1431 | * available. | |||
| 1432 | */ | |||
| 1433 | daddr_t | |||
| 1434 | ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz) | |||
| 1435 | { | |||
| 1436 | daddr_t bno; | |||
| 1437 | int start, len, loc, i; | |||
| 1438 | int blk, field, subfield, pos; | |||
| 1439 | ||||
| 1440 | /* | |||
| 1441 | * find the fragment by searching through the free block | |||
| 1442 | * map for an appropriate bit pattern | |||
| 1443 | */ | |||
| 1444 | if (bpref) | |||
| 1445 | start = dtogd(fs, bpref)((bpref) % (fs)->fs_fpg) / NBBY8; | |||
| 1446 | else | |||
| 1447 | start = cgp->cg_frotor / NBBY8; | |||
| 1448 | len = howmany(fs->fs_fpg, NBBY)(((fs->fs_fpg) + ((8) - 1)) / (8)) - start; | |||
| 1449 | loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff )))[start], | |||
| 1450 | (u_char *)fragtbl[fs->fs_frag], | |||
| 1451 | (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY8)))); | |||
| 1452 | if (loc == 0) { | |||
| 1453 | len = start + 1; | |||
| 1454 | start = 0; | |||
| 1455 | loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)(((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp))-> cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff )))[0], | |||
| 1456 | (u_char *)fragtbl[fs->fs_frag], | |||
| 1457 | (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY8)))); | |||
| 1458 | if (loc == 0) { | |||
| 1459 | printf("start = %d, len = %d, fs = %s\n", | |||
| 1460 | start, len, fs->fs_fsmnt); | |||
| 1461 | panic("ffs_alloccg: map corrupted"); | |||
| 1462 | /* NOTREACHED */ | |||
| 1463 | } | |||
| 1464 | } | |||
| 1465 | bno = (start + len - loc) * NBBY8; | |||
| 1466 | cgp->cg_frotor = bno; | |||
| 1467 | /* | |||
| 1468 | * found the byte in the map | |||
| 1469 | * sift through the bits to find the selected frag | |||
| 1470 | */ | |||
| 1471 | for (i = bno + NBBY8; bno < i; bno += fs->fs_frag) { | |||
| 1472 | blk = blkmap(fs, cg_blksfree(cgp), bno)((((((cgp)->cg_magic != 0x090255) ? (((struct ocg *)(cgp)) ->cg_free) : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff ))))[(bno) / 8] >> ((bno) % 8)) & (0xff >> (8 - (fs)->fs_frag))); | |||
| 1473 | blk <<= 1; | |||
| 1474 | field = around[allocsiz]; | |||
| 1475 | subfield = inside[allocsiz]; | |||
| 1476 | for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) { | |||
| 1477 | if ((blk & field) == subfield) | |||
| 1478 | return (bno + pos); | |||
| 1479 | field <<= 1; | |||
| 1480 | subfield <<= 1; | |||
| 1481 | } | |||
| 1482 | } | |||
| 1483 | printf("bno = %lld, fs = %s\n", (long long)bno, fs->fs_fsmnt); | |||
| 1484 | panic("ffs_alloccg: block not in map"); | |||
| 1485 | return (-1); | |||
| 1486 | } | |||
| 1487 | ||||
| 1488 | /* | |||
| 1489 | * Update the cluster map because of an allocation or free. | |||
| 1490 | * | |||
| 1491 | * Cnt == 1 means free; cnt == -1 means allocating. | |||
| 1492 | */ | |||
| 1493 | void | |||
| 1494 | ffs_clusteracct(struct fs *fs, struct cg *cgp, daddr_t blkno, int cnt) | |||
| 1495 | { | |||
| 1496 | int32_t *sump; | |||
| 1497 | int32_t *lp; | |||
| 1498 | u_char *freemapp, *mapp; | |||
| 1499 | int i, start, end, forw, back, map, bit; | |||
| 1500 | ||||
| 1501 | if (fs->fs_contigsumsize <= 0) | |||
| 1502 | return; | |||
| 1503 | freemapp = cg_clustersfree(cgp)((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_clusteroff)); | |||
| 1504 | sump = cg_clustersum(cgp)((int32_t *)((u_int8_t *)(cgp) + (cgp)->cg_clustersumoff)); | |||
| 1505 | /* | |||
| 1506 | * Allocate or clear the actual block. | |||
| 1507 | */ | |||
| 1508 | if (cnt > 0) | |||
| 1509 | setbit(freemapp, blkno)((freemapp)[(blkno)>>3] |= 1<<((blkno)&(8 -1) )); | |||
| 1510 | else | |||
| 1511 | clrbit(freemapp, blkno)((freemapp)[(blkno)>>3] &= ~(1<<((blkno)& (8 -1)))); | |||
| 1512 | /* | |||
| 1513 | * Find the size of the cluster going forward. | |||
| 1514 | */ | |||
| 1515 | start = blkno + 1; | |||
| 1516 | end = start + fs->fs_contigsumsize; | |||
| 1517 | if (end >= cgp->cg_nclusterblks) | |||
| 1518 | end = cgp->cg_nclusterblks; | |||
| 1519 | mapp = &freemapp[start / NBBY8]; | |||
| 1520 | map = *mapp++; | |||
| 1521 | bit = 1 << (start % NBBY8); | |||
| 1522 | for (i = start; i < end; i++) { | |||
| 1523 | if ((map & bit) == 0) | |||
| 1524 | break; | |||
| 1525 | if ((i & (NBBY8 - 1)) != (NBBY8 - 1)) { | |||
| 1526 | bit <<= 1; | |||
| 1527 | } else { | |||
| 1528 | map = *mapp++; | |||
| 1529 | bit = 1; | |||
| 1530 | } | |||
| 1531 | } | |||
| 1532 | forw = i - start; | |||
| 1533 | /* | |||
| 1534 | * Find the size of the cluster going backward. | |||
| 1535 | */ | |||
| 1536 | start = blkno - 1; | |||
| 1537 | end = start - fs->fs_contigsumsize; | |||
| 1538 | if (end < 0) | |||
| 1539 | end = -1; | |||
| 1540 | mapp = &freemapp[start / NBBY8]; | |||
| 1541 | map = *mapp--; | |||
| 1542 | bit = 1 << (start % NBBY8); | |||
| 1543 | for (i = start; i > end; i--) { | |||
| 1544 | if ((map & bit) == 0) | |||
| 1545 | break; | |||
| 1546 | if ((i & (NBBY8 - 1)) != 0) { | |||
| 1547 | bit >>= 1; | |||
| 1548 | } else { | |||
| 1549 | map = *mapp--; | |||
| 1550 | bit = 1 << (NBBY8 - 1); | |||
| 1551 | } | |||
| 1552 | } | |||
| 1553 | back = start - i; | |||
| 1554 | /* | |||
| 1555 | * Account for old cluster and the possibly new forward and | |||
| 1556 | * back clusters. | |||
| 1557 | */ | |||
| 1558 | i = back + forw + 1; | |||
| 1559 | if (i > fs->fs_contigsumsize) | |||
| 1560 | i = fs->fs_contigsumsize; | |||
| 1561 | sump[i] += cnt; | |||
| 1562 | if (back > 0) | |||
| 1563 | sump[back] -= cnt; | |||
| 1564 | if (forw > 0) | |||
| 1565 | sump[forw] -= cnt; | |||
| 1566 | /* | |||
| 1567 | * Update cluster summary information. | |||
| 1568 | */ | |||
| 1569 | lp = &sump[fs->fs_contigsumsize]; | |||
| 1570 | for (i = fs->fs_contigsumsize; i > 0; i--) | |||
| 1571 | if (*lp-- > 0) | |||
| 1572 | break; | |||
| 1573 | fs->fs_maxcluster[cgp->cg_cgx] = i; | |||
| 1574 | } |