externchar end[]; // first address after kernel. // defined by kernel.ld.
structrun { structrun *next; };
// we must have the type_name(Keme) so we can establish the array of struct(kmems[NCPU]) structKeme{ structspinlocklock; structrun *freelist; };
structKemekmems[NCPU];
void kinit() { // set eight "kmem" for eight cpus for(int i = 0; i < NCPU; i++){ initlock(&kmems[i].lock, "kmem"); // we build a single freelist to kmems[0] firstly if (i == 0) freerange(end, (void*)PHYSTOP); } }
void freerange(void *pa_start, void *pa_end) { char *p; p = (char*)PGROUNDUP((uint64)pa_start); for(; p + PGSIZE <= (char*)pa_end; p += PGSIZE) kfree(p); }
// Free the page of physical memory pointed at by v, // which normally should have been returned by a // call to kalloc(). (The exception is when // initializing the allocator; see kinit above.) void kfree(void *pa) { structrun *r;
// Fill with junk to catch dangling refs. memset(pa, 1, PGSIZE);
r = (struct run*)pa;
// get current cpu_id push_off(); int cpu_id = cpuid(); pop_off();
// free a page from freelist of cpu_id acquire(&kmems[cpu_id].lock); r->next = kmems[cpu_id].freelist; kmems[cpu_id].freelist = r; release(&kmems[cpu_id].lock); }
// Allocate one 4096-byte page of physical memory. // Returns a pointer that the kernel can use. // Returns 0 if the memory cannot be allocated. void * kalloc(void) { structrun *r;
// get current cpu_id push_off(); int cpu_id = cpuid(); pop_off();
acquire(&kmems[cpu_id].lock); // use 'acquire' also disable intrruption
r = kmems[cpu_id].freelist; // if freelist of current cpu_id has freepage we use it if(r){ kmems[cpu_id].freelist = r->next; release(&kmems[cpu_id].lock); memset((char*)r, 5, PGSIZE); // fill with junk return (void*)r; }
// else we steal freepage from other freelist of cpus else{ release(&kmems[cpu_id].lock); for(int i = 0; i < NCPU; i++){ // avoid race condition(same cpu_id lock) if (i != cpu_id){ acquire(&kmems[i].lock);
// the last_list also not memory so ret 0 if(i == NCPU - 1 && kmems[i].freelist == 0){ release(&kmems[i].lock); return (void*)0; }
// It is OK to allocte a freepage by stealing from other cpus if(kmems[i].freelist){ structrun *to_alloc = kmems[i].freelist; kmems[i].freelist = to_alloc->next; release(&kmems[i].lock); memset((char*)to_alloc, 5, PGSIZE); // fill with junk return (void*)to_alloc; }
// if no matching condition, we must release current lock release(&kmems[i].lock); } } }
// Returns 0 if the memory cannot be allocated. return (void*)0; }
Chapter 8.3: The buffer cache uses a per-buffer sleep-lock to ensure that only one thread at a time uses each buffer (and thus each disk block); bread() returns a locked buffer, and brelse() releases the lock.
// Buffer cache. // // The buffer cache is a linked list of buf structures holding // cached copies of disk block contents. Caching disk blocks // in memory reduces the number of disk reads and also provides // a synchronization point for disk blocks used by multiple processes. // // Interface: // * To get a buffer for a particular disk block, call bread. // * After changing buffer data, call bwrite to write it to disk. // * When done with the buffer, call brelse. // * Do not use the buffer after calling brelse. // * Only one process at a time can use a buffer, // so do not keep them longer than necessary.
initlock(&bcache.lock, "bcache"); for(int i = 0; i < NBUK; i++){ initlock(&bcache.buckets[i].lock, "bcache.bucket"); bcache.buckets[i].head.next = (void*)0; // setting 0 for each tail of bucket // we pass all bufs to buckets[0] firstly if (i == 0){ prev_b = &bcache.buckets[i].head; for(b = bcache.buf; b < bcache.buf + NBUF; b++){ if(b == bcache.buf + NBUF - 1) // buf[29] b->next = (void*)0; // set tail of 0 for the buckets[0] of bufs prev_b->next = b; b->timestamp = ticks; // when initialize kernel, ticks == 0 initsleeplock(&b->lock, "buffer"); prev_b = b; // next linking } } } }
// Look through buffer cache for block on device dev. // If not found, allocate a buffer. // In either case, return locked buffer. static struct buf* bget(uint dev, uint blockno) { structbuf *b; int buk_id = hash(dev, blockno); // get buk_id by mapping of hash
// Is the block already cached? // atmic: one bucket.lock only be acquired by one process acquire(&bcache.buckets[buk_id].lock); b = bcache.buckets[buk_id].head.next; // the first buf in buckets[buk_id] while(b){ if(b->dev == dev && b->blockno == blockno){ // acquire(&bcache.buckets[buk_id].lock); b->refcnt++; release(&bcache.buckets[buk_id].lock); acquiresleep(&b->lock); return b; } b = b->next; } release(&bcache.buckets[buk_id].lock); // Not cached. // Recycle the least recently used (LRU) unused buffer. int max_timestamp = 0; int lru_buk_id = -1; // select the LRU bucket id int is_better = 0; // we have better lru_buk_id? structbuf *lru_b = (void*)0; structbuf *prev_lru_b = (void*)0; // atmic: we alway lock buckets[lru_buk_id] to avoid be used by other process // find lru_buk_id when refcnt == 0 and getting max_timestamp in each bucket[i] structbuf *prev_b = (void*)0; for(int i = 0; i < NBUK; i++){ prev_b = &bcache.buckets[i].head; acquire(&bcache.buckets[i].lock); while(prev_b->next){ // we must use ">=" max_timestamp == 0 at first time in eviction if(prev_b->next->refcnt == 0 && prev_b->next->timestamp >= max_timestamp){ max_timestamp = prev_b->next->timestamp; is_better = 1; prev_lru_b = prev_b; // get prev_lru_b // not use break, we must find the max_timestamp until we running in the tail of buckets[12] } prev_b = prev_b->next; } if(is_better){ if(lru_buk_id != -1) release(&bcache.buckets[lru_buk_id].lock); // release old buckets[lru_buk_id] lru_buk_id = i; // get new lru_buk_id and alway lock it } else release(&bcache.buckets[i].lock); // not better lru_buk_id, so we release current bucket[i] is_better = 0; // reset is_better to go next bucket[i] }
// get lru_b lru_b = prev_lru_b->next;
// steal lru_b from buckets[lru_buk_id] by prev_lru_b // and release buckets[lru_buk_id].lock if(lru_b){ prev_lru_b->next = prev_lru_b->next->next; release(&bcache.buckets[lru_buk_id].lock); }
// cache lru_b to buckets[buk_id] // atmic: one bucket.lock only be acquired by one process acquire(&bcache.lock); acquire(&bcache.buckets[buk_id].lock); if(lru_b){ lru_b->next = bcache.buckets[buk_id].head.next; bcache.buckets[buk_id].head.next = lru_b; }
// if two processes will use the same block(same blockno) in buckets[lru_buk_id] // one process can check it that if already here we get it // otherwise, we will use same block in two processes and cache double // then "panic freeing free block" b = bcache.buckets[buk_id].head.next; // the first buf in buckets[buk_id] while(b){ if(b->dev == dev && b->blockno == blockno){ b->refcnt++; release(&bcache.buckets[buk_id].lock); release(&bcache.lock); acquiresleep(&b->lock); return b; } b = b->next; }
// not find lru_b when travel each bucket if (lru_b == 0) panic("bget: no buffers"); lru_b->dev = dev; lru_b->blockno = blockno; lru_b->valid = 0; lru_b->refcnt = 1; release(&bcache.buckets[buk_id].lock); release(&bcache.lock); acquiresleep(&lru_b->lock); return lru_b;
}
// Return a locked buf with the contents of the indicated block. struct buf* bread(uint dev, uint blockno) { structbuf *b;
// Write b's contents to disk. Must be locked. void bwrite(struct buf *b) { if(!holdingsleep(&b->lock)) panic("bwrite"); virtio_disk_rw(b, 1); }
// Release a locked buffer. // Move to the head of the most-recently-used list. void brelse(struct buf *b) { if(!holdingsleep(&b->lock)) panic("brelse");
releasesleep(&b->lock);
int buk_id = hash(b->dev, b->blockno); acquire(&bcache.buckets[buk_id].lock); b->refcnt--; // update timestamp when it is a free buf (b->refcnt == 0) if(b->refcnt == 0) b->timestamp = ticks; release(&bcache.buckets[buk_id].lock); }
make[1]: Leaving directory '/home/duile/xv6-labs-2021' == Test running kalloctest == $ make qemu-gdb (97.6s) == Test kalloctest: test1 == kalloctest: test1: OK == Test kalloctest: test2 == kalloctest: test2: OK == Test kalloctest: sbrkmuch == $ make qemu-gdb kalloctest: sbrkmuch: OK (12.7s) == Test running bcachetest == $ make qemu-gdb (12.7s) == Test bcachetest: test0 == bcachetest: test0: OK == Test bcachetest: test1 == bcachetest: test1: OK == Test usertests == $ make qemu-gdb usertests: OK (187.3s) == Test time == time: OK Score: 70/70 duile@LAPTOP-II21PK0U:xv6-labs-2021$