pomaly pristup na ZIPku a USB fotak - patch!

Tomas Pluskal plusik at pohoda.cz
Sat Jan 11 19:13:13 CET 2003


Tak jsem konecne (po 4 mesicich :) nasel dostatek casu a trochu se ponoril
do problemu, a vysledkem je prilozeny patch (zajemci prosim testujte dle
libosti). Jedna se o podporu clusterovani v msdosfs, opsanou zcasti z
cd9660 a zcasti z ext2fs.

Rychlost drive bez patche:
- USB fotak: cca 50 KB/s
- ATAPI ZIPka: cca 80 KB/s

Rychlost s patchem:
- USB fotak: cca 400 KB/s (jeste je potreba patch pro bandwidth
  reclamation, ktery jsem avizoval pred nejakou dobou)
- ATAPI ZIPka: cca 700 KB/s


Tomas

On Tue, 3 Sep 2002, Cejka Rudolf wrote:

> Tomas Pluskal wrote (2002/09/02):
> > Pri normalnim pouziti FAT na ZIPce je velikost pozadavku 2KB. Kdyz pustim
> > ...
> > HDD cca 2x az 3x pomalejsi nez ufs nebo ext2).
>
> Tak se definitivne vzdavam a uz zmlknu :-) Moc diky za perfektni info.
>
> Ze by velikost pozadavku byla rovna velikosti alokacniho bloku FAT
> a nevyuzivalo se zadne clusterovani a u disku s cache to nevadilo
> tak moc jako u ZIP bez nebo s malou cache?
>
-------------- next part --------------
--- msdosfs_vnops.c.old	Sat Jan 11 18:58:28 2003
+++ msdosfs_vnops.c	Sat Jan 11 18:58:33 2003
@@ -606,14 +606,24 @@
 		} else {
 			blsize = pmp->pm_bpcluster;
 			rablock = lbn + 1;
-			if (seqcount > 1 &&
-			    de_cn2off(pmp, rablock) < dep->de_FileSize) {
-				rasize = pmp->pm_bpcluster;
-				error = breadn(vp, lbn, blsize,
-				    &rablock, &rasize, 1, NOCRED, &bp); 
-			} else {
-				error = bread(vp, lbn, blsize, NOCRED, &bp);
-			}
+                        if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
+        			if (de_cn2off(pmp, rablock) < dep->de_FileSize) {
+                                    error = cluster_read(vp, (off_t)dep->de_FileSize,
+                                            lbn, blsize, NOCRED, uio->uio_resid,
+                                            seqcount ,&bp);
+                                } else 
+                                        error = bread(vp, lbn, blsize, NOCRED, &bp);
+                                
+                        } else {
+				if (seqcount > 1 &&
+				    de_cn2off(pmp, rablock) < dep->de_FileSize) {
+					rasize = pmp->pm_bpcluster;
+					error = breadn(vp, lbn, blsize,
+						       &rablock, &rasize, 1, NOCRED, &bp); 
+				} else {
+					error = bread(vp, lbn, blsize, NOCRED, &bp);
+	    			}
+                        }
 		}
 		if (error) {
 			brelse(bp);
@@ -665,6 +675,7 @@
 	struct denode *dep = VTODE(vp);
 	struct msdosfsmount *pmp = dep->de_pmp;
 	struct ucred *cred = ap->a_cred;
+        int seqcount = ap->a_ioflag >> 16;
 
 #ifdef MSDOSFS_DEBUG
 	printf("msdosfs_write(vp %p, uio %p, ioflag %x, cred %p\n",
@@ -812,10 +823,17 @@
 		 */
 		if (ioflag & IO_SYNC)
 			(void) bwrite(bp);
-		else if (n + croffset == pmp->pm_bpcluster)
-			bawrite(bp);
-		else
+		else if (n + croffset == pmp->pm_bpcluster) {
+                        if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
+                                bp->b_flags |= B_CLUSTEROK;
+                                cluster_write(bp, dep->de_FileSize, seqcount);
+                        } else {    
+        			bawrite(bp);
+                        }
+		} else {
+                        bp->b_flags |= B_CLUSTEROK;
 			bdwrite(bp);
+                }
 		dep->de_flag |= DE_UPDATE;
 	} while (error == 0 && uio->uio_resid > 0);
 
@@ -1804,16 +1822,24 @@
 	} */ *ap;
 {
 	struct denode *dep = VTODE(ap->a_vp);
+        struct msdosfsmount *pmp = dep->de_pmp;
+        daddr_t lblkno = ap->a_bn;
+	int bshift = pmp->pm_bnshift;
 
 	if (ap->a_vpp != NULL)
 		*ap->a_vpp = dep->de_devvp;
 	if (ap->a_bnp == NULL)
 		return (0);
 	if (ap->a_runp) {
-		/*
-		 * Sequential clusters should be counted here.
-		 */
-		*ap->a_runp = 0;
+		int nblk;
+
+		nblk = (dep->de_FileSize >> bshift) - (lblkno + 1);
+		if (nblk <= 0)
+			*ap->a_runp = 0;
+		else if (nblk >= (MAXBSIZE >> bshift))
+			*ap->a_runp = (MAXBSIZE >> bshift) - 1;
+		else
+			*ap->a_runp = nblk;
 	}
 	if (ap->a_runb) {
 		*ap->a_runb = 0;


More information about the Users-l mailing list