-rw-r--r-- | package/xf86-video-siliconmotion/patches/loongson.patch | 138 |
1 file changed, 138 insertions(+), 0 deletions(-)
diff --git a/package/xf86-video-siliconmotion/patches/loongson.patch b/package/xf86-video-siliconmotion/patches/loongson.patch
new file mode 100644
index 000000000..2b9041516
--- /dev/null
+++ b/package/xf86-video-siliconmotion/patches/loongson.patch
@@ -0,0 +1,138 @@
+diff -Nur xf86-video-siliconmotion-1.7.4/src/smi_video.c xf86-video-siliconmotion-1.7.4-loongson/src/smi_video.c
+--- xf86-video-siliconmotion-1.7.4/src/smi_video.c	2010-02-25 08:33:07.000000000 +0100
++++ xf86-video-siliconmotion-1.7.4-loongson/src/smi_video.c	2012-03-13 04:18:18.634989344 +0100
+@@ -276,6 +276,7 @@
+    XVIMAGE_YUY2,
+    XVIMAGE_YV12,
+    XVIMAGE_I420,
++   XVIMAGE_UYVY,
+    {
+      FOURCC_RV15,			/* id */
+      XvRGB,				/* type */
+@@ -1464,6 +1465,117 @@
+     LEAVE();
+ }
+ 
++static void myXVCopyYUV12ToPacked(const unsigned char *srcy, const unsigned char *srcv, const unsigned char *srcu,
++                                  unsigned char *dst, int srcPitchy, int srcPitchuv, int dstPitch, int h, int w)
++{
++    int i, j;
++    unsigned char const *y, *u, *v;
++    int dstinc, yinc, uinc, vinc;
++
++    y = srcy;
++    u = srcu;
++    v = srcv;
++
++    dstinc = dstPitch - 2*w;
++    yinc = srcPitchy - w;
++    uinc = srcPitchuv - w/2;
++    vinc = srcPitchuv - w/2;
++
++    for (i = 0; i < h; i++) {
++        asm (
++//          ".set arch=loongson2f\n\t"
++            ".set noreorder\n\t"
++            "move $8, %8 \n\t"
++            "1: \n\t"
++            "beqz $8, 2f \n\t"
++            "xor $f0, $f0, $f0 \n\t"
++            "ldc1 $f4, (%0) \n\t"
++            "punpcklbh $f2, $f4, $f0 \n\t"
++            "punpckhbh $f4, $f4, $f0 \n\t"
++            "ldc1 $f16, 8(%0) \n\t"
++            "punpcklbh $f14, $f16, $f0 \n\t"
++            "punpckhbh $f16, $f16, $f0 \n\t"
++
++            "lwc1 $f8, (%1) \n\t"
++            "lwc1 $f12, (%2) \n\t"
++            "punpcklbh $f8, $f8, $f12 \n\t"
++            "punpcklbh $f6, $f0, $f8 \n\t"
++            "punpckhbh $f8, $f0, $f8 \n\t"
++            "lwc1 $f18, 4(%1) \n\t"
++            "lwc1 $f12, 4(%2) \n\t"
++            "punpcklbh $f18, $f18, $f12 \n\t"
++            "punpcklbh $f10, $f0, $f18 \n\t"
++            "punpckhbh $f12, $f0, $f18 \n\t"
++
++            "or $f2, $f2, $f6 \n\t"
++            "or $f4, $f4, $f8 \n\t"
++            "or $f14, $f14, $f10 \n\t"
++            "or $f16, $f16, $f12 \n\t"
++
++            "sdc1 $f2, (%3) \n\t"
++            "sdc1 $f4, 8(%3) \n\t"
++            "add %0, 16 \n\t"
++            "add %1, 8 \n\t"
++            "add %2, 8 \n\t"
++            "sdc1 $f14, 0x10(%3) \n\t"
++            "sdc1 $f16, 0x18(%3) \n\t"
++            "add $8, -1 \n\t"
++            "b 1b \n\t"
++            "add %3, 32 \n\t"
++            "2: \n\t"
++            ".set reorder\n\t"
++            : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
++            : "0" (y), "1" (u), "2" (v), "3" (dst), "r" (w>>4)
++            : "memory","$8"
++        );
++
++        asm (
++//          ".set arch=loongson2f\n\t"
++            ".set noreorder\n\t"
++            "move $8, %8 \n\t"
++            "1: \n\t"
++            "beqz $8, 2f \n\t"
++            "xor $f0, $f0, $f0 \n\t"
++            "ldc1 $f4, (%0) \n\t"
++            "punpcklbh $f2, $f4, $f0 \n\t"
++            "punpckhbh $f4, $f4, $f0 \n\t"
++
++            "lwc1 $f8, (%1) \n\t"
++            "lwc1 $f12, (%2) \n\t"
++            "punpcklbh $f8, $f8, $f12 \n\t"
++            "punpcklbh $f6, $f0, $f8 \n\t"
++            "punpckhbh $f8, $f0, $f8 \n\t"
++
++            "or $f2, $f2, $f6 \n\t"
++            "or $f4, $f4, $f8 \n\t"
++
++            "sdc1 $f2, (%3) \n\t"
++            "sdc1 $f4, 8(%3) \n\t"
++            "add %0, 8 \n\t"
++            "add %1, 4 \n\t"
++            "add %2, 4 \n\t"
++            "add $8, -1 \n\t"
++            "b 1b \n\t"
++            "add %3, 16 \n\t"
++            "2:\n\t"
++            ".set reorder\n\t"
++            : "=r" (y), "=r" (u), "=r" (v), "=r" (dst)
++            : "0" (y), "1" (u), "2" (v), "3" (dst), "r" ((w&0xf)/8)
++            : "memory","$8"
++        );
++
++        for (j = (w&7)/2; j; j--) {
++            *dst++ = *y++;
++            *dst++ = *u++;
++            *dst++ = *y++;
++            *dst++ = *v++;
++        }
++        y += yinc;
++        u = (i%2) ? (u + uinc): (u - w/2);
++        v = (i%2) ? (v + vinc): (v - w/2);
++        dst += dstinc;
++    }
++}
+
+ static int
+ SMI_PutImage(
+@@ -1592,7 +1704,7 @@
+ 	    offset3 = tmp;
+ 	}
+ 	nLines = ((((y2 + 0xffff) >> 16) + 1) & ~1) - top;
+-	xf86XVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
++	myXVCopyYUV12ToPacked(buf + (top * srcPitch) + (left >> 1),
+ 		       buf + offset2, buf + offset3, dstStart,
+ 		       srcPitch, srcPitch2, dstPitch, nLines,
+ 		       nPixels);
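For readers who don't speak Loongson assembly: the patch reroutes the driver's planar-to-packed video path from the generic xf86XVCopyYUV12ToPacked to myXVCopyYUV12ToPacked, which uses Loongson-2F multimedia instructions (punpcklbh/punpckhbh interleave bytes much like MMX punpcklbw/punpckhbw) to pack 16 luma pixels per iteration, then 8, then finishes with a byte-at-a-time remainder loop. The sketch below shows the same transformation in plain C; it is an illustration for this note, not part of the patch, and the name scalarCopyYUV12ToPacked is invented here. Note the srcv-before-srcu argument order: YV12 stores the V plane ahead of the U plane, and the call site passes the plane offsets accordingly.

/*
 * Scalar reference for the SIMD routine in the patch above (hypothetical
 * helper, not in the driver): converts one planar YV12/I420 frame
 * (separate Y, U, V planes, chroma subsampled 2x2) into packed YUY2,
 * i.e. Y0 U Y1 V for every pair of horizontally adjacent pixels.
 */
static void
scalarCopyYUV12ToPacked(const unsigned char *srcy, const unsigned char *srcv,
                        const unsigned char *srcu, unsigned char *dst,
                        int srcPitchy, int srcPitchuv, int dstPitch,
                        int h, int w)
{
    int i, j;

    for (i = 0; i < h; i++) {
        /* Each chroma row is shared by two luma rows (2x2 subsampling),
         * hence the i/2 when indexing the U and V planes. */
        const unsigned char *y = srcy + i * srcPitchy;
        const unsigned char *u = srcu + (i / 2) * srcPitchuv;
        const unsigned char *v = srcv + (i / 2) * srcPitchuv;
        unsigned char *d = dst + i * dstPitch;

        /* Two luma samples share one U/V pair: emit Y0 U Y1 V. */
        for (j = 0; j < w / 2; j++) {
            *d++ = *y++;
            *d++ = *u++;
            *d++ = *y++;
            *d++ = *v++;
        }
    }
}

The (i%2) rewind logic in the patched function achieves the same chroma-row reuse without multiplying: after an even luma row it steps u and v back by the half-width they just consumed, and only after an odd row does it advance them to the next chroma row via uinc/vinc.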