Add support for V4L2's MJPEG and JPEG format.

cusax-fix
Sebastien Vincent 16 years ago
parent 2cffd9e024
commit ad6b8c79c4

@ -0,0 +1,286 @@
/*
* jidctflt.c
*
* Copyright (C) 1994-1998, Thomas G. Lane.
* This file is part of the Independent JPEG Group's software.
*
* The authors make NO WARRANTY or representation, either express or implied,
* with respect to this software, its quality, accuracy, merchantability, or
* fitness for a particular purpose. This software is provided "AS IS", and you,
* its user, assume the entire risk as to its quality and accuracy.
*
* This software is copyright (C) 1991-1998, Thomas G. Lane.
* All Rights Reserved except as specified below.
*
* Permission is hereby granted to use, copy, modify, and distribute this
* software (or portions thereof) for any purpose, without fee, subject to these
* conditions:
* (1) If any part of the source code for this software is distributed, then this
* README file must be included, with this copyright and no-warranty notice
* unaltered; and any additions, deletions, or changes to the original files
* must be clearly indicated in accompanying documentation.
* (2) If only executable code is distributed, then the accompanying
* documentation must state that "this software is based in part on the work of
* the Independent JPEG Group".
* (3) Permission for use of this software is granted only if the user accepts
* full responsibility for any undesirable consequences; the authors accept
* NO LIABILITY for damages of any kind.
*
* These conditions apply to any software derived from or based on the IJG code,
* not just to the unmodified library. If you use our work, you ought to
* acknowledge us.
*
* Permission is NOT granted for the use of any IJG author's name or company name
* in advertising or publicity relating to this software or products derived from
* it. This software may be referred to only as "the Independent JPEG Group's
* software".
*
* We specifically permit and encourage the use of this software as the basis of
* commercial products, provided that all warranty or liability claims are
* assumed by the product vendor.
*
*
* This file contains a floating-point implementation of the
* inverse DCT (Discrete Cosine Transform). In the IJG code, this routine
* must also perform dequantization of the input coefficients.
*
* This implementation should be more accurate than either of the integer
* IDCT implementations. However, it may not give the same results on all
* machines because of differences in roundoff behavior. Speed will depend
* on the hardware's floating point capacity.
*
* A 2-D IDCT can be done by 1-D IDCT on each column followed by 1-D IDCT
* on each row (or vice versa, but it's more convenient to emit a row at
* a time). Direct algorithms are also available, but they are much more
* complex and seem not to be any faster when reduced to code.
*
* This implementation is based on Arai, Agui, and Nakajima's algorithm for
* scaled DCT. Their original paper (Trans. IEICE E-71(11):1095) is in
* Japanese, but the algorithm is described in the Pennebaker & Mitchell
* JPEG textbook (see REFERENCES section in file README). The following code
* is based directly on figure 4-8 in P&M.
* While an 8-point DCT cannot be done in less than 11 multiplies, it is
* possible to arrange the computation so that many of the multiplies are
* simple scalings of the final outputs. These multiplies can then be
* folded into the multiplications or divisions by the JPEG quantization
* table entries. The AA&N method leaves only 5 multiplies and 29 adds
* to be done in the DCT itself.
* The primary disadvantage of this method is that with a fixed-point
* implementation, accuracy is lost due to imprecise representation of the
* scaled quantization values. However, that problem does not arise if
* we use floating point arithmetic.
*/
#include <stdint.h>
#include "tinyjpeg-internal.h"
#define FAST_FLOAT float
#define DCTSIZE 8
#define DCTSIZE2 (DCTSIZE*DCTSIZE)
#define DEQUANTIZE(coef,quantval) (((FAST_FLOAT) (coef)) * (quantval))
#if defined(__GNUC__) && (defined(__i686__)) // || defined(__x86_64__))
/*
 * Descale a raw IDCT term by 'shift' bits (rounding to nearest), add the
 * JPEG +128 level shift and clamp the result into the 0..255 sample range.
 * x86 variant: the clamp is branch-free via conditional moves.
 */
static inline unsigned char descale_and_clamp(int x, int shift)
{
  __asm__ (
       "add %3,%1\n"       /* x += 1UL << (shift-1): round to nearest     */
       "\tsar %2,%1\n"     /* arithmetic shift right: x >>= shift         */
       "\tsub $-128,%1\n"  /* x += 128; the sub also sets the sign flag   */
       "\tcmovl %5,%1\n"   /* Use the sub to compare to 0 */
       "\tcmpl %4,%1\n"    /* compare against 0xff ...                    */
       "\tcmovg %4,%1\n"   /* ... and clamp to 255 when greater           */
       : "=r"(x)
       : "0"(x), "Ir"(shift), "ir"(1UL<<(shift-1)), "r" (0xff), "r" (0)
       );
  return x;
}
#else
/*
 * Descale a raw IDCT term by 'shift' bits (rounding to nearest), add the
 * JPEG +128 level shift and clamp the result into the 0..255 sample range.
 *
 * Portable variant.  The previous implementation right-shifted a negative
 * int (implementation-defined in C) and then patched the high bits with
 * the mask (~0UL) << (32 - shift), which hard-coded a 32-bit int.  The
 * ~((~x) >> shift) form below computes the same floor(x / 2^shift) for
 * negative x using only well-defined shifts of non-negative operands,
 * regardless of the width of int.
 */
static inline unsigned char descale_and_clamp(int x, int shift)
{
  x += 1 << (shift - 1);        /* round to nearest */
  if (x < 0)
    x = ~((~x) >> shift);       /* floor division for negative values */
  else
    x >>= shift;
  x += 128;                     /* JPEG level shift */
  if (x > 255)
    return 255;
  if (x < 0)
    return 0;
  return (unsigned char)x;
}
#endif
/*
* Perform dequantization and inverse DCT on one block of coefficients.
*/
/*
 * Dequantize one 8x8 block of coefficients (compptr->DCT scaled entrywise
 * by compptr->Q_table) and apply the AA&N floating-point inverse DCT.
 * The level-shifted, clamped 8-bit samples are written to output_buf,
 * one 8-pixel row every 'stride' bytes.
 */
void
tinyjpeg_idct_float (struct component *compptr, uint8_t *output_buf, int stride)
{
  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  FAST_FLOAT tmp10, tmp11, tmp12, tmp13;
  FAST_FLOAT z5, z10, z11, z12, z13;
  int16_t *inptr;
  FAST_FLOAT *quantptr;
  FAST_FLOAT *wsptr;
  uint8_t *outptr;
  int ctr;
  FAST_FLOAT workspace[DCTSIZE2]; /* buffers data between passes */

  /* Pass 1: process columns from input, store into work array. */

  inptr = compptr->DCT;
  quantptr = compptr->Q_table;
  wsptr = workspace;
  for (ctr = DCTSIZE; ctr > 0; ctr--) {
    /* Due to quantization, we will usually find that many of the input
     * coefficients are zero, especially the AC terms. We can exploit this
     * by short-circuiting the IDCT calculation for any column in which all
     * the AC terms are zero. In that case each output is equal to the
     * DC coefficient (with scale factor as needed).
     * With typical images and quantization tables, half or more of the
     * column DCT calculations can be simplified this way.
     */

    if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 &&
        inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 &&
        inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 &&
        inptr[DCTSIZE*7] == 0) {
      /* AC terms all zero */
      FAST_FLOAT dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);

      wsptr[DCTSIZE*0] = dcval;
      wsptr[DCTSIZE*1] = dcval;
      wsptr[DCTSIZE*2] = dcval;
      wsptr[DCTSIZE*3] = dcval;
      wsptr[DCTSIZE*4] = dcval;
      wsptr[DCTSIZE*5] = dcval;
      wsptr[DCTSIZE*6] = dcval;
      wsptr[DCTSIZE*7] = dcval;

      inptr++; /* advance pointers to next column */
      quantptr++;
      wsptr++;
      continue;
    }

    /* Even part */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    tmp3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    tmp10 = tmp0 + tmp2; /* phase 3 */
    tmp11 = tmp0 - tmp2;

    tmp13 = tmp1 + tmp3; /* phases 5-3 */
    tmp12 = (tmp1 - tmp3) * ((FAST_FLOAT) 1.414213562) - tmp13; /* 2*c4 */

    tmp0 = tmp10 + tmp13; /* phase 2 */
    tmp3 = tmp10 - tmp13;
    tmp1 = tmp11 + tmp12;
    tmp2 = tmp11 - tmp12;

    /* Odd part */

    tmp4 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    tmp5 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    tmp6 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp7 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);

    z13 = tmp6 + tmp5; /* phase 6 */
    z10 = tmp6 - tmp5;
    z11 = tmp4 + tmp7;
    z12 = tmp4 - tmp7;

    tmp7 = z11 + z13; /* phase 5 */
    tmp11 = (z11 - z13) * ((FAST_FLOAT) 1.414213562); /* 2*c4 */

    z5 = (z10 + z12) * ((FAST_FLOAT) 1.847759065); /* 2*c2 */
    tmp10 = ((FAST_FLOAT) 1.082392200) * z12 - z5; /* 2*(c2-c6) */
    tmp12 = ((FAST_FLOAT) -2.613125930) * z10 + z5; /* -2*(c2+c6) */

    tmp6 = tmp12 - tmp7; /* phase 2 */
    tmp5 = tmp11 - tmp6;
    tmp4 = tmp10 + tmp5;

    wsptr[DCTSIZE*0] = tmp0 + tmp7;
    wsptr[DCTSIZE*7] = tmp0 - tmp7;
    wsptr[DCTSIZE*1] = tmp1 + tmp6;
    wsptr[DCTSIZE*6] = tmp1 - tmp6;
    wsptr[DCTSIZE*2] = tmp2 + tmp5;
    wsptr[DCTSIZE*5] = tmp2 - tmp5;
    wsptr[DCTSIZE*4] = tmp3 + tmp4;
    wsptr[DCTSIZE*3] = tmp3 - tmp4;

    inptr++; /* advance pointers to next column */
    quantptr++;
    wsptr++;
  }

  /* Pass 2: process rows from work array, store into output array. */
  /* Note that we must descale the results by a factor of 8 == 2**3. */

  wsptr = workspace;
  outptr = output_buf;
  for (ctr = 0; ctr < DCTSIZE; ctr++) {
    /* Rows of zeroes can be exploited in the same way as we did with columns.
     * However, the column calculation has created many nonzero AC terms, so
     * the simplification applies less often (typically 5% to 10% of the time).
     * And testing floats for zero is relatively expensive, so we don't bother.
     */

    /* Even part */

    tmp10 = wsptr[0] + wsptr[4];
    tmp11 = wsptr[0] - wsptr[4];

    tmp13 = wsptr[2] + wsptr[6];
    tmp12 = (wsptr[2] - wsptr[6]) * ((FAST_FLOAT) 1.414213562) - tmp13;

    tmp0 = tmp10 + tmp13;
    tmp3 = tmp10 - tmp13;
    tmp1 = tmp11 + tmp12;
    tmp2 = tmp11 - tmp12;

    /* Odd part */

    z13 = wsptr[5] + wsptr[3];
    z10 = wsptr[5] - wsptr[3];
    z11 = wsptr[1] + wsptr[7];
    z12 = wsptr[1] - wsptr[7];

    tmp7 = z11 + z13;
    tmp11 = (z11 - z13) * ((FAST_FLOAT) 1.414213562);

    z5 = (z10 + z12) * ((FAST_FLOAT) 1.847759065); /* 2*c2 */
    tmp10 = ((FAST_FLOAT) 1.082392200) * z12 - z5; /* 2*(c2-c6) */
    tmp12 = ((FAST_FLOAT) -2.613125930) * z10 + z5; /* -2*(c2+c6) */

    tmp6 = tmp12 - tmp7;
    tmp5 = tmp11 - tmp6;
    tmp4 = tmp10 + tmp5;

    /* Final output stage: scale down by a factor of 8 and range-limit */

    outptr[0] = descale_and_clamp((int)(tmp0 + tmp7), 3);
    outptr[7] = descale_and_clamp((int)(tmp0 - tmp7), 3);
    outptr[1] = descale_and_clamp((int)(tmp1 + tmp6), 3);
    outptr[6] = descale_and_clamp((int)(tmp1 - tmp6), 3);
    outptr[2] = descale_and_clamp((int)(tmp2 + tmp5), 3);
    outptr[5] = descale_and_clamp((int)(tmp2 - tmp5), 3);
    outptr[4] = descale_and_clamp((int)(tmp3 + tmp4), 3);
    outptr[3] = descale_and_clamp((int)(tmp3 - tmp4), 3);

    wsptr += DCTSIZE; /* advance pointer to next row */
    outptr += stride;
  }
}

@ -8,13 +8,52 @@
#include "net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2.h"

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/select.h>

#include <linux/videodev2.h>

#include "tinyjpeg.h"
/* from lti-civil */
/*
 * Decode one JPEG/MJPEG frame into a caller-supplied packed 24-bit buffer.
 *
 * src            compressed JPEG data (expected to start with the SOI marker)
 * dest           destination buffer owned by the caller
 * srcFrameBytes  number of bytes available at src
 * flags          tinyjpeg flags, e.g. TINYJPEG_FLAGS_MJPEG_TABLE
 *
 * Errors are reported on stderr; on any failure dest is left untouched or
 * partially written, and the function simply returns.
 */
static void jpeg2rgb (unsigned char *src, unsigned char *dest,
    long srcFrameBytes, int flags)
{
  unsigned char *components[1];
  struct jdec_private *jdec;

  components[0] = dest;

  jdec = tinyjpeg_init();
  if (jdec == NULL)
    return;

  tinyjpeg_set_flags(jdec, flags);
  tinyjpeg_set_components(jdec, components, 1);

  if (tinyjpeg_parse_header(jdec, src, srcFrameBytes) < 0)
  {
    /* was a bare debug printf; surface the decoder's own message instead */
    fprintf(stderr, "jpeg2rgb: parse_header: %s\n",
            tinyjpeg_get_errorstring(jdec));
  }
  else if (tinyjpeg_decode(jdec, TINYJPEG_FMT_BGR24) < 0)
  {
    /* TINYJPEG_FMT_RGB24 produced reversed colors on tested devices, hence
     * BGR24 here -- presumably the downstream consumer expects BGR order;
     * TODO(review): confirm against the Java-side pixel format. */
    fprintf(stderr, "jpeg2rgb: decode: %s\n",
            tinyjpeg_get_errorstring(jdec));
  }

  /* NOTE(review): plain free() rather than tinyjpeg_free() -- presumably
   * deliberate so the caller-owned 'dest' plane registered above is not
   * freed by the library; confirm against this tinyjpeg fork's
   * tinyjpeg_free() before changing. */
  free(jdec);
}
JNIEXPORT jint JNICALL
Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_close
(JNIEnv *jniEnv, jclass clazz, jint fd)
@ -43,6 +82,19 @@ Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_
return (jlong) memcpy((void *) dest, (const void *) src, n);
}
/*
 * JNI entry point: decode the JPEG frame at 'src' (size bytes) into the
 * RGB buffer at 'dst'.  Always returns 0; invalid frames are dropped.
 */
JNIEXPORT jlong JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_convert_1jpeg
  (JNIEnv *jniEnv, jclass clazz, jlong dst, jlong src, jint size)
{
    unsigned char* s = (unsigned char*)src;

    /* Guard against null buffers and undersized frames: 0xaf bytes is the
     * smallest plausible JPEG payload here, and a real frame must begin
     * with the SOI marker 0xFF 0xD8.  Anything else is silently ignored. */
    if(dst && s && size > 0xaf && s[0] == 0xFF && s[1] == 0xD8)
    {
        jpeg2rgb(s, (unsigned char*)dst, size, TINYJPEG_FLAGS_MJPEG_TABLE);
    }
    return 0;
}
JNIEXPORT jlong JNICALL
Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_mmap
(JNIEnv *jniEnv, jclass clazz, jlong start, jint length, jint prot,
@ -279,6 +331,24 @@ Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_
((struct v4l2_requestbuffers *) v4l2_requestbuffers)->memory = memory;
}
/*
 * JNI entry point: allocate a struct v4l2_streamparm with the given buffer
 * type, returning its address as a jlong (0 on allocation failure).
 *
 * calloc() is used instead of malloc() so every field the VIDIOC_S_PARM
 * ioctl may read is zero-initialized; the previous malloc() handed the
 * kernel indeterminate values for everything except 'type'.
 */
JNIEXPORT jlong JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_v4l2_1streamparm_1alloc
  (JNIEnv *jniEnv, jclass clazz, jint type)
{
    struct v4l2_streamparm* v4l2_streamparm
        = (struct v4l2_streamparm *) calloc(1, sizeof(struct v4l2_streamparm));

    if(v4l2_streamparm)
        v4l2_streamparm->type = type;
    return (jlong)v4l2_streamparm;
}
/*
 * JNI entry point: set the capture frame rate on a struct v4l2_streamparm,
 * expressed as the time-per-frame fraction 1/fps.
 */
JNIEXPORT void JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_v4l2_1streamparm_1setFps
  (JNIEnv *jniEnv, jclass clazz, jlong v4l2_streamparm, jint fps)
{
    struct v4l2_streamparm *parm = (struct v4l2_streamparm *) v4l2_streamparm;

    parm->parm.capture.timeperframe.numerator = 1;
    parm->parm.capture.timeperframe.denominator = fps;
}
JNIEXPORT jint JNICALL
Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_VIDIOC_1DQBUF
(JNIEnv *jniEnv, jclass clazz)
@ -328,6 +398,12 @@ Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_
return VIDIOC_S_FMT;
}
/*
 * JNI entry point: expose the native VIDIOC_S_PARM ioctl request code so
 * the Java side can issue it through the generic ioctl() wrapper.
 */
JNIEXPORT jint JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_VIDIOC_1S_1PARM
  (JNIEnv *jniEnv, jclass clazz)
{
    return VIDIOC_S_PARM;
}
JNIEXPORT jint JNICALL
Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_VIDIOC_1STREAMOFF
(JNIEnv *jniEnv, jclass clazz)

@ -46,6 +46,14 @@ JNIEXPORT jint JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media
JNIEXPORT jlong JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_memcpy
(JNIEnv *, jclass, jlong, jlong, jint);
/*
* Class: net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2
* Method: convert_jpeg
* Signature: (JJI)J
*/
JNIEXPORT jlong JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_convert_1jpeg
(JNIEnv *, jclass, jlong, jlong, jint);
/*
* Class: net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2
* Method: mmap
@ -262,6 +270,22 @@ JNIEXPORT void JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media
JNIEXPORT void JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_v4l2_1requestbuffers_1setMemory
(JNIEnv *, jclass, jlong, jint);
/*
* Class: net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2
* Method: v4l2_streamparm_alloc
* Signature: (I)J
*/
JNIEXPORT jlong JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_v4l2_1streamparm_1alloc
(JNIEnv *, jclass, jint);
/*
* Class: net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2
* Method: v4l2_streamparm_setFps
* Signature: (JI)V
*/
JNIEXPORT void JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_v4l2_1streamparm_1setFps
(JNIEnv *, jclass, jlong, jint);
/*
* Class: net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2
* Method: VIDIOC_DQBUF
@ -318,6 +342,14 @@ JNIEXPORT jint JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media
JNIEXPORT jint JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_VIDIOC_1S_1FMT
(JNIEnv *, jclass);
/*
* Class: net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2
* Method: VIDIOC_S_PARM
* Signature: ()I
*/
JNIEXPORT jint JNICALL Java_net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2_VIDIOC_1S_1PARM
(JNIEnv *, jclass);
/*
* Class: net_java_sip_communicator_impl_neomedia_jmfext_media_protocol_video4linux2_Video4Linux2
* Method: VIDIOC_STREAMOFF

@ -0,0 +1,121 @@
/*
* Small jpeg decoder library (Internal header)
*
* Copyright (c) 2006, Luc Saillard <luc@saillard.org>
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of the author nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __TINYJPEG_INTERNAL_H_
#define __TINYJPEG_INTERNAL_H_
#include <setjmp.h>
#define SANITY_CHECK 1
struct jdec_private;
#define HUFFMAN_HASH_NBITS 9
#define HUFFMAN_HASH_SIZE (1UL<<HUFFMAN_HASH_NBITS)
#define HUFFMAN_HASH_MASK (HUFFMAN_HASH_SIZE-1)
#define HUFFMAN_TABLES 4
#define COMPONENTS 3
#define JPEG_MAX_WIDTH 2048
#define JPEG_MAX_HEIGHT 2048
/*
 * One Huffman decoding table: a direct lookup array for codes of up to
 * HUFFMAN_HASH_NBITS bits, plus slow tables for the longer codes.
 */
struct huffman_table
{
  /* Fast look up table, using HUFFMAN_HASH_NBITS bits we can have directly the symbol,
   * if the symbol is <0, then we need to look into the tree table */
  short int lookup[HUFFMAN_HASH_SIZE];
  /* code size: give the number of bits of a symbol is encoded */
  unsigned char code_size[HUFFMAN_HASH_SIZE];
  /* some place to store value that is not encoded in the lookup table
   * IMPROVEME: Calculate if 256 value is enough to store all values
   */
  uint16_t slowtable[16-HUFFMAN_HASH_NBITS][256];
};
/*
 * Per-component decoding state (one per Y/Cb/Cr component).
 */
struct component
{
  unsigned int Hfactor;           /* horizontal sampling factor (from the frame header) */
  unsigned int Vfactor;           /* vertical sampling factor (from the frame header) */
  float *Q_table;                 /* Pointer to the quantisation table to use */
  struct huffman_table *AC_table; /* Huffman table for this component's AC coefficients */
  struct huffman_table *DC_table; /* Huffman table for this component's DC coefficients */
  short int previous_DC;          /* Previous DC coefficient */
  short int DCT[64];              /* DCT coef */
#if SANITY_CHECK
  unsigned int cid;               /* component id, kept only for sanity checking */
#endif
};
typedef void (*decode_MCU_fct) (struct jdec_private *priv);
typedef void (*convert_colorspace_fct) (struct jdec_private *priv);
/*
 * Full decoder context, allocated by tinyjpeg_init() and passed to every
 * tinyjpeg_* call.
 */
struct jdec_private
{
  /* Public variables */
  uint8_t *components[COMPONENTS];    /* output planes; may be supplied via tinyjpeg_set_components() */
  unsigned int width, height;         /* Size of the image */
  unsigned int flags;                 /* TINYJPEG_FLAGS_* bits set via tinyjpeg_set_flags() */

  /* Private variables */
  const unsigned char *stream_begin, *stream_end; /* bounds of the input buffer */
  unsigned int stream_length;
  const unsigned char *stream;        /* Pointer to the current stream */
  unsigned int reservoir, nbits_in_reservoir; /* bit accumulator state -- presumably for Huffman bit reads; confirm in the decoder source */

  struct component component_infos[COMPONENTS];
  float Q_tables[COMPONENTS][64];     /* quantization tables */
  struct huffman_table HTDC[HUFFMAN_TABLES]; /* DC huffman tables   */
  struct huffman_table HTAC[HUFFMAN_TABLES]; /* AC huffman tables   */
  int default_huffman_table_initialized;
  int restart_interval;
  int restarts_to_go;                 /* MCUs left in this restart interval */
  int last_rst_marker_seen;           /* Rst marker is incremented each time */

  /* Temp space used after the IDCT to store each components */
  uint8_t Y[64*4], Cr[64], Cb[64];

  jmp_buf jump_state;                 /* non-local error recovery target */
  /* Internal Pointer use for colorspace conversion, do not modify it !!! */
  uint8_t *plane[COMPONENTS];

  char error_string[256];             /* last error message, see tinyjpeg_get_errorstring() */
};
#define IDCT tinyjpeg_idct_float
void tinyjpeg_idct_float (struct component *compptr, uint8_t *output_buf, int stride);
#endif

File diff suppressed because it is too large Load Diff

@ -0,0 +1,73 @@
/*
* Small jpeg decoder library (header file)
*
* Copyright (c) 2006, Luc Saillard <luc@saillard.org>
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of the author nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __JPEGDEC_H__
#define __JPEGDEC_H__
#ifdef __cplusplus
extern "C" {
#endif
struct jdec_private;
/* Flags that can be set by any applications */
#define TINYJPEG_FLAGS_MJPEG_TABLE (1<<1)
/* Formats accepted in output */
enum tinyjpeg_fmt {
TINYJPEG_FMT_GREY = 1,
TINYJPEG_FMT_BGR24,
TINYJPEG_FMT_RGB24,
TINYJPEG_FMT_YUV420P,
};
/* Allocate a new decoder context; returns NULL on failure. */
struct jdec_private *tinyjpeg_init(void);
/* Release a context obtained from tinyjpeg_init(). */
void tinyjpeg_free(struct jdec_private *priv);

/* Parse the JPEG headers in buf (size bytes); returns < 0 on error. */
int tinyjpeg_parse_header(struct jdec_private *priv, const unsigned char *buf, unsigned int size);
/* Decode the parsed image into the configured components in the given
 * pixel format (TINYJPEG_FMT_*); returns < 0 on error. */
int tinyjpeg_decode(struct jdec_private *priv, int pixel_format);
/* Human-readable description of the last error. */
const char *tinyjpeg_get_errorstring(struct jdec_private *priv);
/* Retrieve the dimensions of the parsed image. */
void tinyjpeg_get_size(struct jdec_private *priv, unsigned int *width, unsigned int *height);
/* Get the decoder's component (plane) pointers. */
int tinyjpeg_get_components(struct jdec_private *priv, unsigned char **components);
/* Supply caller-owned output planes for the decoder to write into. */
int tinyjpeg_set_components(struct jdec_private *priv, unsigned char **components, unsigned int ncomponents);
/* Set TINYJPEG_FLAGS_* options before parsing/decoding. */
int tinyjpeg_set_flags(struct jdec_private *priv, int flags);
#ifdef __cplusplus
}
#endif
#endif

@ -35,13 +35,21 @@ public class AVFrameFormat
*/
private int pixFmt;
/**
* Native format of the capture device. Because some native capture device
* formats can correspond to the same FFmpeg format, JMF's stream is unable
* to differentiate them. Keeping the native format here
* allows the JMF stream to know it directly.
*/
private int devicePixFmt;
/**
* Initializes a new <tt>AVFrameFormat</tt> instance with unspecified size,
* frame rate and FFmpeg colorspace.
*/
public AVFrameFormat()
{
this(NOT_SPECIFIED);
this(NOT_SPECIFIED, NOT_SPECIFIED);
}
/**
@ -49,10 +57,12 @@ public AVFrameFormat()
* colorspace and unspecified size and frame rate.
*
* @param pixFmt the FFmpeg colorspace to be represented by the new instance
* @param devicePixFmt the capture device colorspace to be represented by
* the new instance
*/
public AVFrameFormat(int pixFmt)
public AVFrameFormat(int pixFmt, int devicePixFmt)
{
this(null, NOT_SPECIFIED, pixFmt);
this(null, NOT_SPECIFIED, pixFmt, devicePixFmt);
}
/**
@ -62,12 +72,16 @@ public AVFrameFormat(int pixFmt)
* @param size the <tt>Dimension</tt> of the new instance
* @param frameRate the frame rate of the new instance
* @param pixFmt the FFmpeg colorspace to be represented by the new instance
* @param devicePixFmt the capture device colorspace to be represented by
* the new instance
*/
public AVFrameFormat(Dimension size, float frameRate, int pixFmt)
public AVFrameFormat(Dimension size, float frameRate, int pixFmt,
int devicePixFmt)
{
super(AVFRAME, size, NOT_SPECIFIED, AVFrame.class, frameRate);
this.pixFmt = pixFmt;
this.devicePixFmt = devicePixFmt;
}
/**
@ -80,7 +94,8 @@ public AVFrameFormat(Dimension size, float frameRate, int pixFmt)
@Override
public Object clone()
{
AVFrameFormat f = new AVFrameFormat(size, frameRate, pixFmt);
AVFrameFormat f = new AVFrameFormat(size, frameRate, pixFmt,
devicePixFmt);
f.copy(this);
return f;
@ -139,6 +154,16 @@ public int getPixFmt()
return pixFmt;
}
/**
* Gets the native capture device format represented by this instance.
*
* @return native capture device format represented by this instance
*/
public int getDevicePixFmt()
{
return devicePixFmt;
}
/**
* Finds the attributes shared by two matching <tt>Format</tt>s. If the
* specified <tt>Format</tt> does not match this one, the result is

@ -85,7 +85,8 @@ public JNIDecoder()
Constants.VIDEO_WIDTH,
Constants.VIDEO_HEIGHT),
ensureFrameRate(Format.NOT_SPECIFIED),
FFmpeg.PIX_FMT_YUV420P)
FFmpeg.PIX_FMT_YUV420P,
Format.NOT_SPECIFIED)
};
Dimension outputSize = outputFormats[0].getSize();
@ -169,7 +170,8 @@ protected Format[] getMatchingOutputFormats(Format in)
new AVFrameFormat(
outSize,
ensureFrameRate(ivf.getFrameRate()),
FFmpeg.PIX_FMT_YUV420P)
FFmpeg.PIX_FMT_YUV420P,
Format.NOT_SPECIFIED)
};
}
@ -289,7 +291,8 @@ public synchronized int process(Buffer inBuffer, Buffer outBuffer)
= new AVFrameFormat(
outSize,
outFrameRate,
FFmpeg.PIX_FMT_YUV420P);
FFmpeg.PIX_FMT_YUV420P,
Format.NOT_SPECIFIED);
}
outBuffer.setFormat(outputFormat);

@ -61,7 +61,7 @@ public DirectShowAuto() throws Exception
if(ffmpegPixFmt != FFmpeg.PIX_FMT_NONE)
{
format = new AVFrameFormat(ffmpegPixFmt);
format = new AVFrameFormat(ffmpegPixFmt, (int)pixelFormat);
}
else
{

@ -58,7 +58,8 @@ public QuickTimeAuto()
+ inputDevice.uniqueID()),
new Format[]
{
new AVFrameFormat(FFmpeg.PIX_FMT_ARGB),
new AVFrameFormat(FFmpeg.PIX_FMT_ARGB,
Format.NOT_SPECIFIED),
new RGBFormat()
});

@ -118,9 +118,9 @@ private boolean discoverAndRegister(String deviceName)
* API Specification with a specific device name, a specific <tt>open()</tt>
* file descriptor and a specific <tt>v4l2_capability</tt> with JMF.
*
* @param deviceName
* @param fd
* @param v4l2_capability
* @param deviceName name of the device (i.e. /dev/videoX)
* @param fd file descriptor of the device
* @param v4l2_capability device V4L2 capability
* @return <tt>true</tt> if a <tt>CaptureDeviceInfo</tt> for the specified
* <tt>CaptureDevice</tt> has been added to <tt>CaptureDeviceManager</tt>;
* otherwise, <tt>false</tt>
@ -185,7 +185,7 @@ private boolean register(String deviceName, int fd, long v4l2_capability)
int ffmpegPixFmt = DataSource.getFFmpegPixFmt(pixelformat);
if (FFmpeg.PIX_FMT_NONE != ffmpegPixFmt)
format = new AVFrameFormat(ffmpegPixFmt);
format = new AVFrameFormat(ffmpegPixFmt, pixelformat);
else
return false;
}

@ -233,7 +233,7 @@ protected Format[] getSupportedFormats(int streamIndex)
int pixelFormat = (int)getFFmpegPixFmt(f);
formats.add(new AVFrameFormat(size, Format.NOT_SPECIFIED,
pixelFormat));
pixelFormat, (int)f));
}
return formats.toArray(new Format[formats.size()]);
@ -297,10 +297,7 @@ protected Format setFormat(
if(newValue instanceof AVFrameFormat)
{
AVFrameFormat f = (AVFrameFormat)newValue;
long pixelFormat = -1;
int pixFmt = f.getPixFmt();
pixelFormat = getDSPixFmt(pixFmt);
long pixelFormat = f.getDevicePixFmt();
if(pixelFormat != -1)
{

@ -42,7 +42,8 @@ public class DataSource
new AVFrameFormat(
screenSize,
Format.NOT_SPECIFIED,
FFmpeg.PIX_FMT_ARGB),
FFmpeg.PIX_FMT_ARGB,
Format.NOT_SPECIFIED),
new RGBFormat(
screenSize, // size
Format.NOT_SPECIFIED, // maxDataLength

@ -344,7 +344,8 @@ private Format getCaptureOutputFormat()
? null
: new Dimension(width, height)),
Format.NOT_SPECIFIED,
FFmpeg.PIX_FMT_ARGB);
FFmpeg.PIX_FMT_ARGB,
CVPixelFormatType.kCVPixelFormatType_32ARGB);
else
return
new RGBFormat(
@ -360,7 +361,9 @@ private Format getCaptureOutputFormat()
if ((width == 0) && (height == 0))
{
if (captureOutputFormat instanceof AVFrameFormat)
return new AVFrameFormat(FFmpeg.PIX_FMT_YUV420P);
return new AVFrameFormat(FFmpeg.PIX_FMT_YUV420P,
CVPixelFormatType.
kCVPixelFormatType_420YpCbCr8Planar);
else
return new YUVFormat(YUVFormat.YUV_420);
}
@ -370,7 +373,9 @@ else if (captureOutputFormat instanceof AVFrameFormat)
new AVFrameFormat(
new Dimension(width, height),
Format.NOT_SPECIFIED,
FFmpeg.PIX_FMT_YUV420P);
FFmpeg.PIX_FMT_YUV420P,
CVPixelFormatType.
kCVPixelFormatType_420YpCbCr8Planar);
}
else
{

@ -49,7 +49,11 @@ public class DataSource
Video4Linux2.V4L2_PIX_FMT_YUV420,
FFmpeg.PIX_FMT_YUV420P,
Video4Linux2.V4L2_PIX_FMT_YUYV,
FFmpeg.PIX_FMT_YUYV422
FFmpeg.PIX_FMT_YUYV422,
Video4Linux2.V4L2_PIX_FMT_MJPEG,
FFmpeg.PIX_FMT_RGB24,
Video4Linux2.V4L2_PIX_FMT_JPEG,
FFmpeg.PIX_FMT_RGB24,
};
/**
@ -210,7 +214,7 @@ private String getDeviceName()
* Gets the Video for Linux Two API Specification pixel format matching a
* specific FFmpeg pixel format.
*
* @param ffmpegPixFmt the FFmpeg pixel format to get the matching Video for
* @param v4l2PixFmt the FFmpeg pixel format to get the matching Video for
* Linux Two API Specification pixel format of
* @return the Video for Linux Two API Specification pixel format matching
* the specified FFmpeg format
@ -227,7 +231,7 @@ public static int getFFmpegPixFmt(int v4l2PixFmt)
* Gets the FFmpeg pixel format matching a specific Video for Linux Two API
* Specification pixel format.
*
* @param v4l2PixFmt the Video for Linux Two API Specification pixel format
* @param ffmpegPixFmt the Video for Linux Two API Specification pixel format
* to get the matching FFmpeg pixel format of
* @return the FFmpeg pixel format matching the specified Video for Linux
* Two API Specification pixel format

@ -53,6 +53,12 @@ public class Video4Linux2
public static final int V4L2_PIX_FMT_YUYV
= v4l2_fourcc('Y', 'U', 'Y', 'V');
public static final int V4L2_PIX_FMT_MJPEG
= v4l2_fourcc('M', 'J', 'P', 'G');
public static final int V4L2_PIX_FMT_JPEG
= v4l2_fourcc('J', 'P', 'E', 'G');
public static final int VIDIOC_DQBUF;
public static final int VIDIOC_G_FMT;
@ -67,6 +73,8 @@ public class Video4Linux2
public static final int VIDIOC_S_FMT;
public static final int VIDIOC_S_PARM;
public static final int VIDIOC_STREAMOFF;
public static final int VIDIOC_STREAMON;
@ -82,6 +90,7 @@ public class Video4Linux2
VIDIOC_QUERYCAP = VIDIOC_QUERYCAP();
VIDIOC_REQBUFS = VIDIOC_REQBUFS();
VIDIOC_S_FMT = VIDIOC_S_FMT();
VIDIOC_S_PARM = VIDIOC_S_PARM();
VIDIOC_STREAMOFF = VIDIOC_STREAMOFF();
VIDIOC_STREAMON = VIDIOC_STREAMON();
}
@ -94,6 +103,8 @@ public class Video4Linux2
public static native long memcpy(long dest, long src, int n);
public static native long convert_jpeg(long dest, long src, int size);
public static native long mmap(
long start,
int length,
@ -184,6 +195,10 @@ public static native void v4l2_requestbuffers_setMemory(
long v4l2_requestbuffers,
int memory);
public static native long v4l2_streamparm_alloc(int type);
public static native void v4l2_streamparm_setFps(long v4l2_streamparm, int fps);
private static native int VIDIOC_DQBUF();
private static native int VIDIOC_G_FMT();
@ -198,6 +213,8 @@ public static native void v4l2_requestbuffers_setMemory(
private static native int VIDIOC_S_FMT();
private static native int VIDIOC_S_PARM();
private static native int VIDIOC_STREAMOFF();
private static native int VIDIOC_STREAMON();

@ -25,7 +25,6 @@
public class Video4Linux2Stream
extends AbstractPullBufferStream
{
/**
* The pool of <tt>ByteBuffer</tt>s this instances is using to transfer the
* media data captured by the Video for Linux Two API Specification device
@ -88,6 +87,16 @@ public class Video4Linux2Stream
*/
private long v4l2_buffer;
/**
* Native Video for Linux Two pixel format.
*/
private int nativePixelFormat = 0;
/**
* Tell device to start capture in read() method.
*/
private boolean startInRead = false;
/**
* Initializes a new <tt>Video4Linux2Stream</tt> instance which is to have
* its <tt>Format</tt>-related information abstracted by a specific
@ -214,7 +223,8 @@ private Format getFdFormat()
= new AVFrameFormat(
new Dimension(width, height),
Format.NOT_SPECIFIED,
ffmpegPixFmt);
ffmpegPixFmt,
pixelformat);
}
}
}
@ -412,6 +422,7 @@ public void read(Buffer buffer)
if (!(format instanceof AVFrameFormat))
format = null;
if (format == null)
{
format = getFormat();
@ -419,6 +430,30 @@ public void read(Buffer buffer)
buffer.setFormat(format);
}
if(startInRead)
{
startInRead = false;
long v4l2_buf_type
= Video4Linux2.v4l2_buf_type_alloc(
Video4Linux2.V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (0 == v4l2_buf_type)
throw new OutOfMemoryError("v4l2_buf_type_alloc");
try
{
if (Video4Linux2.ioctl(fd, Video4Linux2.VIDIOC_STREAMON,
v4l2_buf_type) == -1)
{
throw new IOException("ioctl: request= VIDIOC_STREAMON");
}
}
finally
{
Video4Linux2.free(v4l2_buf_type);
}
}
if (Video4Linux2.ioctl(fd, Video4Linux2.VIDIOC_DQBUF, v4l2_buffer)
== -1)
throw new IOException("ioctl: request= VIDIOC_DQBUF");
@ -427,18 +462,38 @@ public void read(Buffer buffer)
try
{
ByteBuffer data = null;
int index = Video4Linux2.v4l2_buffer_getIndex(v4l2_buffer);
long mmap = mmaps[index];
int bytesused = Video4Linux2.v4l2_buffer_getBytesused(v4l2_buffer);
ByteBuffer data = byteBufferPool.getFreeBuffer(bytesused);
if (data != null)
if(nativePixelFormat == Video4Linux2.V4L2_PIX_FMT_MJPEG ||
nativePixelFormat == Video4Linux2.V4L2_PIX_FMT_JPEG)
{
/* JPEG/MJPEG are compressed formats: allocate a buffer and
* convert the captured data to RGB24.
*/
Dimension videoSize = ((VideoFormat)format).getSize();
int captured_bytes = bytesused;
bytesused = videoSize.width * videoSize.height * 3;
data = byteBufferPool.getFreeBuffer(bytesused);
if(data != null)
Video4Linux2.convert_jpeg(data.ptr, mmap, captured_bytes);
}
else
{
int index = Video4Linux2.v4l2_buffer_getIndex(v4l2_buffer);
long mmap = mmaps[index];
data = byteBufferPool.getFreeBuffer(bytesused);
Video4Linux2.memcpy(data.ptr, mmap, bytesused);
data.setLength(bytesused);
FinalizableAVFrame.read(buffer, format, data, byteBufferPool);
if (data != null)
{
Video4Linux2.memcpy(data.ptr, mmap, bytesused);
}
}
data.setLength(bytesused);
FinalizableAVFrame.read(buffer, format, data, byteBufferPool);
}
finally
{
@ -536,9 +591,8 @@ private void setFdFormat(Format format)
if (format instanceof AVFrameFormat)
{
int pixFmt = ((AVFrameFormat) format).getPixFmt();
pixelformat = DataSource.getV4L2PixFmt(pixFmt);
pixelformat = ((AVFrameFormat) format).getDevicePixFmt();
nativePixelFormat = pixelformat;
}
if (Video4Linux2.V4L2_PIX_FMT_NONE == pixelformat)
throw new IOException("Unsupported format " + format);
@ -571,6 +625,7 @@ private void setFdFormat(Format format)
DataSource.DEFAULT_WIDTH,
DataSource.DEFAULT_HEIGHT);
}
if ((size != null)
&& ((size.width != width) || (size.height != height)))
{
@ -585,8 +640,10 @@ private void setFdFormat(Format format)
Video4Linux2.v4l2_pix_format_setPixelformat(
fmtPix,
pixelformat);
setFdFormat = true;
}
if (setFdFormat)
setFdFormat(v4l2_format, fmtPix, size, pixelformat);
}
@ -596,6 +653,21 @@ private void setFdFormat(Format format)
}
}
/**
* Sets the <tt>Format</tt> in which the Video for Linux Two API
* Specification device represented by the <tt>fd</tt> of this instance is
* to capture media data.
*
* @param v4l2_format native format to set on the Video for Linux Two API
* Specification device
* @param fmtPix native pixel format of the device
* @param size size to set on the device
* @param pixelformat requested pixel format
* @throws IOException if anything goes wrong while setting the
* native format of the media data to be captured by the Video for Linux
* Two API Specification device represented by the <tt>fd</tt> of this
* instance
*/
private void setFdFormat(
long v4l2_format,
long fmtPix,
@ -607,6 +679,7 @@ private void setFdFormat(
fmtPix,
Video4Linux2.V4L2_FIELD_NONE);
Video4Linux2.v4l2_pix_format_setBytesperline(fmtPix, 0);
if (Video4Linux2.ioctl(
fd,
Video4Linux2.VIDIOC_S_FMT,
@ -679,27 +752,13 @@ public void start()
Video4Linux2.free(v4l2_buffer);
}
long v4l2_buf_type
= Video4Linux2.v4l2_buf_type_alloc(
Video4Linux2.V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (0 == v4l2_buf_type)
throw new OutOfMemoryError("v4l2_buf_type_alloc");
try
{
if (Video4Linux2.ioctl(
fd,
Video4Linux2.VIDIOC_STREAMON,
v4l2_buf_type)
== -1)
{
throw new IOException("ioctl: request= VIDIOC_STREAMON");
}
}
finally
{
Video4Linux2.free(v4l2_buf_type);
}
/* We start the capture in the read() method (i.e. perform the
* VIDIOC_STREAMON ioctl there) because, for some fps/resolution
* combinations, the captured image comes out corrupted (shifted, or not
* a valid JPEG for the JPEG/MJPEG formats, ...) if it is done here.
* This may be because JMF sometimes performs the start/stop/start
* sequence too quickly...
*/
startInRead = true;
}
/**

Loading…
Cancel
Save