Reduce kernel stack usage by lzjb_compress() by moving uint16 array
off the stack and on to the heap. The exact performance implications
of this I have not measured, but we absolutely need to keep stack
usage to a minimum. If/when this becomes an issue we will optimize.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
* source length if compression would overflow the destination buffer.
*/
* source length if compression would overflow the destination buffer.
*/
+#include <sys/zfs_context.h>
#define MATCH_BITS 6
#define MATCH_MIN 3
#define MATCH_BITS 6
#define MATCH_MIN 3
int copymask = 1 << (NBBY - 1);
int mlen, offset, hash;
uint16_t *hp;
int copymask = 1 << (NBBY - 1);
int mlen, offset, hash;
uint16_t *hp;
- uint16_t lempel[LEMPEL_SIZE] = { 0 };
+ lempel = kmem_zalloc(LEMPEL_SIZE * sizeof (uint16_t), KM_SLEEP);
while (src < (uchar_t *)s_start + s_len) {
if ((copymask <<= 1) == (1 << NBBY)) {
while (src < (uchar_t *)s_start + s_len) {
if ((copymask <<= 1) == (1 << NBBY)) {
- if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY)
+ if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) {
+ kmem_free(lempel, LEMPEL_SIZE*sizeof(uint16_t));
copymask = 1;
copymap = dst;
*dst++ = 0;
copymask = 1;
copymap = dst;
*dst++ = 0;
+
+ kmem_free(lempel, LEMPEL_SIZE * sizeof (uint16_t));
return (dst - (uchar_t *)d_start);
}
return (dst - (uchar_t *)d_start);
}