Bibliographie

  • Articles

    2013

    • [PDF] [DOI] H. Cheng, Z. Liu, L. Yang, and X. Chen, « Sparse representation and learning in visual recognition: theory and applications, » Signal Processing, vol. 93, iss. 6, pp. 1408-1425, 2013.
      [Bibtex]
      @article{cheng-sigpro-2013,
        author   = {Cheng, Hong and Liu, Zicheng and Yang, Lu and Chen, Xuewen},
        title    = {Sparse representation and learning in visual recognition:
                    Theory and applications},
        journal  = SIGPRO,
        volume   = {93},
        number   = {6},
        month    = jun,
        year     = {2013},
        pages    = {1408--1425},
        abstract = {Sparse representation and learning has been widely used in
                    computational intelligence, machine learning, computer vision
                    and pattern recognition, etc. Mathematically, solving sparse
                    representation and learning involves seeking the sparsest
                    linear combination of basis functions from an overcomplete
                    dictionary. A rational behind this is the sparse connectivity
                    between nodes in human brain. This paper presents a survey of
                    some recent work on sparse representation, learning and
                    modeling with emphasis on visual recognition. It covers both
                    the theory and application aspects. We first review the
                    sparse representation and learning theory including general
                    sparse representation, structured sparse representation,
                    high-dimensional nonlinear learning, Bayesian compressed
                    sensing, sparse subspace learning, non-negative sparse
                    representation, robust sparse representation, and efficient
                    sparse representation. We then introduce the applications of
                    sparse theory to various visual recognition tasks, including
                    feature representation and selection, dictionary learning,
                    Sparsity Induced Similarity (SIS) measures, sparse coding
                    based classification frameworks, and sparsity-related
                    topics},
        doi      = {10.1016/j.sigpro.2012.09.011},
      }
    • M. Rossi, « Color image reconstruction via sparse signal representation, » Master's thesis, Università degli Studi di Padova, 2013.
      [Bibtex]
      @mastersthesis{rossi-mastersthesis-2013,
        author   = {Rossi, Mattia},
        title    = {Color Image Reconstruction via Sparse Signal Representation},
        school   = {Universit{\`a} degli Studi di Padova},
        type     = {Master's thesis},
        day      = {16},
        month    = jun,
        year     = {2013},
        url      = {http://tesi.cab.unipd.it/43651/},
        keywords = {demosaicing, representation, recovery},
      }
    • [PDF] [DOI] A. A. Moghadam, M. Aghagolzadeh, M. Kumar, and H. Radha, « Compressive framework for demosaicing of natural images, » IEEE Transactions on Image Processing, vol. 22, iss. 6, pp. 2356-2371, 2013.
      [Bibtex]
      @article{moghadam-ip-2013,
        author   = {Moghadam, Abdolreza Abdolhosseini and Aghagolzadeh, Mohammad
                    and Kumar, Mrityunjay and Radha, Hayder},
        title    = {Compressive framework for demosaicing of natural images},
        journal  = IP,
        volume   = {22},
        number   = {6},
        month    = jun,
        year     = {2013},
        pages    = {2356--2371},
        abstract = {Typical consumer digital cameras sense only one out of three
                    color components per image pixel. The problem of demosaicing
                    deals with interpolating those missing color components. In
                    this paper, we present compressive demosaicing (CD), a
                    framework for demosaicing natural images based on the theory
                    of compressed sensing (CS). Given sensed samples of an image,
                    CD employs a CS solver to find the sparse representation of
                    that image under a fixed sparsifying dictionary Psi. As
                    opposed to state of the art CS-based demosaicing approaches,
                    we consider a clear distinction between the interchannel
                    (color) and interpixel correlations of natural images.
                    Utilizing some well-known facts about the human visual
                    system, those two types of correlations are utilized in a
                    nonseparable format to construct the sparsifying transform
                    Psi. Our simulation results verify that CD performs better
                    (both visually and in terms of PSNR) than leading demosaicing
                    approaches when applied to the majority of standard test
                    images},
        doi      = {10.1109/TIP.2013.2244215},
      }
    • [PDF] [DOI] S. Zhang, H. Yao, X. Sun, and X. Lu, « Sparse coding based visual tracking: review and experimental comparison, » Pattern Recognition, vol. 46, iss. 7, pp. 1772-1788, 2013.
      [Bibtex]
      @article{zhang-pr-2013,
        author   = {Zhang, Shengping and Yao, Hongxun and Sun, Xin and Lu, Xiusheng},
        title    = {Sparse coding based visual tracking: Review and experimental
                    comparison},
        journal  = PR,
        volume   = {46},
        number   = {7},
        month    = jun,
        year     = {2013},
        pages    = {1772--1788},
        abstract = {Recently, sparse coding has been successfully applied in
                    visual tracking. The goal of this paper is to review the
                    state-of-the-art tracking methods based on sparse coding. We
                    first analyze the benefits of using sparse coding in visual
                    tracking and then categorize these methods into appearance
                    modeling based on sparse coding (AMSC) and target searching
                    based on sparse representation (TSSR) as well as their
                    combination. For each categorization, we introduce the basic
                    framework and subsequent improvements with emphasis on their
                    advantages and disadvantages. Finally, we conduct extensive
                    experiments to compare the representative methods on a total
                    of 20 test sequences. The experimental results indicate that:
                    (1) AMSC methods significantly outperform TSSR methods. (2)
                    For AMSC methods, both discriminative dictionary and spatial
                    order reserved pooling operators are important for achieving
                    high tracking accuracy. (3) For TSSR methods, the widely used
                    identity pixel basis will degrade the performance when the
                    target or candidate images are not aligned well or severe
                    occlusion occurs. (4) For TSSR methods, L1 norm minimization
                    is not necessary. In contrast, L2 norm minimization can
                    obtain comparable performance but with lower computational
                    cost. The open questions and future research topics are also
                    discussed},
        doi      = {10.1016/j.patcog.2012.10.006},
      }

    2012

    • [PDF] [DOI] L. Liu and P. W. Fieguth, « Texture classification from random features, » IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 34, iss. 3, pp. 574-586, 2012.
      [Bibtex]
      @article{liu-pami-2012,
        author   = {Liu, Li and Fieguth, P. W.},
        title    = {Texture Classification from Random Features},
        journal  = PAMI,
        volume   = {34},
        number   = {3},
        month    = mar,
        year     = {2012},
        pages    = {574--586},
        abstract = {Inspired by theories of sparse representation and compressed
                    sensing, this paper presents a simple, novel, yet very
                    powerful approach for texture classification based on random
                    projection, suitable for large texture database applications.
                    At the feature extraction stage, a small set of random
                    features is extracted from local image patches. The random
                    features are embedded into a bag-of-words model to perform
                    texture classification; thus, learning and classification
                    are carried out in a compressed domain. The proposed
                    unconventional random feature extraction is simple, yet by
                    leveraging the sparse nature of texture images, our approach
                    outperforms traditional feature extraction methods which
                    involve careful design and complex steps. We have conducted
                    extensive experiments on each of the CUReT, the Brodatz, and
                    the MSRC databases, comparing the proposed approach to four
                    state-of-the-art texture classification methods: Patch,
                    Patch-MRF, MR8, and LBP. We show that our approach leads to
                    significant improvements in classification accuracy and
                    reductions in feature dimensionality},
        doi      = {10.1109/TPAMI.2011.145},
      }

    2010

    • [PDF] [DOI] J. Jiao, X. Mo, and C. Shen, « Image clustering via sparse representation, » Lecture Notes in Computer Science, vol. 5916, pp. 761-766, 2010.
      [Bibtex]
      @article{jiao-lncs-2010,
        author   = {Jiao, Jun and Mo, Xuan and Shen, Chen},
        title    = {Image Clustering via Sparse Representation},
        journal  = LNCS,
        volume   = {5916},
        year     = {2010},
        pages    = {761--766},
        abstract = {In recent years, clustering techniques have become a useful
                    tool in exploring data structures and have been employed in
                    a broad range of applications. In this paper we derive a
                    novel image clustering approach based on a sparse
                    representation model, which assumes that each instance can
                    be reconstructed by the sparse linear combination of other
                    instances. Our method characterizes the graph adjacency
                    structure and graph weights by sparse linear coefficients
                    computed by solving L1-minimization. Spectral clustering
                    algorithm using these coefficients as graph weight matrix is
                    then used to discover the cluster structure. Experiments
                    confirmed the effectiveness of our approach},
        doi      = {10.1007/978-3-642-11301-7_82},
      }
    • [PDF] [DOI] K. I. Kim and Y. Kwon, « Single-image super-resolution using sparse regression and natural image prior, » IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 32, iss. 6, pp. 1127-1133, 2010.
      [Bibtex]
      @article{kim-pami-2010,
        author   = {Kim, Kwang In and Kwon, Younghee},
        title    = {Single-Image Super-Resolution Using Sparse Regression and
                    Natural Image Prior},
        journal  = PAMI,
        volume   = {32},
        number   = {6},
        month    = jun,
        year     = {2010},
        pages    = {1127--1133},
        abstract = {This paper proposes a framework for single-image
                    super-resolution. The underlying idea is to learn a map from
                    input low-resolution images to target high-resolution images
                    based on example pairs of input and output images. Kernel
                    ridge regression (KRR) is adopted for this purpose. To
                    reduce the time complexity of training and testing for KRR,
                    a sparse solution is found by combining the ideas of kernel
                    matching pursuit and gradient descent. As a regularized
                    solution, KRR leads to a better generalization than simply
                    storing the examples as has been done in existing
                    example-based algorithms and results in much less noisy
                    images. However, this may introduce blurring and ringing
                    artifacts around major edges as sharp changes are penalized
                    severely. A prior model of a generic image class which takes
                    into account the discontinuity property of images is adopted
                    to resolve this problem. Comparison with existing algorithms
                    shows the effectiveness of the proposed method},
        doi      = {10.1109/TPAMI.2010.25},
      }

    2008

    • [PDF] [DOI] J. Mairal, M. Elad, and G. Sapiro, « Sparse representation for color image restoration, » IEEE Transactions on Image Processing, vol. 17, iss. 1, pp. 53-69, 2008.
      [Bibtex]
      @article{mairal-ip-2008,
        author   = {Mairal, Julien and Elad, Michael and Sapiro, Guillermo},
        title    = {Sparse Representation for Color Image Restoration},
        journal  = IP,
        volume   = {17},
        number   = {1},
        month    = jan,
        year     = {2008},
        pages    = {53--69},
        abstract = {Sparse representations of signals have drawn considerable
                    interest in recent years. The assumption that natural
                    signals, such as images, admit a sparse decomposition over a
                    redundant dictionary leads to efficient algorithms for
                    handling such sources of data. In particular, the design of
                    well adapted dictionaries for images has been a major
                    challenge. The K-SVD has been recently proposed for this
                    task and shown to perform very well for various grayscale
                    image processing tasks. In this paper, we address the
                    problem of learning dictionaries for color images and extend
                    the K-SVD-based grayscale image denoising algorithm that
                    appears in . This work puts forward ways for handling
                    nonhomogeneous noise and missing information, paving the way
                    to state-of-the-art results in applications such as color
                    image denoising, demosaicing, and inpainting, as
                    demonstrated in this paper},
        doi      = {10.1109/TIP.2007.911828},
      }

    2005

    • [PDF] [DOI] S. Lazebnik, C. Schmid, and J. Ponce, « A sparse texture representation using local affine regions, » IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 27, iss. 8, pp. 1265-1278, 2005.
      [Bibtex]
      @article{lazebnik-pami-2005,
        author   = {Lazebnik, S. and Schmid, C. and Ponce, J.},
        title    = {A sparse texture representation using local affine regions},
        journal  = PAMI,
        volume   = {27},
        number   = {8},
        month    = aug,
        year     = {2005},
        pages    = {1265--1278},
        abstract = {This paper introduces a texture representation suitable for
                    recognizing images of textured surfaces under a wide range
                    of transformations, including viewpoint changes and nonrigid
                    deformations. At the feature extraction stage, a sparse set
                    of affine Harris and Laplacian regions is found in the
                    image. Each of these regions can be thought of as a texture
                    element having a characteristic elliptic shape and a
                    distinctive appearance pattern. This pattern is captured in
                    an affine-invariant fashion via a process of shape
                    normalization followed by the computation of two novel
                    descriptors, the spin image and the RIFT descriptor. When
                    affine invariance is not required, the original elliptical
                    shape serves as an additional discriminative feature for
                    texture recognition. The proposed approach is evaluated in
                    retrieval and classification tasks using the entire Brodatz
                    database and a publicly available collection of 1,000
                    photographs of textured surfaces taken from different
                    viewpoint},
        doi      = {10.1109/TPAMI.2005.151},
      }
  • Ressources
    Compressive Sensing Resources at Rice University