diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 203b789ac2485afeb0b65490aaad3be1a05dd177..57c9c42d389910f0120fd827788b30637f7c0398 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,16 +1,29 @@
-workflow:
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event" || $CI_COMMIT_REF_NAME =~ /master/
-
 default:
   image: $CI_REGISTRY/orfeotoolbox/otb-build-env/otb-ubuntu-native-develop-headless:20.04
   tags:
     - light
+  interruptible: true
+
+variables:
+  PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
+
+cache:
+  key: $CI_COMMIT_REF_SLUG
+  paths:
+    - .cache/pip
+
+workflow:
+  rules:
+    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS && $CI_PIPELINE_SOURCE == "push"
+      when: never
+    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+    - if: $CI_COMMIT_TAG
+    - if: $CI_COMMIT_REF_PROTECTED == "true"
 
 stages:
   - Static Analysis
-  - Documentation test
   - Tests
+  - Documentation
   - Ship
 
 #  -------------------------------- Static analysis --------------------------------
@@ -18,117 +31,121 @@ stages:
 .static_analysis:
   stage: Static Analysis
   allow_failure: true
-
-codespell:
-  extends: .static_analysis
-  before_script:
-    - pip install codespell
-  script:
-    - codespell {pyotb,tests}
-
-flake8:
-  extends: .static_analysis
-  before_script:
-    - pip install flake8
-  script:
-    - flake8 --max-line-length=120 $PWD/pyotb --ignore=F403,E402,F401,W503,W504
+  rules:
+    - changes:
+        - pyotb/*.py
 
 pydocstyle:
   extends: .static_analysis
   before_script:
-    - pip install pydocstyle
+    - pip install pydocstyle tomli
   script:
-    - pydocstyle $PWD/pyotb --convention=google
+    - pydocstyle $PWD/pyotb
 
 pylint:
   extends: .static_analysis
   before_script:
     - pip install pylint
   script:
-    - pylint --max-line-length=120 $PWD/pyotb --disable=too-many-nested-blocks,too-many-locals,too-many-statements,too-few-public-methods,too-many-instance-attributes,too-many-arguments,invalid-name,fixme,too-many-return-statements,too-many-lines,too-many-branches,import-outside-toplevel,wrong-import-position,wrong-import-order,import-error,missing-class-docstring
-
-# ---------------------------------- Documentation  ----------------------------------
+    - pylint $PWD/pyotb --disable=fixme
 
-.docs:
+codespell:
+  extends: .static_analysis
+  rules:
+    - changes:
+        - "**/*.py"
+        - "**/*.md"
   before_script:
-    - apt-get update && apt-get -y install virtualenv
-    - virtualenv doc_env
-    - source doc_env/bin/activate
-    - pip install -U pip
-    - pip install -U -r doc/doc_requirements.txt
-
-pages_test:
-  stage: Documentation test
-  extends: .docs
-  except:
-    - master
-  when: manual
+    - pip install codespell
   script:
-    - mkdocs build --site-dir public_test
-  artifacts:
-    paths:
-      - public_test
+    - codespell {pyotb,tests,doc,README.md}
 
-# -------------------------------------- Tests -------------------------------------- 
+# -------------------------------------- Tests --------------------------------------
+test_install:
+  stage: Tests
+  only:
+    - tags
+  allow_failure: false
+  script:
+    - pip install .
 
 .tests:
   stage: Tests
   allow_failure: false
+  rules:
+    - changes:
+        - "**/*.py"
+        - .gitlab-ci.yml
   variables:
     OTB_ROOT: /opt/otb
     LD_LIBRARY_PATH: /opt/otb/lib
+    SPOT_IMG_URL: https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/raw/develop/Data/Input/SP67_FR_subset_1.tif
+    PLEIADES_IMG_URL: https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/raw/develop/Data/Baseline/OTB/Images/prTvOrthoRectification_pleiades-1_noDEM.tif
+  before_script:
+    - pip install pytest pytest-cov
+
+module_core:
+  extends: .tests
+  variables:
     OTB_LOGGER_LEVEL: INFO
     PYOTB_LOGGER_LEVEL: DEBUG
-    IMAGE_URL: https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/raw/develop/Data/Input/SP67_FR_subset_1.tif?inline=false
-    TEST_INPUT_IMAGE: tests/image.tif
   artifacts:
     reports:
-      junit: test-*.xml
-  before_script:
-    - wget $IMAGE_URL -O $TEST_INPUT_IMAGE
-    - pip install pytest
-
-test_core:
-  extends: .tests
-  script:
-    - python3 -m pytest --color=yes --junitxml=test-core.xml tests/test_core.py
-
-test_numpy:
-  extends: .tests
+      junit: test-module-core.xml
+      coverage_report:
+        coverage_format: cobertura
+        path: coverage.xml
+  coverage: '/TOTAL.*\s+(\d+%)$/'
   script:
-    - python3 -m pytest --color=yes --junitxml=test-numpy.xml tests/test_numpy.py
+    - curl -fsLI $SPOT_IMG_URL
+    - curl -fsLI $PLEIADES_IMG_URL
+    - python3 -m pytest -vv --junitxml=test-module-core.xml --cov-report xml:coverage.xml tests/test_core.py
 
-test_pipeline:
-  #when: manual
-  extends: .tests
-  script:
-    - python3 -m pytest --color=yes --junitxml=test-pipeline.xml tests/test_pipeline.py
-
-test_serialization:
+pipeline_permutations:
   extends: .tests
+  variables:
+    OTB_LOGGER_LEVEL: WARNING
+    PYOTB_LOGGER_LEVEL: INFO
+  artifacts:
+    reports:
+      junit: test-pipeline-permutations.xml
   script:
-    - python3 -m pytest --color=yes --junitxml=test-serialization.xml tests/test_serialization.py
+    - curl -fsLI $SPOT_IMG_URL
+    - python3 -m pytest -vv --junitxml=test-pipeline-permutations.xml tests/test_pipeline.py
 
-# -------------------------------------- Ship --------------------------------------- 
+# -------------------------------------- Docs ---------------------------------------
 
-pages:
-  stage: Ship
-  extends: .docs
-  only:
-    - master
+docs:
+  stage: Documentation
+  rules:
+    - changes:
+        - "*.txt"
+        - "*.md"
+        - mkdocs.yml
+        - doc/*
+        - pyotb/*.py
+  before_script:
+    - apt update && apt install -y python3.8-venv
+    - python3 -m venv docs_venv
+    - source docs_venv/bin/activate
+    - python3 -m pip install -U pip
+    - python3 -m pip install -r doc/doc_requirements.txt
   script:
     - mkdocs build --site-dir public
   artifacts:
     paths:
       - public
 
+# -------------------------------------- Ship ---------------------------------------
+
 pypi:
   stage: Ship
+  # when: manual
   only:
-   - master
+    - tags
   before_script:
-   - apt update && apt install -y python3.8-venv
-   - python3 -m pip install --upgrade build twine
+    - apt update && apt install -y python3.8-venv
+    - pip install build twine
   script:
-   - python3 -m build
-   - python3 -m twine upload --repository-url https://upload.pypi.org/legacy/ --non-interactive -u __token__ -p $pypi_token dist/*
+    - python3 -m build
+    - python3 -m twine upload --non-interactive -u __token__ -p $pypi_token dist/*
diff --git a/AUTHORS.md b/AUTHORS.md
new file mode 100644
index 0000000000000000000000000000000000000000..6518258b14e24c64202014b1015840145fcc644b
--- /dev/null
+++ b/AUTHORS.md
@@ -0,0 +1,10 @@
+# Project authors
+
+## Initial codebase
+
+* Nicolas NARÇON (INRAE, now ESA)
+
+## Current maintainers
+
+* Rémi CRESSON (INRAE)
+* Vincent DELBAR (La TeleScop)
diff --git a/README.md b/README.md
index 60659a86f5589eb45d2b55291fa5357698becb11..f5aa1841c554fd75311e815b95dd4dd3a5d2892a 100644
--- a/README.md
+++ b/README.md
@@ -1,318 +1,48 @@
-# pyotb: a pythonic extension of OTB
+# pyotb: Orfeo ToolBox for Python
 
-Full documentation is available at [pyotb.readthedocs.io](https://pyotb.readthedocs.io/)
+[![latest release](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/-/badges/release.svg)](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/-/releases)
+[![pipeline status](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/badges/develop/pipeline.svg)](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/-/commits/develop)
+[![coverage report](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/badges/develop/coverage.svg)](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/-/commits/develop)
+[![read the docs status](https://readthedocs.org/projects/pyotb/badge/?version=master)](https://pyotb.readthedocs.io/en/master/)
 
-[![Latest Release](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/-/badges/release.svg)](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/-/releases)
-[![pipeline status](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/badges/master/pipeline.svg)](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/-/commits/master)
+**pyotb** wraps the [Orfeo Toolbox](https://www.orfeo-toolbox.org/) in a pythonic, developer-friendly 
+fashion.  
 
+## Key features
 
-## Installation
-Requirements:
-- python>=3.5 and numpy
-- OrfeoToolBox python API
+- Easy use of Orfeo ToolBox (OTB) applications from python
+- Simplified handling of OTB's sophisticated I/O features
+- Lazy execution of operations thanks to the OTB streaming mechanism
+- Interoperable with popular python libraries ([numpy](https://numpy.org/) and 
+[rasterio](https://rasterio.readthedocs.io/))
+- Extensible
 
-```bash
-pip install pyotb --upgrade
-```
-
-For Python>=3.6, latest version available is pyotb 1.5.1 For Python 3.5, latest version available is pyotb 1.2.2
-
-## Quickstart: running an OTB application as a oneliner
-pyotb has been written so that it is more convenient to run an application in Python.
-
-You can pass the parameters of an application as a dictionary :
-```python
-import pyotb
-resampled = pyotb.RigidTransformResample({'in': 'my_image.tif', 'interpolator': 'linear',
-                                          'transform.type.id.scaley': 0.5, 'transform.type.id.scalex': 0.5})
-```
-Note that pyotb has a 'lazy' evaluation: it only performs operation when it is needed, i.e. results are written to disk.
-Thus, the previous line doesn't trigger the application.
-
-To actually trigger the application execution, you need to write the result to disk:
-
-```python
-resampled.write('output.tif')  # this is when the application actually runs
-```
-
-## Using Python keyword arguments
-It is also possible to use the Python keyword arguments notation for passing the parameters:
-```python
-output = pyotb.SuperImpose(inr='reference_image.tif', inm='image.tif')
-```
-is equivalent to:
-```python
-output = pyotb.SuperImpose({'inr': 'reference_image.tif', 'inm': 'image.tif'})
-```
-
-Limitations : for this notation, python doesn't accept the parameter `in` or any parameter that contains a `.`. E.g., it is not possible to use `pyotb.RigidTransformResample(in=input_path...)` or `pyotb.VectorDataExtractROI(io.vd=vector_path...)`.
-
-
-
-
-## In-memory connections
-The big asset of pyotb is the ease of in-memory connections between apps.
-
-Let's start from our previous example. Consider the case where one wants to apply optical calibration and binary morphological dilatation 
-following the undersampling.
-
-Using pyotb, you can pass the output of an app as input of another app :
-```python
-import pyotb
-
-resampled = pyotb.RigidTransformResample({'in': 'my_image.tif', 'interpolator': 'linear', 
-                                          'transform.type.id.scaley': 0.5, 'transform.type.id.scalex': 0.5})
-
-calibrated = pyotb.OpticalCalibration({'in': resampled, 'level': 'toa'}) 
-
-dilated = pyotb.BinaryMorphologicalOperation({'in': calibrated, 'out': 'output.tif', 'filter': 'dilate', 
-                                              'structype': 'ball', 'xradius': 3, 'yradius': 3})
-dilated.write('result.tif')
-```
-
-## Writing the result of an app
-Any pyotb object can be written to disk using the `write` method, e.g. :
-
-```python
-import pyotb
-
-resampled = pyotb.RigidTransformResample({'in': 'my_image.tif', 'interpolator': 'linear',
-                                          'transform.type.id.scaley': 0.5, 'transform.type.id.scalex': 0.5})
-# Here you can set optionally pixel type and extended filename variables
-resampled.write({'out': 'output.tif'}, pixel_type='uint16', filename_extension='?nodata=65535')
-```
-
-Another possibility for writing results is to set the output parameter when initializing the application:
-```python
-import pyotb
-
-resampled = pyotb.RigidTransformResample({'in': 'my_image.tif', 'interpolator': 'linear', 'out': 'output.tif',
-                                          'transform.type.id.scaley': 0.5, 'transform.type.id.scalex': 0.5})
-# Here you can set optionally pixel type and extended filename variables
-resampled.write(pixel_type='uint16', filename_extension='?nodata=65535')
-```
-
-## Arithmetic operations
-Every pyotb object supports arithmetic operations, such as addition, subtraction, comparison...
-Consider an example where we want to compute a vegeteation mask from NDVI, i.e. the arithmetic operation `(nir - red) / (nir + red) > 0.3`
-
-With pyotb, one can simply do :
-```python
-import pyotb
-
-# transforming filepaths to pyotb objects
-nir, red = pyotb.Input('nir.tif'), pyotb.Input('red.tif')
-
-res = (nir - red) / (nir + red) > 0.3
-print(res.exp)  # prints the BandMath expression: "((im1b1 - im2b1) / (im1b1 + im2b1)) > 0.3 ? 1 : 0"
-res.write('vegetation_mask.tif', pixel_type='uint8')
-```
-
-## Slicing
-pyotb objects support slicing in a Python fashion :
-
-```python
-import pyotb
+Documentation hosted at [pyotb.readthedocs.io](https://pyotb.readthedocs.io/).
 
-# transforming filepath to pyotb object
-inp = pyotb.Input('my_image.tif')
+## Example
 
-inp[:, :, :3]  # selecting first 3 bands
-inp[:, :, [0, 1, 4]]  # selecting bands 1, 2 & 5
-inp[:1000, :1000]  # selecting 1000x1000 subset, same as inp[:1000, :1000, :] 
-inp[:100, :100].write('my_image_roi.tif')  # write cropped image to disk
-```
-
-## Numpy-inspired functions
-Some functions have been written, entirely based on OTB, to mimic the behavior of some well-known numpy functions. 
-### pyotb.where
-Equivalent of `numpy.where`.
-It is the equivalent of the muparser syntax `condition ? x : y` that can be used in OTB's BandMath.
-
-```python
-import pyotb
-
-# transforming filepaths to pyotb objects
-labels, image1, image2 = pyotb.Input('labels.tif'), pyotb.Input('image1.tif') , pyotb.Input('image2.tif')
-
-# If labels = 1, returns image1. Else, returns image2 
-res = pyotb.where(labels == 1, image1, image2)  # this would also work: pyotb.where(labels == 1, 'image1.tif', 'image2.tif') 
-
-# A more complex example
-# If labels = 1, returns image1. If labels = 2, returns image2. If labels = 3, returns 3. Else 0
-res = pyotb.where(labels == 1, image1,
-                  pyotb.where(labels == 2, image2,
-                              pyotb.where(labels == 3, 3, 0)))
+Building a simple pipeline with OTB applications
 
-```
-
-### pyotb.clip
-Equivalent of `numpy.clip`. Clip (limit) the values in a raster to a range.
-
-```python
+```py
 import pyotb
 
-res = pyotb.clip('my_image.tif', 0, 255)  # clips the values between 0 and 255
-```
-
-### pyotb.all
-Equivalent of `numpy.all`. 
-
-For only one image, this function checks that all bands of the image are True (i.e. !=0) and outputs
-a singleband boolean raster.
-For several images, this function checks that all images are True (i.e. !=0) and outputs
-a boolean raster, with as many bands as the inputs.
-
-
-### pyotb.any
-Equivalent of `numpy.any`. 
-
-For only one image, this function checks that at least one band of the image is True (i.e. !=0) and outputs
-a singleband boolean raster.
-For several images, this function checks that at least one of the images is True (i.e. !=0) and outputs
-a boolean raster, with as many bands as the inputs.
-
-
-## Interaction with Numpy
-
-pyotb objects can be transparently used in numpy functions.
-
-For example:
-
-```python
-import pyotb
-import numpy as np
-
-inp = pyotb.Input('image.tif')  # this is a pyotb object
-
-# Creating a numpy array of noise
-white_noise = np.random.normal(0, 50, size=inp.shape)  # this is a numpy object
-
-# Adding the noise to the image
-noisy_image = inp + white_noise  # magic: this is a pyotb object that has the same georeference as input. 
-                                 # `np.add(inp, white_noise)` would have worked the same
-noisy_image.write('image_plus_noise.tif')
-```
-Limitations : 
-- The whole image is loaded into memory
-- The georeference can not be modified. Thus, numpy operations can not change the image or pixel size
-
-
-## Export to rasterio
-pyotb objects can also be exported in a format that is usable by rasterio.
-
-For example:
-
-```python
-import pyotb
-import rasterio
-from scipy import ndimage
-
-# Pansharpening + NDVI + creating bare soils mask
-pxs = pyotb.BundleToPerfectSensor(inp='panchromatic.tif', inxs='multispectral.tif')
-ndvi = pyotb.RadiometricIndices({'in': pxs, 'channels.red': 3, 'channels.nir': 4, 'list': 'Vegetation:NDVI'})
-bare_soil_mask = (ndvi < 0.3)
-
-# Exporting the result as array & profile usable by rasterio
-mask_array, profile = bare_soil_mask.to_rasterio()
-
-# Doing something in Python that is not possible with OTB, e.g. gathering the contiguous groups of pixels
-# with an integer index
-labeled_mask_array, nb_groups = ndimage.label(mask_array)
-
-# Writing the result to disk
-with rasterio.open('labeled_bare_soil.tif', 'w', **profile) as f:
-    f.write(labeled_mask_array)
-
-```
-This way of exporting pyotb objects is more flexible that exporting to numpy, as the user gets the `profile` dictionary. 
-If the georeference or pixel size is modified, the user can update the `profile` accordingly.
-
-
-## Interaction with Tensorflow
-
-We saw that numpy operations had some limitations. To bypass those limitations, it is possible to use some Tensorflow operations on pyotb objects.
-
-
-You need a working installation of OTBTF >=3.0 for this and then the code is like this:
-
-```python
-import pyotb
-
-def scalar_product(x1, x2):
-    """This is a function composed of tensorflow operations."""
-    import tensorflow as tf
-    return tf.reduce_sum(tf.multiply(x1, x2), axis=-1)
-
-# Compute the scalar product
-res = pyotb.run_tf_function(scalar_product)('image1.tif', 'image2.tif')  # magic: this is a pyotb object
-res.write('scalar_product.tif')
-```
-
-For some easy syntax, one can use `pyotb.run_tf_function` as a function decorator, such as:
-```python
-import pyotb
-
-@pyotb.run_tf_function  # The decorator enables the use of pyotb objects as inputs/output of the function
-def scalar_product(x1, x2):
-    import tensorflow as tf
-    return tf.reduce_sum(tf.multiply(x1, x2), axis=-1)
-
-res = scalar_product('image1.tif', 'image2.tif')  # magic: this is a pyotb object
-```
-
-Advantages :
-- The process supports streaming, hence the whole image is **not** loaded into memory
-- Can be integrated in OTB pipelines
-
-Limitations :
-- It is not possible to use the tensorflow python API inside a script where OTBTF is used because of compilation issues 
-between Tensorflow and OTBTF, i.e. `import tensorflow` doesn't work in a script where OTBTF apps have been initialized
-
-
-## Some examples
-### Compute the mean of several rasters, taking into account NoData
-Let's consider we have at disposal 73 NDVI rasters for a year, where clouds have been masked with NoData (nodata value of -10 000 for example).
-
-Goal: compute the mean across time (keeping the spatial dimension) of the NDVIs, excluding cloudy pixels. Piece of code to achieve that:
-
-```python
-import pyotb
-
-nodata = -10000
-ndvis = [pyotb.Input(path) for path in ndvi_paths]
-
-# For each pixel location, summing all valid NDVI values 
-summed = sum([pyotb.where(ndvi != nodata, ndvi, 0) for ndvi in ndvis])
-
-# Printing the generated BandMath expression
-print(summed.exp)  # this returns a very long exp: "0 + ((im1b1 != -10000) ? im1b1 : 0) + ((im2b1 != -10000) ? im2b1 : 0) + ... + ((im73b1 != -10000) ? im73b1 : 0)"
-
-# For each pixel location, getting the count of valid pixels
-count = sum([pyotb.where(ndvi == nodata, 0, 1) for ndvi in ndvis])
-
-mean = summed / count  # BandMath exp of this is very long: "(0 + ((im1b1 != -10000) ? im1b1 : 0) + ... + ((im73b1 != -10000) ? im73b1 : 0)) / (0 + ((im1b1 == -10000) ? 0 : 1) + ... + ((im73b1 == -10000) ? 0 : 1))"
-mean.write('ndvi_annual_mean.tif')
-```
-
-Note that no actual computation is executed before the last line where the result is written to disk.
-
-### Process raw Pleiades data
-This is a common case of Pleiades data preprocessing : optical calibration -> orthorectification -> pansharpening
-
-```python
-import pyotb
-srtm = '/media/data/raster/nasa/srtm_30m'
-geoid = '/media/data/geoid/egm96.grd'
+# RigidTransformResample, with input parameters as dict
+resampled = pyotb.RigidTransformResample({
+    "in": "https://myserver.ia/input.tif",  # Note: no /vsicurl/
+    "interpolator": "linear", 
+    "transform.type.id.scaley": 0.5,
+    "transform.type.id.scalex": 0.5
+})
 
-pan =  pyotb.OpticalCalibration('IMG_PHR1A_P_001/DIM_PHR1A_P_201509011347379_SEN_1791374101-001.XML', level='toa')
-ms = pyotb.OpticalCalibration('IMG_PHR1A_MS_002/DIM_PHR1A_MS_201509011347379_SEN_1791374101-002.XML', level='toa')
+# OpticalCalibration, with input parameters as args
+calib = pyotb.OpticalCalibration(resampled)
 
-pan_ortho = pyotb.OrthoRectification({'io.in': pan, 'elev.dem': srtm, 'elev.geoid': geoid})
-ms_ortho = pyotb.OrthoRectification({'io.in': ms, 'elev.dem': srtm, 'elev.geoid': geoid})
+# BandMath, with input parameters as kwargs
+ndvi = pyotb.BandMath(calib, exp="ndvi(im1b1, im1b4)")
 
-pxs = pyotb.BundleToPerfectSensor(inp=pan_ortho, inxs=ms_ortho, method='bayes', mode="default")
+# Pythonic slicing
+roi = ndvi[20:586, 9:572]
 
-# Here we trigger every app in the pipeline and the process is blocked until result is written to disk
-pxs.write('pxs_image.tif', pixel_type='uint16', filename_extension='?gdal:co:COMPRESS=DEFLATE&gdal:co:PREDICTOR=2')
+# Pipeline execution. The actual computation happens here!
+roi.write("output.tif", "float")
 ```
diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt
index 3086078f8eb85fdd642ee996769ca9f3c7ebe090..d7f6094769170ed1c7bda85144efd50e2bff4983 100644
--- a/RELEASE_NOTES.txt
+++ b/RELEASE_NOTES.txt
@@ -1,3 +1,25 @@
+---------------------------------------------------------------------
+2.00 (Nov 23, 2023) - Changes since version 1.5.4
+
+- Major refactoring (see troubleshooting/migration)
+- Pythonic extended filenames (can use dict, etc)
+- Easy access to image metadata
+- CI improvements (tests, coverage, doc, etc)
+- Documentation improvement
+- Code format
+- Allow OTB dotted parameters in kwargs
+- Easy access to pixel coordinates
+- Add function to transform x,y coordinates into row, col
+- Native support of vsicurl inputs
+- Fixes and enhancements in `summarize()`
+- Fixes in `shape`
+- Add typing to function defs to enhance documentation
+
+---------------------------------------------------------------------
+1.5.4 (Oct 01, 2022) - Changes since version 1.5.3
+
+- Fix slicer wrong end of slicing
+
 ---------------------------------------------------------------------
 1.5.3 (Sep 29, 2022) - Changes since version 1.5.2
 
diff --git a/doc/MISC.md b/doc/MISC.md
index 8da0eeace3eeb71cd0c20e92a88758fa5c072da2..6be696fe9f16702d33193efcf625b57c9423260e 100644
--- a/doc/MISC.md
+++ b/doc/MISC.md
@@ -1,9 +1,12 @@
 ## Miscellaneous: Work with images with different footprints / resolutions
-OrfeoToolBox provides a handy `Superimpose` application that enables the projection of an image into the geometry of another one.
+
+OrfeoToolBox provides a handy `Superimpose` application that enables the 
+projection of an image into the geometry of another one.
 
 In pyotb, a function has been created to handle more than 2 images.
 
-Let's consider the case where we have 3 images with different resolutions and different footprints :
+Let's consider the case where we have 3 images with different resolutions and 
+different footprints :
 
 ![Images](illustrations/pyotb_define_processing_area_initial.jpg)
 
@@ -11,12 +14,15 @@ Let's consider the case where we have 3 images with different resolutions and di
 import pyotb
 
 # transforming filepaths to pyotb objects
-s2_image, vhr_image, labels = pyotb.Input('image_10m.tif'), pyotb.Input('image_60cm.tif'), pyotb.Input('land_cover_2m.tif')
+s2_image = pyotb.Input('image_10m.tif')
+vhr_image = pyotb.Input('image_60cm.tif')
+labels = pyotb.Input('land_cover_2m.tif')
 
 print(s2_image.shape)  # (286, 195, 4)
 print(vhr_image.shape)  # (2048, 2048, 3)
 print(labels.shape)  # (1528, 1360, 1)
 ```
+
 Our goal is to obtain all images at the same footprint, same resolution and same shape. 
 Let's consider we want the intersection of all footprints and the same resolution as `labels` image.
 
@@ -25,11 +31,16 @@ Let's consider we want the intersection of all footprints and the same resolutio
 Here is the final result :
 ![Result](illustrations/pyotb_define_processing_area_result.jpg)
 
-The piece of code to achieve this : 
+The piece of code to achieve this :
+
 ```python
-s2_image, vhr_image, labels = pyotb.define_processing_area(s2_image, vhr_image, labels, window_rule='intersection',
-                                                           pixel_size_rule='same_as_input', 
-                                                           reference_pixel_size_input=labels, interpolator='bco')
+s2_image, vhr_image, labels = pyotb.define_processing_area(
+    s2_image, vhr_image, labels, 
+    window_rule='intersection',
+    pixel_size_rule='same_as_input',
+    reference_pixel_size_input=labels, 
+    interpolator='bco'
+)
 
 print(s2_image.shape)  # (657, 520, 4)
 print(vhr_image.shape)  # (657, 520, 3)
diff --git a/doc/comparison_otb.md b/doc/comparison_otb.md
index 612eab6ffc46d02e91ca730a99c3059662e06dd5..0129cb2e866d9a990f7d7df3fc1f6a031de6e64b 100644
--- a/doc/comparison_otb.md
+++ b/doc/comparison_otb.md
@@ -1,132 +1,302 @@
 ## Comparison between otbApplication and pyotb
 
 ### Single application execution
-Using OTB, the code would be like :
+
+<table>
+<tr>
+<th> OTB </th>
+<th> pyotb </th>
+</tr>
+<tr>
+<td>
+
 ```python
-import otbApplication
+import otbApplication as otb
 
 input_path = 'my_image.tif'
-resampled = otbApplication.Registry.CreateApplication('RigidTransformResample')
-resampled.SetParameterString('in', input_path)
-resampled.SetParameterString('interpolator', 'linear')
-resampled.SetParameterFloat('transform.type.id.scalex', 0.5)
-resampled.SetParameterFloat('transform.type.id.scaley', 0.5)
-resampled.SetParameterString('out', 'output.tif')
-resampled.SetParameterOutputImagePixelType('out', otbApplication.ImagePixelType_uint16)
-resampled.ExecuteAndWriteOutput()
+app = otb.Registry.CreateApplication(
+    'RigidTransformResample'
+)
+app.SetParameterString(
+    'in', input_path
+)
+app.SetParameterString(
+    'interpolator', 'linear'
+)
+app.SetParameterFloat(
+    'transform.type.id.scalex', 0.5
+)
+app.SetParameterFloat(
+    'transform.type.id.scaley', 0.5
+)
+app.SetParameterString(
+    'out', 'output.tif'
+)
+app.SetParameterOutputImagePixelType(
+    'out', otb.ImagePixelType_uint16
+)
+
+app.ExecuteAndWriteOutput()
 ```
 
-Using pyotb:
+</td>
+<td>
+
 ```python
 import pyotb
-resampled = pyotb.RigidTransformResample({'in': 'my_image.tif', 'interpolator': 'linear',
-                                          'transform.type.id.scaley': 0.5, 'transform.type.id.scalex': 0.5})
-resampled.write('output.tif', pixel_type='uint16')
+
+app = pyotb.RigidTransformResample({
+    'in': 'my_image.tif', 
+    'interpolator': 'linear',
+    'transform.type.id.scaley': 0.5,
+    'transform.type.id.scalex': 0.5
+})
+
+app.write(
+    'output.tif', 
+    pixel_type='uint16'
+)
 ```
 
+</td>
+</tr>
+</table>
+
 ### In-memory connections
 
-Using OTB :
+<table>
+<tr>
+<th> OTB </th>
+<th> pyotb </th>
+</tr>
+<tr>
+<td>
+
 ```python
-import otbApplication
-
-resampled = otbApplication.Registry.CreateApplication('RigidTransformResample')
-resampled.SetParameterString('in', 'my_image.tif')
-resampled.SetParameterString('interpolator', 'linear')
-resampled.SetParameterFloat('transform.type.id.scalex', 0.5)
-resampled.SetParameterFloat('transform.type.id.scaley', 0.5)
-resampled.Execute()
-
-calibrated = otbApplication.Registry.CreateApplication('OpticalCalibration')
-calibrated.ConnectImage('in', resampled, 'out')
-calibrated.SetParameterString('level', 'toa')
-calibrated.Execute()
-
-dilated = otbApplication.Registry.CreateApplication('BinaryMorphologicalOperation')
-dilated.ConnectImage('in', calibrated, 'out')
-dilated.SetParameterString("filter", 'dilate')
-dilated.SetParameterString("structype", 'ball')
-dilated.SetParameterInt("xradius", 3)
-dilated.SetParameterInt("yradius", 3)
-dilated.SetParameterString('out', 'output.tif')
-dilated.SetParameterOutputImagePixelType('out', otbApplication.ImagePixelType_uint16)
-dilated.ExecuteAndWriteOutput()
+import otbApplication as otb
+
+app1 = otb.Registry.CreateApplication(
+    'RigidTransformResample'
+)
+app1.SetParameterString(
+    'in', 'my_image.tif'
+)
+app1.SetParameterString(
+    'interpolator', 'linear'
+)
+app1.SetParameterFloat(
+    'transform.type.id.scalex', 0.5
+)
+app1.SetParameterFloat(
+    'transform.type.id.scaley', 0.5
+)
+app1.Execute()
+
+app2 = otb.Registry.CreateApplication(
+    'OpticalCalibration'
+)
+app2.ConnectImage('in', app1, 'out')
+app2.SetParameterString('level', 'toa')
+app2.Execute()
+
+app3 = otb.Registry.CreateApplication(
+    'BinaryMorphologicalOperation'
+)
+app3.ConnectImage(
+    'in', app2, 'out'
+)
+app3.SetParameterString(
+    'filter', 'dilate'
+)
+app3.SetParameterString(
+    'structype', 'ball'
+)
+app3.SetParameterInt(
+    'xradius', 3
+)
+app3.SetParameterInt(
+    'yradius', 3
+)
+app3.SetParameterString(
+    'out', 'output.tif'
+)
+app3.SetParameterOutputImagePixelType(
+    'out', otb.ImagePixelType_uint16
+)
+app3.ExecuteAndWriteOutput()
 ```
 
-Using pyotb:
+</td>
+<td>
+
 ```python
 import pyotb
 
-resampled = pyotb.RigidTransformResample({'in': 'my_image.tif', 'interpolator': 'linear', 
-                                          'transform.type.id.scaley': 0.5, 'transform.type.id.scalex': 0.5})
+app1 = pyotb.RigidTransformResample({
+    'in': 'my_image.tif', 
+    'interpolator': 'linear',
+    'transform.type.id.scaley': 0.5, 
+    'transform.type.id.scalex': 0.5
+})
+
+app2 = pyotb.OpticalCalibration({
+    'in': app1, 
+    'level': 'toa'
+}) 
 
-calibrated = pyotb.OpticalCalibration({'in': resampled, 'level': 'toa'}) 
+app3 = pyotb.BinaryMorphologicalOperation({
+    'in': app2, 
+    'out': 'output.tif', 
+    'filter': 'dilate',
+    'structype': 'ball', 
+    'xradius': 3, 
+    'yradius': 3
+})
 
-dilated = pyotb.BinaryMorphologicalOperation({'in': calibrated, 'out': 'output.tif', 'filter': 'dilate', 
-                                              'structype': 'ball', 'xradius': 3, 'yradius': 3})
-dilated.write('result.tif', pixel_type='uint16')
+app3.write(
+    'result.tif', 
+    pixel_type='uint16'
+)
 ```
 
+</td>
+</tr>
+</table>
+
 ### Arithmetic operations
-Every pyotb object supports arithmetic operations, such as addition, subtraction, comparison...
-Consider an example where we want to perform the arithmetic operation `image1 * image2 - 2*image3`
 
-Using OTB, the following code works for 3-bands images :
+Every pyotb object supports arithmetic operations, such as addition, 
+subtraction, comparison...
+Consider an example where we want to perform the arithmetic operation 
+`image1 * image2 - 2*image3`.
+
+<table>
+<tr>
+<th> OTB </th>
+<th> pyotb </th>
+</tr>
+<tr>
+<td>
+
 ```python
-import otbApplication
+import otbApplication as otb
 
-bmx = otbApplication.Registry.CreateApplication('BandMathX')
-bmx.SetParameterStringList('il', ['image1.tif', 'image2.tif', 'image3.tif'])  # all images are 3-bands
-exp = 'im1b1*im2b1 - 2*im3b1; im1b2*im2b2 - 2*im3b2; im1b3*im2b3 - 2*im3b3'
+bmx = otb.Registry.CreateApplication(
+    'BandMathX'
+)
+bmx.SetParameterStringList(
+    'il', 
+    ['im1.tif', 'im2.tif', 'im3.tif']
+)
+exp = ('im1b1*im2b1-2*im3b1; '
+       'im1b2*im2b2-2*im3b2; '
+       'im1b3*im2b3-2*im3b3')
 bmx.SetParameterString('exp', exp)
-bmx.SetParameterString('out', 'output.tif')
-bmx.SetParameterOutputImagePixelType('out', otbApplication.ImagePixelType_uint8)
+bmx.SetParameterString(
+    'out', 
+    'output.tif'
+)
+bmx.SetParameterOutputImagePixelType(
+    'out', 
+    otb.ImagePixelType_uint8
+)
 bmx.ExecuteAndWriteOutput()
 ```
 
-With pyotb, the following works with images of any number of bands :
+Note: code limited to 3-bands images.
+
+</td>
+<td>
+
 ```python
 import pyotb
 
-# transforming filepaths to pyotb objects
-input1, input2, input3 = pyotb.Input('image1.tif'), pyotb.Input('image2.tif') , pyotb.Input('image3.tif')
+# filepaths --> pyotb objects
+in1 = pyotb.Input('im1.tif')
+in2 = pyotb.Input('im2.tif')
+in3 = pyotb.Input('im3.tif')
 
-res = input1 * input2 - 2 * input2
-res.write('output.tif', pixel_type='uint8')
+res = in1 * in2 - 2 * in3
+res.write(
+    'output.tif', 
+    pixel_type='uint8'
+)
 ```
 
+Note: works with any number of bands.
+
+</td>
+</tr>
+</table>
+
 ### Slicing
 
-Using OTB, for selection bands or ROI, the code looks like:
+<table>
+<tr>
+<th> OTB </th>
+<th> pyotb </th>
+</tr>
+<tr>
+<td>
+
+
 ```python
-import otbApplication
-
-# selecting first 3 bands
-extracted = otbApplication.Registry.CreateApplication('ExtractROI')
-extracted.SetParameterString('in', 'my_image.tif')
-extracted.SetParameterStringList('cl', ['Channel1', 'Channel2', 'Channel3'])
-extracted.Execute()
-
-# selecting 1000x1000 subset
-extracted = otbApplication.Registry.CreateApplication('ExtractROI')
-extracted.SetParameterString('in', 'my_image.tif')
-extracted.SetParameterString('mode', 'extent')
-extracted.SetParameterString('mode.extent.unit', 'pxl')
-extracted.SetParameterFloat('mode.extent.ulx', 0)
-extracted.SetParameterFloat('mode.extent.uly', 0)
-extracted.SetParameterFloat('mode.extent.lrx', 999)
-extracted.SetParameterFloat('mode.extent.lry', 999)
-extracted.Execute()
+import otbApplication as otb
+
+# first 3 channels
+app = otb.Registry.CreateApplication(
+    'ExtractROI'
+)
+app.SetParameterString(
+    'in', 'my_image.tif'
+)
+app.SetParameterStringList(
+    'cl', 
+    ['Channel1', 'Channel2', 'Channel3']
+)
+app.Execute()
+
+# 1000x1000 roi
+app = otb.Registry.CreateApplication(
+    'ExtractROI'
+)
+app.SetParameterString(
+    'in', 'my_image.tif'
+)
+app.SetParameterString(
+    'mode', 'extent'
+)
+app.SetParameterString(
+    'mode.extent.unit', 'pxl'
+)
+app.SetParameterFloat(
+    'mode.extent.ulx', 0
+)
+app.SetParameterFloat(
+    'mode.extent.uly', 0
+)
+app.SetParameterFloat(
+    'mode.extent.lrx', 999
+)
+app.SetParameterFloat(
+    'mode.extent.lry', 999
+)
+app.Execute()
 ```
 
-Instead, using pyotb: 
+</td>
+<td>
 
 ```python
 import pyotb
 
-# transforming filepath to pyotb object
+# filepath --> pyotb object
 inp = pyotb.Input('my_image.tif')
 
-extracted = inp[:, :, :3]  # selecting first 3 bands
-extracted = inp[:1000, :1000]  # selecting 1000x1000 subset
-```
\ No newline at end of file
+extracted = inp[:, :, :3]  # Bands 1,2,3
+extracted = inp[:1000, :1000]  # ROI
+```
+
+</td>
+</tr>
+</table>
\ No newline at end of file
diff --git a/doc/examples/nodata_mean.md b/doc/examples/nodata_mean.md
index 9ec49002294cf6f66950ce989ac3a447070c32dd..288be6e8fb48315ff946d0e9b5505fb49e9c4a58 100644
--- a/doc/examples/nodata_mean.md
+++ b/doc/examples/nodata_mean.md
@@ -1,7 +1,9 @@
 ### Compute the mean of several rasters, taking into account NoData
-Let's consider we have at disposal 73 NDVI rasters for a year, where clouds have been masked with NoData (nodata value of -10 000 for example).
+Let's consider we have 73 NDVI rasters for a year at our disposal, where clouds 
+have been masked with NoData (for example, a nodata value of -10 000).
 
-Goal: compute the mean across time (keeping the spatial dimension) of the NDVIs, excluding cloudy pixels. Piece of code to achieve that:
+Goal: compute the mean across time (keeping the spatial dimension) of the 
+NDVIs, excluding cloudy pixels. Piece of code to achieve that:
 
 ```python
 import pyotb
@@ -13,14 +15,21 @@ ndvis = [pyotb.Input(path) for path in ndvi_paths]
 summed = sum([pyotb.where(ndvi != nodata, ndvi, 0) for ndvi in ndvis])
 
 # Printing the generated BandMath expression
-print(summed.exp)  # this returns a very long exp: "0 + ((im1b1 != -10000) ? im1b1 : 0) + ((im2b1 != -10000) ? im2b1 : 0) + ... + ((im73b1 != -10000) ? im73b1 : 0)"
+print(summed.exp)
+# this returns a very long exp: 
+# "0 + ((im1b1 != -10000) ? im1b1 : 0) + ((im2b1 != -10000) ? im2b1 : 0) + ... 
+# ... + ((im73b1 != -10000) ? im73b1 : 0)"
 
 # For each pixel location, getting the count of valid pixels
 count = sum([pyotb.where(ndvi == nodata, 0, 1) for ndvi in ndvis])
 
-mean = summed / count  # BandMath exp of this is very long: "(0 + ((im1b1 != -10000) ? im1b1 : 0) + ... + ((im73b1 != -10000) ? im73b1 : 0)) / (0 + ((im1b1 == -10000) ? 0 : 1) + ... + ((im73b1 == -10000) ? 0 : 1))"
+mean = summed / count
+# BandMath exp of this is very long: 
+# "(0 + ((im1b1 != -10000) ? im1b1 : 0) + ... 
+# + ((im73b1 != -10000) ? im73b1 : 0)) / (0 + ((im1b1 == -10000) ? 0 : 1) + ...
+# + ((im73b1 == -10000) ? 0 : 1))"
 mean.write('ndvi_annual_mean.tif')
 ```
 
-Note that no actual computation is executed before the last line where the result is written to disk.
-
+Note that no actual computation is executed before the last line where the 
+result is written to disk.
diff --git a/doc/examples/pleiades.md b/doc/examples/pleiades.md
index 32d0447bba7361013d19d17bfe6f2a37074b7a7c..120a5542d03f519fcb54ff583c484f9212597aa1 100644
--- a/doc/examples/pleiades.md
+++ b/doc/examples/pleiades.md
@@ -1,20 +1,41 @@
 ### Process raw Pleiades data
-This is a common case of Pleiades data preprocessing : optical calibration -> orthorectification -> pansharpening
+This is a common case of Pleiades data preprocessing : 
+*optical calibration -> orthorectification -> pansharpening*
 
 ```python
 import pyotb
 srtm = '/media/data/raster/nasa/srtm_30m'
 geoid = '/media/data/geoid/egm96.grd'
 
-pan =  pyotb.OpticalCalibration('IMG_PHR1A_P_001/DIM_PHR1A_P_201509011347379_SEN_1791374101-001.XML', level='toa')
-ms = pyotb.OpticalCalibration('IMG_PHR1A_MS_002/DIM_PHR1A_MS_201509011347379_SEN_1791374101-002.XML', level='toa')
+pan =  pyotb.OpticalCalibration(
+    'IMG_PHR1A_P_001/DIM_PHR1A_P_201509011347379_SEN_1791374101-001.XML', 
+    level='toa'
+)
+ms = pyotb.OpticalCalibration(
+    'IMG_PHR1A_MS_002/DIM_PHR1A_MS_201509011347379_SEN_1791374101-002.XML', 
+    level='toa'
+)
 
-pan_ortho = pyotb.OrthoRectification({'io.in': pan, 'elev.dem': srtm, 'elev.geoid': geoid})
-ms_ortho = pyotb.OrthoRectification({'io.in': ms, 'elev.dem': srtm, 'elev.geoid': geoid})
+pan_ortho = pyotb.OrthoRectification({
+    'io.in': pan, 
+    'elev.dem': srtm, 
+    'elev.geoid': geoid
+})
+ms_ortho = pyotb.OrthoRectification({
+    'io.in': ms, 
+    'elev.dem': srtm, 
+    'elev.geoid': geoid
+})
 
-pxs = pyotb.BundleToPerfectSensor(inp=pan_ortho, inxs=ms_ortho, method='bayes', mode='default')
+pxs = pyotb.BundleToPerfectSensor(
+    inp=pan_ortho, 
+    inxs=ms_ortho, 
+    method='bayes', 
+    mode='default'
+)
 
 exfn = '?gdal:co:COMPRESS=DEFLATE&gdal:co:PREDICTOR=2&gdal:co:BIGTIFF=YES'
-# Here we trigger every app in the pipeline and the process is blocked until result is written to disk
-pxs.write('pxs_image.tif', pixel_type='uint16', filename_extension=exfn)
+# Here we trigger every app in the pipeline and the process is blocked until 
+# result is written to disk
+pxs.write('pxs_image.tif', pixel_type='uint16', ext_fname=exfn)
 ```
diff --git a/doc/extra.css b/doc/extra.css
new file mode 100644
index 0000000000000000000000000000000000000000..ec4d4bbd55f0bdf33df78446fa2437cfa8a69def
--- /dev/null
+++ b/doc/extra.css
@@ -0,0 +1,11 @@
+.rst-content div[class^=highlight] {
+    border: 0px;
+}
+
+.rst-content div[class^=highlight] pre {
+    padding: 0px;
+}
+
+.rst-content pre code {
+    background: #eeffcc;
+}
diff --git a/doc/features.md b/doc/features.md
index 7bc78ce1e318cbb3a9151dda019f9369d0b4631d..3a55c038f5218a226d4c00481ff262b000f3548d 100644
--- a/doc/features.md
+++ b/doc/features.md
@@ -1,8 +1,12 @@
 ## Arithmetic operations
-Every pyotb object supports arithmetic operations, such as addition, subtraction, comparison...
-Consider an example where we want to compute a vegeteation mask from NDVI, i.e. the arithmetic operation `(nir - red) / (nir + red) > 0.3`
+
+Every pyotb object supports arithmetic operations, such as addition, 
+subtraction, comparison...
+Consider an example where we want to compute a vegetation mask from NDVI, 
+i.e. the arithmetic operation `(nir - red) / (nir + red) > 0.3`.
 
 With pyotb, one can simply do :
+
 ```python
 import pyotb
 
@@ -10,11 +14,14 @@ import pyotb
 nir, red = pyotb.Input('nir.tif'), pyotb.Input('red.tif')
 
 res = (nir - red) / (nir + red) > 0.3
-print(res.exp)  # prints the BandMath expression: "((im1b1 - im2b1) / (im1b1 + im2b1)) > 0.3 ? 1 : 0"
+# Prints the BandMath expression:
+# "((im1b1 - im2b1) / (im1b1 + im2b1)) > 0.3 ? 1 : 0"
+print(res.exp)
 res.write('vegetation_mask.tif', pixel_type='uint8')
 ```
 
 ## Slicing
+
 pyotb objects support slicing in a Python fashion :
 
 ```python
@@ -25,20 +32,154 @@ inp = pyotb.Input('my_image.tif')
 
 inp[:, :, :3]  # selecting first 3 bands
 inp[:, :, [0, 1, 4]]  # selecting bands 1, 2 & 5
-inp[:1000, :1000]  # selecting 1000x1000 subset, same as inp[:1000, :1000, :] 
+inp[:, :, 1:-1]  # removing first and last band
+inp[:, :, ::2]  # selecting one band every 2 bands
+inp[:100, :100]  # selecting 100x100 subset, same as inp[:100, :100, :] 
 inp[:100, :100].write('my_image_roi.tif')  # write cropped image to disk
 ```
 
-## Shape attributes
-You can access the shape of any in-memory pyotb object.
+## Retrieving a pixel location in image coordinates
+
+One can retrieve a pixel location in image coordinates (i.e. row and column 
+indices) using `get_rowcol_from_xy()`:
 
 ```python
-import pyotb
+inp.get_rowcol_from_xy(760086.0, 6948092.0)  # (333, 5)
+```
 
-# transforming filepath to pyotb object
-inp = pyotb.Input('my_image.tif')
+## Reading a pixel value
+
+One can read a pixel value of a pyotb object using brackets, as if it were a 
+regular array. A list with one value per band is returned:
+
+```python
+inp[10, 10]  # [217, 202, 182, 255]
+```
+
+!!! warning
+
+    Accessing multiple pixel values is not computationally efficient. Please 
+    use this with moderation, or consider numpy or pyotb applications to 
+    process blocks of pixels efficiently.
+
+## Attributes
+
+### Shape
+
+The shape of pyotb objects can be retrieved using `shape`.
+
+```python
 print(inp[:1000, :500].shape)  # (1000, 500, 4)
 ```
 
+### Pixel type
+
+The pixel type of pyotb objects can be retrieved using `dtype`.
+
+```python
+inp.dtype  # e.g. 'uint8'
+```
+
+!!! note
+
+    `dtype` returns a `str` corresponding to the values accepted by the 
+    `pixel_type` argument of `write()`.
+
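+For instance, the value can be fed back into `write()` (a minimal sketch; 
+`same_type.tif` is just a hypothetical output path):
+
+```python
+inp.write('same_type.tif', pixel_type=inp.dtype)
+```
+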
+### Transform
+
+The transform, as defined in GDAL, can be retrieved with the `transform` 
+attribute:
+
+```python
+inp.transform  # (6.0, 0.0, 760056.0, 0.0, -6.0, 6946092.0)
+```
+
+### Metadata
+
+Images metadata can be retrieved with the `metadata` attribute:
+
+```python
+print(inp.metadata)
+```
+
+Gives: 
+
+```
+{
+  'DataType': 1.0, 
+  'DriverLongName': 'GeoTIFF', 
+  'DriverShortName': 'GTiff', 
+  'GeoTransform': (760056.0, 6.0, 0.0, 6946092.0, 0.0, -6.0),
+  'LowerLeftCorner': (760056.0, 6944268.0), 
+  'LowerRightCorner': (761562.0, 6944268.0), 
+  'AREA_OR_POINT': 'Area', 
+  'TIFFTAG_SOFTWARE': 'CSinG - 13 SEPTEMBRE 2012', 
+  'ProjectionRef': 'PROJCS["RGF93 v1 / Lambert-93",\n...',
+  'ResolutionFactor': 0, 
+  'SubDatasetIndex': 0, 
+  'UpperLeftCorner': (760056.0, 6946092.0), 
+  'UpperRightCorner': (761562.0, 6946092.0), 
+  'TileHintX': 251.0, 
+  'TileHintY': 8.0
+}
+```
+
+## Information
+
+The information fetched by the `ReadImageInfo` OTB application is available 
+through `get_info()`:
+
+```python
+print(inp.get_info())
+```
+
+Gives:
+
+```
+{
+  'indexx': 0, 
+  'indexy': 0, 
+  'sizex': 251, 
+  'sizey': 304, 
+  'spacingx': 6.0, 
+  'spacingy': -6.0, 
+  'originx': 760059.0, 
+  'originy': 6946089.0, 
+  'estimatedgroundspacingx': 5.978403091430664, 
+  'estimatedgroundspacingy': 5.996793270111084, 
+  'numberbands': 4, 
+  'datatype': 'unsigned_char', 
+  'ullat': 0.0, 
+  'ullon': 0.0, 
+  'urlat': 0.0, 
+  'urlon': 0.0, 
+  'lrlat': 0.0, 
+  'lrlon': 0.0, 
+  'lllat': 0.0, 
+  'lllon': 0.0, 
+  'rgb.r': 0, 
+  'rgb.g': 1, 
+  'rgb.b': 2, 
+  'projectionref': 'PROJCS["RGF93 v1 ..."EPSG","2154"]]',
+  'gcp.count': 0
+}
+```
+
+## Statistics
+
+Image statistics can be computed on-the-fly using `get_statistics()`:
+
+```python
+print(inp.get_statistics())
+```
 
+Gives:
 
+```
+{
+  'out.mean': [79.5505, 109.225, 115.456, 249.349], 
+  'out.min': [33, 64, 91, 47], 
+  'out.max': [255, 255, 230, 255], 
+  'out.std': [51.0754, 35.3152, 23.4514, 20.3827]
+}
+```
\ No newline at end of file
diff --git a/doc/functions.md b/doc/functions.md
index 070ba0be35431fb2fe3191a291f85c5251edc09d..1e82c58d0c125fb68a0dac2f1b5ef742760cae4c 100644
--- a/doc/functions.md
+++ b/doc/functions.md
@@ -1,26 +1,47 @@
-Some functions have been written, entirely based on OTB, to mimic the behavior of some well-known numpy functions.
+Some functions have been written, entirely based on OTB, to mimic the behavior 
+of some well-known numpy functions.
+
 ## pyotb.where
+
 Equivalent of `numpy.where`.
-It is the equivalent of the muparser syntax `condition ? x : y` that can be used in OTB's BandMath.
+It is the equivalent of the muparser syntax `condition ? x : y` that can be 
+used in OTB's BandMath.
 
 ```python
 import pyotb
 
 # transforming filepaths to pyotb objects
-labels, image1, image2 = pyotb.Input('labels.tif'), pyotb.Input('image1.tif') , pyotb.Input('image2.tif')
+labels = pyotb.Input('labels.tif')
+image1 = pyotb.Input('image1.tif')
+image2 = pyotb.Input('image2.tif')
 
 # If labels = 1, returns image1. Else, returns image2
-res = pyotb.where(labels == 1, image1, image2)  # this would also work: pyotb.where(labels == 1, 'image1.tif', 'image2.tif')
+res = pyotb.where(labels == 1, image1, image2)
+# this would also work: `pyotb.where(labels == 1, 'image1.tif', 'image2.tif')`
 
-# A more complex example
-# If labels = 1, returns image1. If labels = 2, returns image2. If labels = 3, returns 3. Else 0
-res = pyotb.where(labels == 1, image1,
-                  pyotb.where(labels == 2, image2,
-                              pyotb.where(labels == 3, 3, 0)))
+# A more complex example:
+# - If labels = 1 --> returns image1,
+# - If labels = 2 --> returns image2,
+# - If labels = 3 --> returns 3.0,
+# - Else, returns 0.0
+res = pyotb.where(
+    labels == 1, 
+    image1,
+    pyotb.where(
+        labels == 2, 
+        image2, 
+        pyotb.where(
+            labels == 3, 
+            3.0, 
+            0.0
+        )
+    )
+)
 
 ```
 
 ## pyotb.clip
+
 Equivalent of `numpy.clip`. Clip (limit) the values in a raster to a range.
 
 ```python
@@ -30,18 +51,20 @@ res = pyotb.clip('my_image.tif', 0, 255)  # clips the values between 0 and 255
 ```
 
 ## pyotb.all
+
 Equivalent of `numpy.all`.
 
-For only one image, this function checks that all bands of the image are True (i.e. !=0) and outputs
-a singleband boolean raster.
-For several images, this function checks that all images are True (i.e. !=0) and outputs
-a boolean raster, with as many bands as the inputs.
+For only one image, this function checks that all bands of the image are True 
+(i.e. !=0) and outputs a single band boolean raster.
+For several images, this function checks that all images are True (i.e. !=0) 
+and outputs a boolean raster, with as many bands as the inputs.
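+
+A minimal sketch of the single-image case (`mask.tif` is just a hypothetical 
+multi-band raster):
+
+```python
+import pyotb
+
+valid = pyotb.all('mask.tif')  # single-band raster, 1 where every band != 0
+valid.write('all_bands_valid.tif', pixel_type='uint8')
+```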
 
 
 ## pyotb.any
+
 Equivalent of `numpy.any`.
 
-For only one image, this function checks that at least one band of the image is True (i.e. !=0) and outputs
-a singleband boolean raster.
-For several images, this function checks that at least one of the images is True (i.e. !=0) and outputs
-a boolean raster, with as many bands as the inputs.
\ No newline at end of file
+For only one image, this function checks that at least one band of the image 
+is True (i.e. !=0) and outputs a single band boolean raster.
+For several images, this function checks that at least one of the images is 
+True (i.e. !=0) and outputs a boolean raster, with as many bands as the inputs.
\ No newline at end of file
diff --git a/doc/index.md b/doc/index.md
index fa29918aa9d08c9b401e5140f56078bcd34803b9..4ecfaf4462efc96263c3e39b4de2237bc6968d16 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -1,26 +1,43 @@
-# Pyotb: Orfeo Toolbox for Python
+# pyotb: Orfeo Toolbox for Python
 
-pyotb is a Python extension of Orfeo Toolbox. It has been built on top of the existing Python API of OTB, in order 
+pyotb is a Python extension of Orfeo Toolbox. It has been built on top of the 
+existing Python API of OTB, in order 
 to make OTB more Python friendly.
 
 # Table of Contents
 
 ## Get started
+
 - [Installation](installation.md)
-- [How to use pyotb](quickstart.md)
+- [Quick start](quickstart.md)
 - [Useful features](features.md)
-- [Functions](features.md)
+- [Functions](functions.md)
 - [Interaction with Python libraries (numpy, rasterio, tensorflow)](interaction.md)
 
 ## Examples
+
 - [Pleiades data processing](examples/pleiades.md)
 - [Computing the mean of several rasters with NoData](examples/nodata_mean.md)
 
 ## Advanced use
+
 - [Comparison between pyotb and OTB native library](comparison_otb.md)
+- [Summarize applications](summarize.md)
 - [OTB versions](otb_versions.md)
 - [Managing loggers](managing_loggers.md)
 - [Troubleshooting & limitations](troubleshooting.md)
 
-
 ## API
+
+- See the API reference. If you have any doubts or questions, feel free to ask
+on GitHub or GitLab!
+
+## Contribute
+
+Contributions are welcome!
+Open a PR/MR, or file an issue if you spot a bug or have any suggestion:
+
+- [GitHub](https://github.com/orfeotoolbox/pyotb) 
+- [Orfeo ToolBox GitLab instance](https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb).
+
+Thank you!
\ No newline at end of file
diff --git a/doc/installation.md b/doc/installation.md
index dd7b729f3281825483d8937bf5022bcf7b9e0554..404d7bd7d228ec6a0e850b7dcd8ba214f57b69c3 100644
--- a/doc/installation.md
+++ b/doc/installation.md
@@ -1,11 +1,28 @@
+## Prerequisites
 
-## Requirements
-- Python>=3.5 and NumPy
-- Orfeo ToolBox python API (instructions available [on the official website](https://www.orfeo-toolbox.org/CookBook/Installation.html))
+Requirements:
+
+- Python >= 3.7 and NumPy
+- Orfeo ToolBox binaries (follow these
+ [instructions](https://www.orfeo-toolbox.org/CookBook/Installation.html))
+- Orfeo ToolBox Python bindings (follow these
+ [instructions](https://www.orfeo-toolbox.org/CookBook/Installation.html#python-bindings))
+
+## Install with pip
 
-## Installation
 ```bash
 pip install pyotb --upgrade
 ```
 
-For Python>=3.6, latest version available is pyotb 1.5.0. For Python 3.5, latest version available is pyotb 1.2.2
+For development, use the following:
+
+```bash
+git clone https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb
+cd pyotb
+pip install -e ".[dev]"
+```
+
+## Old versions
+
+If you need compatibility with Python 3.6, install `pyotb<2.0`; for 
+Python 3.5, use `pyotb==1.2.2`.  
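+
+For example:
+
+```bash
+pip install "pyotb<2.0"  # last series compatible with Python 3.6
+```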
diff --git a/doc/interaction.md b/doc/interaction.md
index d44ca68d240ed7561773f4cf244d24632b4fab2f..dcb87290936f6f60b65e7b66e4def90081e8c02d 100644
--- a/doc/interaction.md
+++ b/doc/interaction.md
@@ -1,17 +1,24 @@
-## Export to Numpy
+## Numpy
+
+### Export to numpy arrays
 
 pyotb objects can be exported to numpy array.
+
 ```python
 import pyotb
 import numpy as np
 
-calibrated = pyotb.OpticalCalibration('image.tif', level='toa')  # this is a pyotb object
-arr = np.asarray(calibrated)  # same as calibrated.to_numpy()
+# The following is a pyotb object
+calibrated = pyotb.OpticalCalibration('image.tif', level='toa')
 
-```
+# The following is a numpy array
+arr = np.asarray(calibrated)
 
+# Note that the following is equivalent:
+arr = calibrated.to_numpy()
+```
 
-## Interaction with Numpy
+### Interact with numpy functions
 
 pyotb objects can be transparently used in numpy functions.
 
@@ -21,24 +28,30 @@ For example:
 import pyotb
 import numpy as np
 
-inp = pyotb.Input('image.tif')  # this is a pyotb object
+# The following is a pyotb object
+inp = pyotb.Input('image.tif')
 
-# Creating a numpy array of noise
-white_noise = np.random.normal(0, 50, size=inp.shape)  # this is a numpy object
+# Creating a numpy array of noise. The following is a numpy object
+white_noise = np.random.normal(0, 50, size=inp.shape)
 
 # Adding the noise to the image
-noisy_image = inp + white_noise  # magic: this is a pyotb object that has the same georeference as input. 
-                                 # `np.add(inp, white_noise)` would have worked the same
+noisy_image = inp + white_noise
+# Magically, this is a pyotb object that has the same geo-reference as `inp`. 
+# Note that `np.add(inp, white_noise)` would have worked the same
+
+# Finally we can write the result like any pyotb object
 noisy_image.write('image_plus_noise.tif')
 ```
-Limitations : 
 
-- The whole image is loaded into memory
-- The georeference can not be modified. Thus, numpy operations can not change the image or pixel size
+!!! warning
 
+    - The whole image is loaded into memory
+    - The georeference cannot be modified. Thus, numpy operations cannot 
+    change the image or pixel size
 
-## Export to rasterio
-pyotb objects can also be exported in a format that is usable by rasterio.
+## Rasterio
+
+pyotb objects can also be exported in a format usable by rasterio.
 
 For example:
 
@@ -48,32 +61,42 @@ import rasterio
 from scipy import ndimage
 
 # Pansharpening + NDVI + creating bare soils mask
-pxs = pyotb.BundleToPerfectSensor(inp='panchromatic.tif', inxs='multispectral.tif')
-ndvi = pyotb.RadiometricIndices({'in': pxs, 'channels.red': 3, 'channels.nir': 4, 'list': 'Vegetation:NDVI'})
+pxs = pyotb.BundleToPerfectSensor(
+    inp='panchromatic.tif', 
+    inxs='multispectral.tif'
+)
+ndvi = pyotb.RadiometricIndices({
+    'in': pxs, 
+    'channels.red': 3, 
+    'channels.nir': 4, 
+    'list': 'Vegetation:NDVI'
+})
 bare_soil_mask = (ndvi < 0.3)
 
 # Exporting the result as array & profile usable by rasterio
 mask_array, profile = bare_soil_mask.to_rasterio()
 
-# Doing something in Python that is not possible with OTB, e.g. gathering the contiguous groups of pixels
-# with an integer index
+# Doing something in Python that is not possible with OTB, e.g. gathering 
+# the contiguous groups of pixels with an integer index
 labeled_mask_array, nb_groups = ndimage.label(mask_array)
 
 # Writing the result to disk
 with rasterio.open('labeled_bare_soil.tif', 'w', **profile) as f:
     f.write(labeled_mask_array)
-
 ```
-This way of exporting pyotb objects is more flexible that exporting to numpy, as the user gets the `profile` dictionary. 
-If the georeference or pixel size is modified, the user can update the `profile` accordingly.
-
 
-## Interaction with Tensorflow
+This way of exporting pyotb objects is more flexible than exporting to numpy, 
+as the user gets the `profile` dictionary. 
+If the georeference or pixel size is modified, the user can update the 
+`profile` accordingly.
 
-We saw that numpy operations had some limitations. To bypass those limitations, it is possible to use some Tensorflow operations on pyotb objects.
+## Tensorflow
 
+We saw that numpy operations had some limitations. To bypass those 
+limitations, it is possible to use some Tensorflow operations on pyotb objects.
 
-You need a working installation of OTBTF >=3.0 for this and then the code is like this:
+You need a working installation of OTBTF >= 3.0 for this; the code then looks 
+like this:
 
 ```python
 import pyotb
@@ -84,28 +107,37 @@ def scalar_product(x1, x2):
     return tf.reduce_sum(tf.multiply(x1, x2), axis=-1)
 
 # Compute the scalar product
-res = pyotb.run_tf_function(scalar_product)('image1.tif', 'image2.tif')  # magic: this is a pyotb object
+res = pyotb.run_tf_function(scalar_product)('image1.tif', 'image2.tif')  
+
+# Magically, `res` is a pyotb object
 res.write('scalar_product.tif')
 ```
 
-For some easy syntax, one can use `pyotb.run_tf_function` as a function decorator, such as:
+For a more concise syntax, one can use `pyotb.run_tf_function` as a function 
+decorator:
+
 ```python
 import pyotb
 
-@pyotb.run_tf_function  # The decorator enables the use of pyotb objects as inputs/output of the function
+# The `pyotb.run_tf_function` decorator enables the use of pyotb objects as 
+# inputs/output of the function
+@pyotb.run_tf_function
 def scalar_product(x1, x2):
     import tensorflow as tf
     return tf.reduce_sum(tf.multiply(x1, x2), axis=-1)
 
-res = scalar_product('image1.tif', 'image2.tif')  # magic: this is a pyotb object
+res = scalar_product('image1.tif', 'image2.tif')
+# Magically, `res` is a pyotb object
 ```
 
 Advantages :
 
-- The process supports streaming, hence the whole image is **not** loaded into memory
+- The process supports streaming, hence the whole image is **not** loaded into 
+  memory
 - Can be integrated in OTB pipelines
 
-Limitations :
+!!! warning
 
-- It is not possible to use the tensorflow python API inside a script where OTBTF is used because of compilation issues 
-between Tensorflow and OTBTF, i.e. `import tensorflow` doesn't work in a script where OTBTF apps have been initialized
+    Due to compilation issues in OTBTF before version 4.0.0, Tensorflow and 
+    pyotb could not be imported in the same Python code. This has been fixed 
+    in OTBTF 4.0.0.
diff --git a/doc/managing_loggers.md b/doc/managing_loggers.md
index f329d5ed6e06c27633b58fd1005b8e04a5959f56..7220af482d781ad00fb62a69557def803b8d2d71 100644
--- a/doc/managing_loggers.md
+++ b/doc/managing_loggers.md
@@ -1,25 +1,32 @@
 ## Managing loggers
 
-Several environment variables are used in order to adjust logger level and behaviour. It should be set before importing pyotb.  
+Several environment variables are used to adjust the logger level and 
+behaviour. They should be set before importing pyotb.  
 
 - `OTB_LOGGER_LEVEL` : used to set the default OTB logger level.
-- `PYOTB_LOGGER_LEVEL` : used to set the pyotb logger level. if not set, `OTB_LOGGER_LEVEL` will be used.
+- `PYOTB_LOGGER_LEVEL` : used to set the pyotb logger level. If not set, 
+  `OTB_LOGGER_LEVEL` will be used.
 
 If none of those two variables is set, the logger level will be set to 'INFO'.  
 Available levels are : DEBUG, INFO, WARNING, ERROR, CRITICAL  
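+
+For instance, a minimal sketch of setting those variables before import (the 
+chosen levels are only an example):
+
+```python
+import os
+
+# Must be set before `import pyotb`
+os.environ['OTB_LOGGER_LEVEL'] = 'WARNING'   # keep OTB itself rather quiet
+os.environ['PYOTB_LOGGER_LEVEL'] = 'DEBUG'   # verbose pyotb logs
+
+import pyotb
+```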
 
-You may also change the logger level after import (for pyotb only) with the function `set_logger_level`.
+You may also change the logger level after import (for pyotb only) with the 
+function `set_logger_level`.
+
 ```python
 import pyotb
 pyotb.set_logger_level('DEBUG')
 ```
 
-Bonus : in some cases, yo may want to silence the GDAL driver logger (for example you will see a lot of errors when reading GML files with OGR).  
-One useful trick is to redirect these logs to a file. This can be done using the variable `CPL_LOG`.
+Bonus: in some cases, you may want to silence the GDAL driver logger (for 
+example, you will see a lot of errors when reading GML files with OGR).  
+One useful trick is to redirect these logs to a file. This can be done using 
+the `CPL_LOG` variable.
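+
+A minimal sketch (the log file path is arbitrary):
+
+```python
+import os
+
+# Redirect GDAL/OGR driver messages to a file instead of the console
+os.environ['CPL_LOG'] = '/tmp/gdal_errors.log'
+
+import pyotb
+```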
 
 ## Named applications in logs
 
 It is possible to change an app name in order to track it easily in the logs :  
+
 ```python
 import os
 os.environ['PYOTB_LOGGER_LEVEL'] = 'DEBUG'
@@ -29,8 +36,9 @@ bm = pyotb.BandMath(['image.tif'], exp='im1b1 * 100')
 bm.name = 'CustomBandMathApp'
 bm.execute()
 ```
+
 ```text
 2022-06-14 14:22:38 (DEBUG) [pyOTB] CustomBandMathApp: run execute() with parameters={'exp': 'im1b1 * 100', 'il': ['/home/vidlb/Téléchargements/test_4b.tif']}
 2022-06-14 14:22:38 (INFO) BandMath: Image #1 has 4 components
 2022-06-14 14:22:38 (DEBUG) [pyOTB] CustomBandMathApp: execution succeeded
-```
\ No newline at end of file
+```
diff --git a/doc/otb_versions.md b/doc/otb_versions.md
index 6b06c28135d25a02bd792069f4d417d4ec64babd..aa8889d7a3b21edfe30f142e88d32412678b69fa 100644
--- a/doc/otb_versions.md
+++ b/doc/otb_versions.md
@@ -1,21 +1,27 @@
 ## System with multiple OTB versions
 
-If you want to quickly switch between OTB versions, or override the default system version, you may use the `OTB_ROOT` env variable :  
+If you want to quickly switch between OTB versions, or override the default 
+system version, you may use the `OTB_ROOT` env variable:
+
 ```python
 import os
 # This is equivalent to "[set/export] OTB_ROOT=/opt/otb" before launching python
 os.environ['OTB_ROOT'] = '/opt/otb'
 import pyotb
 ```
+
 ```text
 2022-06-14 13:59:03 (INFO) [pyOTB] Preparing environment for OTB in /opt/otb
 2022-06-14 13:59:04 (INFO) [pyOTB] Successfully loaded 126 OTB applications
 ```
 
-If you try to import pyotb without having set environment, it will try to find any OTB version installed on your system:  
+If you try to import pyotb without having set the environment, it will try to 
+find any OTB version installed on your system:
+
 ```python
 import pyotb
 ```
+
 ```text
 2022-06-14 13:55:41 (INFO) [pyOTB] Failed to import OTB. Searching for it...
 2022-06-14 13:55:41 (INFO) [pyOTB] Found /opt/otb/lib/otb/
@@ -24,21 +30,30 @@ import pyotb
 2022-06-14 13:55:43 (INFO) [pyOTB] Preparing environment for OTB in /home/otbuser/Applications/OTB-8.0.1-Linux64
 2022-06-14 13:55:44 (INFO) [pyOTB] Successfully loaded 117 OTB applications
 ```
+
 Here is the path precedence for this automatic env configuration :
+
 ```text
     OTB_ROOT env variable > python bindings directory
     OR search for releases installations    :    HOME
     OR (for linux)                          :    /opt/otbtf > /opt/otb > /usr/local > /usr
     OR (for windows)                        :    C:/Program Files
 ```
-N.B. :  in case `otbApplication` is found in `PYTHONPATH` (and if `OTB_ROOT` was not set), the OTB which the python API is linked to will be used.  
+
+!!! note
+
+    When `otbApplication` is found in `PYTHONPATH` (and `OTB_ROOT` is not 
+    set), the OTB installation that the Python API is linked to will be used.
 
 ## Fresh OTB installation
 
-If you've just installed OTB binaries in a Linux environment, you may encounter an error at first import, pyotb will help you fix it :
+If you've just installed OTB binaries in a Linux environment, you may 
+encounter an error at first import; pyotb will help you fix it:
+
 ```python
 import pyotb
 ```
+
 ```text
 2022-06-14 14:00:34 (INFO) [pyOTB] Preparing environment for OTB in /home/otbuser/Applications/OTB-8.0.1-Linux64
 2022-07-07 16:56:04 (CRITICAL) [pyOTB] An error occurred while importing OTB Python API
diff --git a/doc/quickstart.md b/doc/quickstart.md
index 294d5123263ebfcd08bce4bb569e7d3756924b4f..fb15188d8f06ddca795b361b87485e9ef74f5f03 100644
--- a/doc/quickstart.md
+++ b/doc/quickstart.md
@@ -1,73 +1,206 @@
 ## Quickstart: running an OTB application with pyotb
-pyotb has been written so that it is more convenient to run an application in Python.
+
+pyotb has been written to make it more convenient to run OTB applications 
+from Python.
 
 You can pass the parameters of an application as a dictionary :
 
 ```python
 import pyotb
-resampled = pyotb.RigidTransformResample({'in': 'my_image.tif', 'transform.type.id.scaley': 0.5,
-                                          'interpolator': 'linear', 'transform.type.id.scalex': 0.5})
+resampled = pyotb.RigidTransformResample({
+    'in': 'my_image.tif', 
+    'transform.type.id.scaley': 0.5,
+    'interpolator': 'linear', 
+    'transform.type.id.scalex': 0.5
+})
 ```
 
-Note that pyotb has a 'lazy' evaluation: it only performs operation when it is needed, i.e. results are written to disk.
-Thus, the previous line doesn't trigger the application.
+For now, `resampled` has not been executed. Indeed, pyotb uses 'lazy' 
+evaluation: applications are executed only when required, typically when 
+output images are written to disk.
 
-To actually trigger the application execution, you need to write the result to disk:
+To actually trigger the application execution, `write()` has to be called:
 
 ```python
 resampled.write('output.tif')  # this is when the application actually runs
 ```
 
 ### Using Python keyword arguments
-It is also possible to use the Python keyword arguments notation for passing the parameters:
+
+One can use the Python keyword arguments notation for passing the parameters:
+
 ```python
 output = pyotb.SuperImpose(inr='reference_image.tif', inm='image.tif')
 ```
-is equivalent to:
+
+Which is equivalent to:
+
 ```python
 output = pyotb.SuperImpose({'inr': 'reference_image.tif', 'inm': 'image.tif'})
 ```
 
-Limitations : for this notation, python doesn't accept the parameter `in` or any parameter that contains a `.`. E.g., it is not possible to use `pyotb.RigidTransformResample(in=input_path...)` or `pyotb.VectorDataExtractROI(io.vd=vector_path...)`.
+!!! warning
+
+    For this notation, Python doesn't accept the parameter `in` or any 
+    parameter that contains dots (e.g. `io.in`). For `in` or other main 
+    input parameters of an OTB application, you may simply pass the value as 
+    the first argument; pyotb will guess the parameter name. For parameters 
+    that contain dots, you can either use a dictionary, or replace dots (`.`) 
+    with underscores (`_`).
+
+    Let's take the example of the `OrthoRectification` application of OTB, 
+    with the input image parameter named `io.in`:
+
+    Option #1, keyword-arg-free:
+
+    ```python
+    ortho = pyotb.OrthoRectification('my_image.tif')
+    ```
+    
+    Option #2, replacing dots with underscores in the parameter name:
+
+    ```python
+    ortho = pyotb.OrthoRectification(io_in='my_image.tif')
+    ``` 
 
 ## In-memory connections
-The big asset of pyotb is the ease of in-memory connections between apps.
 
-Let's start from our previous example. Consider the case where one wants to apply optical calibration and binary morphological dilatation 
-following the undersampling.
+One nice feature of pyotb is in-memory connection between apps. It relies on 
+the so-called [streaming](https://www.orfeo-toolbox.org/CookBook/C++/StreamingAndThreading.html)
+mechanism of OTB, which enables processing huge images with a limited memory 
+footprint.
+
+pyotb allows passing any application's output to another application. This 
+enables building pipelines composed of several applications.
+
+Let's start from our previous example. Consider the case where one wants to 
+resample the image, then apply optical calibration and binary morphological 
+dilation. We can write the following code to build a pipeline that will 
+generate the output in an end-to-end fashion, without being limited by the 
+input image size or writing temporary files.
 
-Using pyotb, you can pass the output of an app as input of another app :
 ```python
 import pyotb
 
-resampled = pyotb.RigidTransformResample({'in': 'my_image.tif', 'interpolator': 'linear', 
-                                          'transform.type.id.scaley': 0.5, 'transform.type.id.scalex': 0.5})
+resampled = pyotb.RigidTransformResample({
+    'in': 'my_image.tif', 
+    'interpolator': 'linear',
+    'transform.type.id.scaley': 0.5, 
+    'transform.type.id.scalex': 0.5
+})
+
+calibrated = pyotb.OpticalCalibration({
+    'in': resampled, 
+    'level': 'toa'
+}) 
+
+dilated = pyotb.BinaryMorphologicalOperation({
+    'in': calibrated, 
+    'out': 'output.tif', 
+    'filter': 'dilate',
+    'structype': 'ball', 
+    'xradius': 3, 
+    'yradius': 3
+})
+```
 
-calibrated = pyotb.OpticalCalibration({'in': resampled, 'level': 'toa'}) 
+We have just built our first pipeline! At this point, everything is symbolic: 
+no computation has been performed yet. To trigger the pipeline, one must call 
+the `write()` method on its last application:
 
-dilated = pyotb.BinaryMorphologicalOperation({'in': calibrated, 'out': 'output.tif', 'filter': 'dilate', 
-                                              'structype': 'ball', 'xradius': 3, 'yradius': 3})
-dilated.write('result.tif')
+```python
+dilated.write('output.tif')
 ```
 
+In the next section, we will detail how `write()` works. 
+
 ## Writing the result of an app
-Any pyotb object can be written to disk using the `write` method, e.g. :
+
+Any pyotb object can be written to disk using `write()`.
+
+Let's consider the following pyotb application instance:
 
 ```python
 import pyotb
+resampled = pyotb.RigidTransformResample({
+    'in': 'my_image.tif', 
+    'interpolator': 'linear',
+    'transform.type.id.scaley': 0.5,
+    'transform.type.id.scalex': 0.5
+})
+```
+
+We can then write the output of `resampled` as follows:
 
-resampled = pyotb.RigidTransformResample({'in': 'my_image.tif', 'interpolator': 'linear',
-                                          'transform.type.id.scaley': 0.5, 'transform.type.id.scalex': 0.5})
-# Here you can set optionally pixel type and extended filename variables
-resampled.write({'out': 'output.tif'}, pixel_type='uint16', filename_extension='?nodata=65535')
+```python
+resampled.write('output.tif')
 ```
 
-Another possibility for writing results is to set the output parameter when initializing the application:
+!!! note
+
+    For applications that have multiple outputs, passing a `dict` of filenames 
+    can be considered. Let's take the example of `MeanShiftSmoothing` which 
+    has 2 output images:
+
+    ```python
+    import pyotb
+    meanshift = pyotb.MeanShiftSmoothing('my_image.tif')
+    meanshift.write({'fout': 'output_1.tif', 'foutpos': 'output_2.tif'})
+    ```
+
+Another possibility for writing results is to set the output parameter when 
+initializing the application:
+
 ```python
 import pyotb
 
-resampled = pyotb.RigidTransformResample({'in': 'my_image.tif', 'interpolator': 'linear', 'out': 'output.tif',
-                                          'transform.type.id.scaley': 0.5, 'transform.type.id.scalex': 0.5})
-# Here you can set optionally pixel type and extended filename variables
-resampled.write(pixel_type='uint16', filename_extension='?nodata=65535')
-```
\ No newline at end of file
+resampled = pyotb.RigidTransformResample({
+    'in': 'my_image.tif', 
+    'interpolator': 'linear', 
+    'out': 'output.tif',
+    'transform.type.id.scaley': 0.5,
+    'transform.type.id.scalex': 0.5
+})
+```
+
+### Pixel type
+
+Setting the pixel type is optional, and can be achieved by setting the 
+`pixel_type` argument:
+
+```python
+resampled.write('output.tif', pixel_type='uint16')
+```
+
+The value of `pixel_type` corresponds to the name of a pixel type from OTB 
+applications (e.g. `'uint8'`, `'float'`, etc).
+
+### Extended filenames
+
+Extended filenames can be passed as `str` or `dict`.
+
+As `str`:
+
+```python
+resampled.write(
+    ...
+    ext_fname='nodata=65535&box=0:0:256:256'
+)
+```
+
+As `dict`:
+
+```python
+resampled.write(
+    ...
+    ext_fname={'nodata': '65535', 'box': '0:0:256:256'}
+)
+```
+
+!!! info
+
+    When `ext_fname` is provided and the output filenames already contain an 
+    extended filename pattern, the options found in the filenames take 
+    priority over the ones passed in `ext_fname`. This allows fine-grained 
+    tuning of extended filenames for each output, on top of a common set of 
+    extended filename key/value pairs.
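+
+    For instance, a sketch of this behaviour (parameter values are arbitrary):
+
+    ```python
+    resampled.write(
+        # the `box` embedded here overrides the `box` from `ext_fname`
+        'output.tif?box=0:0:128:128',
+        ext_fname={'nodata': '65535', 'box': '0:0:256:256'}
+    )
+    # 'output.tif' is written with nodata=65535 and box=0:0:128:128
+    ```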
diff --git a/doc/stylesheets/extra.css b/doc/stylesheets/extra.css
deleted file mode 100644
index c67f70f85c74bbb5ffdb34443d58881964a6611a..0000000000000000000000000000000000000000
--- a/doc/stylesheets/extra.css
+++ /dev/null
@@ -1,4 +0,0 @@
-/* this is for readthedocs theme */
-.wy-nav-content {
-    max-width: 1000px;
-}
\ No newline at end of file
diff --git a/doc/summarize.md b/doc/summarize.md
new file mode 100644
index 0000000000000000000000000000000000000000..13f30214b9e5c3a53962e6e3765ea40f2388423a
--- /dev/null
+++ b/doc/summarize.md
@@ -0,0 +1,167 @@
+## Summarize applications
+
+pyotb allows summarizing applications as a dictionary of parameter keys and 
+values. This feature can be used to keep track of a process composed of 
+multiple applications chained together.
+
+### Single application
+
+Let's take the example of one single application.
+
+```python
+import pyotb
+
+app = pyotb.RigidTransformResample({
+    'in': 'my_image.tif', 
+    'interpolator': 'linear',
+    'transform.type.id.scaley': 0.5,
+    'transform.type.id.scalex': 0.5
+})
+```
+
+The application can be summarized using `pyotb.summarize(app)` or 
+`app.summarize()`, which are equivalent.
+
+```python
+print(app.summarize())
+```
+
+Results in the following (the output has been pretty-printed for 
+readability):
+
+```json lines
+{
+  'name': 'RigidTransformResample', 
+  'parameters': {
+    'transform.type': 'id', 
+    'in': 'my_image.tif', 
+    'interpolator': 'linear', 
+    'transform.type.id.scaley': 0.5, 
+    'transform.type.id.scalex': 0.5
+  }
+}
+```
+
+Note that we can also summarize an application after it has been executed:
+
+```python
+app.write('output.tif', pixel_type='uint16')
+print(app.summarize())
+```
+
+Which results in the following:
+
+```json lines
+{
+  'name': 'RigidTransformResample', 
+  'parameters': {
+    'transform.type': 'id',
+    'in': 'my_image.tif', 
+    'interpolator': 'linear', 
+    'transform.type.id.scaley': 0.5, 
+    'transform.type.id.scalex': 0.5, 
+    'out': 'output.tif'
+  }
+}
+```
+
+Now `'output.tif'` has been added to the application parameters.
+
+### Multiple applications chained together (pipeline)
+
+When multiple applications are chained together, the summary of the last 
+application will describe all upstream processes.
+
+```python
+import pyotb
+
+app1 = pyotb.RigidTransformResample({
+    'in': 'my_image.tif', 
+    'interpolator': 'linear',
+    'transform.type.id.scaley': 0.5,
+    'transform.type.id.scalex': 0.5
+})
+app2 = pyotb.Smoothing(app1)
+print(app2.summarize())
+```
+
+Results in:
+
+```json lines
+{
+  'name': 'Smoothing', 
+  'parameters': {
+    'type': 'anidif', 
+    'type.anidif.timestep': 0.125, 
+    'type.anidif.nbiter': 10, 
+    'type.anidif.conductance': 1.0, 
+    'in': {
+      'name': 'RigidTransformResample', 
+      'parameters': {
+        'transform.type': 'id', 
+        'in': 'my_image.tif', 
+        'interpolator': 'linear', 
+        'transform.type.id.scaley': 0.5, 
+        'transform.type.id.scalex': 0.5
+      }
+    }
+  }
+}
+```
+
+### Remote files URL stripping
+
+Cloud-based raster URLs often include tokens or random strings resulting from 
+the URL signing.
+These can be removed from the summarized paths using the `strip_inpath` 
+and/or `strip_outpath` arguments, for inputs and/or outputs respectively.
+
+Here is an example with Microsoft Planetary Computer:
+
+```python
+import planetary_computer
+import pyotb
+
+url = (
+    "https://sentinel2l2a01.blob.core.windows.net/sentinel2-l2/31/N/EA/2023/"
+    "11/03/S2A_MSIL2A_20231103T095151_N0509_R079_T31NEA_20231103T161409.SAFE/"
+    "GRANULE/L2A_T31NEA_A043691_20231103T100626/IMG_DATA/R10m/T31NEA_20231103"
+    "T095151_B02_10m.tif"
+)
+signed_url = planetary_computer.sign_inplace(url)
+app = pyotb.Smoothing(signed_url)
+```
+
+By default, the summary does not strip the URL.
+
+```python
+print(app.summarize()["parameters"]["in"])
+```
+
+This results in:
+
+```
+/vsicurl/https://sentinel2l2a01.blob.core.windows.net/sentinel2-l2/31/N/EA/...
+2023/11/03/S2A_MSIL2A_20231103T095151_N0509_R079_T31NEA_20231103T161409.SAFE...
+/GRANULE/L2A_T31NEA_A043691_20231103T100626/IMG_DATA/R10m/T31NEA_20231103T...
+095151_B02_10m.tif?st=2023-11-07T15%3A52%3A47Z&se=2023-11-08T16%3A37%3A47Z&...
+sp=rl&sv=2021-06-08&sr=c&skoid=c85c15d6-d1ae-42d4-af60-e2ca0f81359b&sktid=...
+72f988bf-86f1-41af-91ab-2d7cd011db47&skt=2023-11-08T11%3A41%3A41Z&ske=2023-...
+11-15T11%3A41%3A41Z&sks=b&skv=2021-06-08&sig=xxxxxxxxxxx...xxxxx
+```
+
+Now we can strip the URL to keep only the resource identifier and get rid of 
+the token:
+
+```python
+print(app.summarize(strip_inpath=True)["parameters"]["in"])
+```
+
+Which now results in:
+
+```
+/vsicurl/https://sentinel2l2a01.blob.core.windows.net/sentinel2-l2/31/N/EA/...
+2023/11/03/S2A_MSIL2A_20231103T095151_N0509_R079_T31NEA_20231103T161409.SAFE...
+/GRANULE/L2A_T31NEA_A043691_20231103T100626/IMG_DATA/R10m/T31NEA_20231103T...
+095151_B02_10m.tif
+```
\ No newline at end of file
diff --git a/doc/troubleshooting.md b/doc/troubleshooting.md
index e6b0b4c375e6c862d31b37d636cb86047ce36700..9cf5b84eff67fba64b7edb622a4214d87a562578 100644
--- a/doc/troubleshooting.md
+++ b/doc/troubleshooting.md
@@ -1,14 +1,42 @@
-## Troubleshooting: Known limitations
+# Troubleshooting
 
-### Failure of intermediate writing
+## Migration from pyotb 1.5.4 (Oct 2022) to 2.x.y
 
-When chaining applications in-memory, there may be some problems when writing intermediate results, depending on the order
+List of breaking changes:
+
+- `otbObject` has been renamed `OTBObject`
+- `otbObject.get_infos()` has been renamed `OTBObject.get_info()`
+- `otbObject.key_output_image` has been renamed `OTBObject.output_image_key`
+- `otbObject.key_input_image` has been renamed `OTBObject.input_image_key`
+- `otbObject.read_values_at_coords()` has been renamed `OTBObject.get_values_at_coords()`
+- `otbObject.xy_to_rowcol()` has been renamed `OTBObject.get_rowcol_from_xy()`
+- `App.output_param` has been replaced with `App.output_image_key`
+- `App.write()` argument `filename_extension` has been renamed `ext_fname`
+- `App.save_objects()` has been renamed `App.__sync_parameters()`
+- use `pyotb_app['paramname']` or `pyotb_app.app.GetParameterValue('paramname')` instead of `pyotb_app.GetParameterValue('paramname')` to access the value of parameter `paramname`
+- use `pyotb_app['paramname']` instead of `pyotb_app.paramname` to access the value of parameter `paramname` (see the example after this list)
+- `Output.__init__()` arguments `app` and `output_parameter_key` have been renamed `pyotb_app` and `param_key`
+- `Output.pyotb_app` has been renamed `Output.parent_pyotb_app`
+- `logicalOperation` has been renamed `LogicalOperation`
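+
+For instance, a sketch of the parameter access change (using `BandMath` 
+purely as an example):
+
+```python
+import pyotb
+
+bm = pyotb.BandMath(['image.tif'], exp='im1b1 * 100')
+
+# pyotb 1.5.4:
+# expression = bm.GetParameterValue('exp')
+# expression = bm.exp
+
+# pyotb 2.x.y:
+expression = bm['exp']
+# or, through the underlying OTB application:
+expression = bm.app.GetParameterValue('exp')
+```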
+
+## Known limitations with old versions
+
+!!! note
+
+    All defects described below have been fixed since OTB 8.1.2 and pyotb 2.0.0.
+
+### Failure of intermediate writing (otb < 8.1, pyotb < 1.5.4)
+
+When chaining applications in-memory, there may be some problems when writing 
+intermediate results, depending on the order in which
 the writings are requested. Some examples can be found below:
 
 #### Example of failures involving slicing
 
-For some applications (non-exhaustive know list: OpticalCalibration, DynamicConvert, BandMath), we can face unexpected 
-failures when using channels slicing
+For some applications (non-exhaustive known list: OpticalCalibration, 
+DynamicConvert, BandMath), we can face unexpected failures when using channel 
+slicing:
+
 ```python
 import pyotb
 
@@ -28,6 +56,7 @@ one_band.write('one_band.tif')  # Failure here
 ```
 
 When writing is triggered right after the application declaration, no problem occurs:
+
 ```python
 import pyotb
 
@@ -51,11 +80,10 @@ inp.write('stretched.tif')
 one_band.write('one_band.tif')
 ```
 
-
 #### Example of failures involving arithmetic operation
 
-One can meet errors when using arithmetic operations at the end of a pipeline when DynamicConvert, BandMath or
-OpticalCalibration is involved:
+One can encounter errors when using arithmetic operations at the end of a 
+pipeline when DynamicConvert, BandMath or OpticalCalibration is involved:
 
 ```python
 import pyotb
diff --git a/mkdocs.yml b/mkdocs.yml
index 56542796967f59adaed72babd6de011ec9afd6cd..88589b11d42843d02b111f71b1d0e20546412d3e 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -55,21 +55,27 @@ extra:
     - icon: fontawesome/brands/gitlab
       link: https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb
 extra_css:
-  - stylesheets/extra.css
+  - https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/raw/8.1.2-rc1/Documentation/Cookbook/_static/css/otb_theme.css
+  - extra.css
 use_directory_urls: false  # this creates some pyotb/core.html pages instead of pyotb/core/index.html
 
 markdown_extensions:
+  - admonition
   - toc:
       permalink: true
       title: On this page
       toc_depth: 1-2
   - pymdownx.highlight:
       anchor_linenums: true
-  - pymdownx.inlinehilite
-  - pymdownx.snippets
+  - pymdownx.details
+  - pymdownx.superfences:
+     custom_fences:
+       - name: python
+         class: python
+         format: !!python/name:pymdownx.superfences.fence_code_format
 
-# Rest of the navigation..
-site_name: "pyotb documentation: a Python extension of OTB"
+# Rest of the navigation.
+site_name: "pyotb: Orfeo ToolBox for Python"
 repo_url: https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb
 repo_name: pyotb
 docs_dir: doc/
diff --git a/pyotb/__init__.py b/pyotb/__init__.py
index 187b64a3eab5232464472d46eeca1e1f0ca9f161..5b455abe8a07775d7b5e865053e6641867bd246d 100644
--- a/pyotb/__init__.py
+++ b/pyotb/__init__.py
@@ -1,8 +1,25 @@
 # -*- coding: utf-8 -*-
 """This module provides convenient python wrapping of otbApplications."""
-__version__ = "1.5.4"
+__version__ = "2.0.0"
 
-from .apps import *
-from .core import App, Output, Input, get_nbchannels, get_pixel_type
-from .functions import *  # pylint: disable=redefined-builtin
+from .install import install_otb
 from .helpers import logger, set_logger_level
+from .core import (
+    OTBObject,
+    App,
+    Input,
+    Output,
+    get_nbchannels,
+    get_pixel_type,
+    summarize,
+)
+from .apps import *
+
+from .functions import (  # pylint: disable=redefined-builtin
+    all,
+    any,
+    clip,
+    define_processing_area,
+    run_tf_function,
+    where,
+)
diff --git a/pyotb/apps.py b/pyotb/apps.py
index e5287bbd21a30326b0c9a8341d1d9bb0fa74d3f4..45e49c4788837bfce7bc09091cd2c976a02aa8b3 100644
--- a/pyotb/apps.py
+++ b/pyotb/apps.py
@@ -1,85 +1,38 @@
-# -*- coding: utf-8 -*-
 """Search for OTB (set env if necessary), subclass core.App for each available application."""
+from __future__ import annotations
+
 import os
-import sys
-from pathlib import Path
 
-from .helpers import logger, find_otb
+import otbApplication as otb  # pylint: disable=import-error
 
-otb = find_otb()
+from .core import App
+from .helpers import logger
 
 
-def get_available_applications(as_subprocess=False):
+def get_available_applications() -> tuple[str]:
     """Find available OTB applications.
 
-    Args:
-        as_subprocess: indicate if function should list available applications using subprocess call
-
     Returns:
         tuple of available applications
 
+    Raises:
+        SystemExit: if no application is found
+
     """
-    app_list = ()
-    if as_subprocess and sys.executable:
-        # Currently, there is an incompatibility between OTBTF and Tensorflow that causes segfault
-        # when OTBTF apps are used in a script where tensorflow has already been imported.
-        # See https://github.com/remicres/otbtf/issues/28
-        # Thus, we run this piece of code in a clean independent `subprocess` that doesn't interact with Tensorflow
-        env = os.environ.copy()
-        if "PYTHONPATH" not in env:
-            env["PYTHONPATH"] = ""
-        env["PYTHONPATH"] = ":" + str(Path(otb.__file__).parent)
-        env["OTB_LOGGER_LEVEL"] = "CRITICAL"  # in order to suppress warnings while listing applications
-        pycmd = "import otbApplication; print(otbApplication.Registry.GetAvailableApplications())"
-        cmd_args = [sys.executable, "-c", pycmd]
-
-        try:
-            import subprocess  # pylint: disable=import-outside-toplevel
-            params = {"env": env, "stdout": subprocess.PIPE, "stderr": subprocess.PIPE}
-            with subprocess.Popen(cmd_args, **params) as p:
-                logger.debug('Exec "%s \'%s\'"', ' '.join(cmd_args[:-1]), pycmd)
-                stdout, stderr = p.communicate()
-                stdout, stderr = stdout.decode(), stderr.decode()
-                # ast.literal_eval is secure and will raise more handy Exceptions than eval
-                from ast import literal_eval  # pylint: disable=import-outside-toplevel
-                app_list = literal_eval(stdout.strip())
-                assert isinstance(app_list, (tuple, list))
-        except subprocess.SubprocessError:
-            logger.debug("Failed to call subprocess")
-        except (ValueError, SyntaxError, AssertionError):
-            logger.debug("Failed to decode output or convert to tuple:\nstdout=%s\nstderr=%s", stdout, stderr)
-
-        if not app_list:
-            logger.info("Failed to list applications in an independent process. Falling back to local python import")
-    # Find applications using the normal way
-    if not app_list:
-        app_list = otb.Registry.GetAvailableApplications()
-    if not app_list:
-        logger.warning("Unable to load applications. Set env variable OTB_APPLICATION_PATH then try again")
-        return ()
-
-    logger.info("Successfully loaded %s OTB applications", len(app_list))
-    return app_list
-
-
-AVAILABLE_APPLICATIONS = get_available_applications(as_subprocess=True)
-
-# First core.py call (within __init__ scope)
-from .core import App  # pylint: disable=wrong-import-position
-
-# This is to enable aliases of Apps, i.e. using apps like `pyotb.AppName(...)` instead of `pyotb.App("AppName", ...)`
-_CODE_TEMPLATE = """
-class {name}(App):
-    """ """
-    def __init__(self, *args, **kwargs):
-        super().__init__('{name}', *args, **kwargs)
-"""
+    app_list = otb.Registry.GetAvailableApplications()
+    if app_list:
+        logger.info("Successfully loaded %s OTB applications", len(app_list))
+        return app_list
+    raise SystemExit(
+        "Unable to load applications. Set env variable OTB_APPLICATION_PATH and try again."
+    )
 
 
 class OTBTFApp(App):
-    """Helper for OTBTF."""
+    """Helper for OTBTF to ensure the nb_sources variable is set."""
+
     @staticmethod
-    def set_nb_sources(*args, n_sources=None):
+    def set_nb_sources(*args, n_sources: int = None):
         """Set the number of sources of TensorflowModelServe. Can be either user-defined or deduced from the args.
 
         Args:
@@ -89,52 +42,46 @@ class OTBTFApp(App):
 
         """
         if n_sources:
-            os.environ['OTB_TF_NSOURCES'] = str(int(n_sources))
+            os.environ["OTB_TF_NSOURCES"] = str(int(n_sources))
         else:
             # Retrieving the number of `source#.il` parameters
-            params_dic = {k: v for arg in args if isinstance(arg, dict) for k, v in arg.items()}
-            n_sources = len([k for k in params_dic if 'source' in k and k.endswith('.il')])
+            params_dic = {
+                k: v for arg in args if isinstance(arg, dict) for k, v in arg.items()
+            }
+            n_sources = len(
+                [k for k in params_dic if "source" in k and k.endswith(".il")]
+            )
             if n_sources >= 1:
-                os.environ['OTB_TF_NSOURCES'] = str(n_sources)
+                os.environ["OTB_TF_NSOURCES"] = str(n_sources)
 
-    def __init__(self, app_name, *args, n_sources=None, **kwargs):
+    def __init__(self, name: str, *args, n_sources: int = None, **kwargs):
         """Constructor for an OTBTFApp object.
 
         Args:
-            app_name: name of the OTBTF app
-            *args: arguments (dict). NB: we don't need kwargs because it cannot contain source#.il
+            name: name of the OTBTF app
             n_sources: number of sources. Default is None (resolves the number of sources based on the
                        content of the dict passed in args, where some 'source' str is found)
-            **kwargs: kwargs
+
         """
         self.set_nb_sources(*args, n_sources=n_sources)
-        super().__init__(app_name, *args, **kwargs)
+        super().__init__(name, *args, **kwargs)
 
 
-for _app in AVAILABLE_APPLICATIONS:
-    # Customize the behavior for some OTBTF applications. The user doesn't need to set the env variable
-    # `OTB_TF_NSOURCES`, it is handled in pyotb
-    if _app == 'TensorflowModelServe':
-        class TensorflowModelServe(OTBTFApp):
-            """Serve a Tensorflow model using OTBTF."""
-            def __init__(self, *args, n_sources=None, **kwargs):
-                """Constructor for a TensorflowModelServe object."""
-                super().__init__('TensorflowModelServe', *args, n_sources=n_sources, **kwargs)
-
-    elif _app == 'PatchesExtraction':
-        class PatchesExtraction(OTBTFApp):
-            """Extract patches using OTBTF."""
-            def __init__(self, *args, n_sources=None, **kwargs):
-                """Constructor for a PatchesExtraction object."""
-                super().__init__('PatchesExtraction', *args, n_sources=n_sources, **kwargs)
-
-    elif _app == 'TensorflowModelTrain':
-        class TensorflowModelTrain(OTBTFApp):
-            """Train a Tensorflow model using OTBTF."""
-            def __init__(self, *args, n_sources=None, **kwargs):
-                """Constructor for a TensorflowModelTrain object."""
-                super().__init__('TensorflowModelTrain', *args, n_sources=n_sources, **kwargs)
+AVAILABLE_APPLICATIONS = get_available_applications()
 
+# This is to enable aliases of Apps, i.e. `pyotb.AppName(...)` instead of `pyotb.App("AppName", ...)`
+_CODE_TEMPLATE = """
+class {name}(App):
+    def __init__(self, *args, **kwargs):
+        super().__init__('{name}', *args, **kwargs)
+"""
+
+for _app in AVAILABLE_APPLICATIONS:
+    # Customize the behavior for some OTBTF applications. `OTB_TF_NSOURCES` is now handled by pyotb
+    if _app in ("PatchesExtraction", "TensorflowModelTrain", "TensorflowModelServe"):
+        exec(  # pylint: disable=exec-used
+            _CODE_TEMPLATE.format(name=_app).replace("(App)", "(OTBTFApp)")
+        )
     # Default behavior for any OTB application
     else:
         exec(_CODE_TEMPLATE.format(name=_app))  # pylint: disable=exec-used
diff --git a/pyotb/core.py b/pyotb/core.py
index 5b9a895b35e342bc16af2b5ab7334b269fa33e49..9cd9d4eeff26ce85ccd1eb63ffef30b3445e3935 100644
--- a/pyotb/core.py
+++ b/pyotb/core.py
@@ -1,480 +1,371 @@
-# -*- coding: utf-8 -*-
 """This module is the core of pyotb."""
+from __future__ import annotations
+
+import re
+from abc import ABC, abstractmethod
+from ast import literal_eval
 from pathlib import Path
+from time import perf_counter
+from typing import Any
 
 import numpy as np
-import otbApplication as otb
+import otbApplication as otb  # pylint: disable=import-error
 
 from .helpers import logger
+from .depreciation import deprecated_alias, depreciation_warning, deprecated_attr
 
 
-class otbObject:
-    """Base class that gathers common operations for any OTB in-memory raster."""
-    _name = ""
-    app = None
-    output_param = ""
+class OTBObject(ABC):
+    """Abstraction of an image object, for a whole app or one specific output."""
 
     @property
-    def name(self):
-        """Application name that will be printed in logs.
+    @abstractmethod
+    def name(self) -> str:
+        """Application name by default, but a custom name may be passed during init."""
 
-        Returns:
-            user's defined name or appname
+    @property
+    @abstractmethod
+    def app(self) -> otb.Application:
+        """Reference to the otb.Application instance linked to this object."""
 
-        """
-        return self._name or self.app.GetName()
+    @property
+    @abstractmethod
+    def output_image_key(self) -> str:
+        """Return the name of a parameter key associated to the main output image of the object."""
+
+    @property
+    @deprecated_attr(replacement="output_image_key")
+    def output_param(self) -> str:
+        """Return the name of a parameter key associated to the main output image of the object (deprecated)."""
 
-    @name.setter
-    def name(self, val):
-        """Set custom name.
+    @property
+    @abstractmethod
+    def exports_dic(self) -> dict[str, dict]:
+        """Ref to an internal dict of np.array exports, to avoid duplicated ExportImage()."""
 
-        Args:
-          val: new name
+    @property
+    def metadata(self) -> dict[str, (str, float, list[float])]:
+        """Return image metadata as dictionary.
+
+        The returned dict results from the concatenation of the first output
+        image's ImageMetadata (IMD) and the application's MetadataDictionary (MDD).
 
         """
-        self._name = val
+        # Image Metadata
+        otb_imd = self.app.GetImageMetadata(self.output_image_key)
+        cats = ["Num", "Str", "L1D", "Time"]
+        imd = {
+            key: getattr(otb_imd, f"get_{cat.lower()}")(key)
+            for cat in cats
+            for key in getattr(otb_imd, f"GetKeyList{cat}")().split(" ")
+            if getattr(otb_imd, "has")(key)
+        }
+
+        # Other metadata dictionary: key-value pairs parsing is required
+        mdd = dict(self.app.GetMetadataDictionary(self.output_image_key))
+        new_mdd = {}
+        for key, val in mdd.items():
+            new_key = key
+            new_val = val
+            if isinstance(val, str):
+                splits = val.split("=")
+                if key.lower().startswith("metadata_") and len(splits) == 2:
+                    new_key = splits[0].strip()
+                    new_val = splits[1].strip()
+            new_mdd[new_key] = new_val
+
+        return {**new_mdd, **imd}
 
     @property
-    def dtype(self):
-        """Expose the pixel type of an output image using numpy convention.
+    def dtype(self) -> np.dtype:
+        """Expose the pixel type of output image using numpy convention.
 
         Returns:
             dtype: pixel type of the output image
 
         """
-        try:
-            enum = self.app.GetParameterOutputImagePixelType(self.output_param)
-            return self.app.ConvertPixelTypeToNumpy(enum)
-        except RuntimeError:
-            return None
+        enum = self.app.GetParameterOutputImagePixelType(self.output_image_key)
+        return self.app.ConvertPixelTypeToNumpy(enum)
 
     @property
-    def shape(self):
+    def shape(self) -> tuple[int]:
         """Enables to retrieve the shape of a pyotb object using numpy convention.
 
         Returns:
             shape: (height, width, bands)
 
         """
-        width, height = self.app.GetImageSize(self.output_param)
-        bands = self.app.GetImageNbBands(self.output_param)
-        return (height, width, bands)
-
-    def write(self, *args, filename_extension="", pixel_type=None, **kwargs):
-        """Trigger execution, set output pixel type and write the output.
-
-        Args:
-            *args: Can be : - dictionary containing key-arguments enumeration. Useful when a key contains
-                              non-standard characters such as a point, e.g. {'io.out':'output.tif'}
-                            - string, useful when there is only one output, e.g. 'output.tif'
-                            - None if output file was passed during App init
-            filename_extension: Optional, an extended filename as understood by OTB (e.g. "&gdal:co:TILED=YES")
-                                Will be used for all outputs (Default value = "")
-            pixel_type: Can be : - dictionary {output_parameter_key: pixeltype} when specifying for several outputs
-                                 - str (e.g. 'uint16') or otbApplication.ImagePixelType_... When there are several
-                                   outputs, all outputs are written with this unique type.
-                                   Valid pixel types are uint8, uint16, uint32, int16, int32, float, double,
-                                   cint16, cint32, cfloat, cdouble. (Default value = None)
-            **kwargs: keyword arguments e.g. out='output.tif'
-        """
-        # Gather all input arguments in kwargs dict
-        for arg in args:
-            if isinstance(arg, dict):
-                kwargs.update(arg)
-            elif isinstance(arg, str) and kwargs:
-                logger.warning('%s: keyword arguments specified, ignoring argument "%s"', self.name, arg)
-            elif isinstance(arg, str):
-                kwargs.update({self.output_param: arg})
-
-        dtypes = {}
-        if isinstance(pixel_type, dict):
-            dtypes = {k: parse_pixel_type(v) for k, v in pixel_type.items()}
-        elif pixel_type is not None:
-            typ = parse_pixel_type(pixel_type)
-            if isinstance(self, App):
-                dtypes = {key: typ for key in self.output_parameters_keys}
-            elif isinstance(self, otbObject):
-                dtypes = {self.output_param: typ}
-
-        if filename_extension:
-            logger.debug('%s: using extended filename for outputs: %s', self.name, filename_extension)
-            if not filename_extension.startswith('?'):
-                filename_extension = "?" + filename_extension
-
-        # Case output parameter was set during App init
-        if not kwargs:
-            if self.output_param in self.parameters:
-                if dtypes:
-                    self.app.SetParameterOutputImagePixelType(self.output_param, dtypes[self.output_param])
-                if filename_extension:
-                    new_val = self.parameters[self.output_param] + filename_extension
-                    self.app.SetParameterString(self.output_param, new_val)
-            else:
-                raise ValueError(f'{self.app.GetName()}: Output parameter is missing.')
-
-        # Parse kwargs
-        for key, output_filename in kwargs.items():
-            # Stop process if a bad parameter is given
-            if key not in self.app.GetParametersKeys():
-                raise KeyError(f'{self.app.GetName()}: Unknown parameter key "{key}"')
-            # Check if extended filename was not provided twice
-            if '?' in output_filename and filename_extension:
-                logger.warning('%s: extended filename was provided twice. Using the one found in path.', self.name)
-            elif filename_extension:
-                output_filename += filename_extension
-
-            logger.debug('%s: "%s" parameter is %s', self.name, key, output_filename)
-            self.app.SetParameterString(key, output_filename)
-
-            if key in dtypes:
-                self.app.SetParameterOutputImagePixelType(key, dtypes[key])
-
-        logger.debug('%s: flushing data to disk', self.name)
-        try:
-            self.app.WriteOutput()
-        except RuntimeError:
-            logger.debug('%s: failed to simply write output, executing once again then writing', self.name)
-            self.app.ExecuteAndWriteOutput()
+        width, height = self.app.GetImageSize(self.output_image_key)
+        bands = self.app.GetImageNbBands(self.output_image_key)
+        return height, width, bands
 
-    def to_numpy(self, preserve_dtype=True, copy=False):
-        """Export a pyotb object to numpy array.
+    @property
+    def transform(self) -> tuple[int]:
+        """Get image affine transform, rasterio style.
 
-        Args:
-            preserve_dtype: when set to True, the numpy array is created with the same pixel type as
-                                  the otbObject first output. Default is True.
-            copy: whether to copy the output array, default is False
-                  required to True if preserve_dtype is False and the source app reference is lost
+        See https://www.perrygeo.com/python-affine-transforms.html
 
         Returns:
-          a numpy array
-
+            transform: (X spacing, X offset, X origin, Y offset, Y spacing, Y origin)
         """
-        array = self.app.ExportImage(self.output_param)['array']
-        if preserve_dtype:
-            return array.astype(self.dtype)
-        if copy:
-            return array.copy()
-        return array
-
-    def to_rasterio(self):
-        """Export image as a numpy array and its metadata compatible with rasterio.
-
-        Returns:
-          array : a numpy array in the (bands, height, width) order
-          profile: a metadata dict required to write image using rasterio
-
-        """
-        array = self.to_numpy(preserve_dtype=True, copy=False)
-        array = np.moveaxis(array, 2, 0)
-        proj = self.app.GetImageProjection(self.output_param)
-        spacing_x, spacing_y = self.app.GetImageSpacing(self.output_param)
-        origin_x, origin_y = self.app.GetImageOrigin(self.output_param)
+        spacing_x, spacing_y = self.app.GetImageSpacing(self.output_image_key)
+        origin_x, origin_y = self.app.GetImageOrigin(self.output_image_key)
         # Shift image origin since OTB is giving coordinates of pixel center instead of corners
         origin_x, origin_y = origin_x - spacing_x / 2, origin_y - spacing_y / 2
-        profile = {
-            'crs': proj, 'dtype': array.dtype,
-            'count': array.shape[0], 'height': array.shape[1], 'width': array.shape[2],
-            'transform': (spacing_x, 0.0, origin_x, 0.0, spacing_y, origin_y)  # here we force pixel rotation to 0 !
-        }
-        return array, profile
+        return spacing_x, 0.0, origin_x, 0.0, spacing_y, origin_y
 
-    # Special methods
-    def __getitem__(self, key):
-        """Override the default __getitem__ behaviour.
-
-        This function enables 2 things :
-        - access attributes like that : object['any_attribute']
-        - slicing, i.e. selecting ROI/bands. For example, selecting first 3 bands: object[:, :, :3]
-                                                          selecting bands 1, 2 & 5 : object[:, :, [0, 1, 4]]
-                                                          selecting 1000x1000 subset : object[:1000, :1000]
+    def summarize(self, *args, **kwargs):
+        """Recursively summarize an app parameters and its parents.
 
         Args:
-            key: attribute key
+            *args: args for `pyotb.summarize()`
+            **kwargs: keyword args for `pyotb.summarize()`
 
         Returns:
-            attribute or Slicer
-        """
-        # Accessing string attributes
-        if isinstance(key, str):
-            return self.__dict__.get(key)
-        # Slicing
-        if not isinstance(key, tuple) or (isinstance(key, tuple) and (len(key) < 2 or len(key) > 3)):
-            raise ValueError(f'"{key}"cannot be interpreted as valid slicing. Slicing should be 2D or 3D.')
-        if isinstance(key, tuple) and len(key) == 2:
-            # Adding a 3rd dimension
-            key = key + (slice(None, None, None),)
-        (rows, cols, channels) = key
-        return Slicer(self, rows, cols, channels)
-
-    def __getattr__(self, name):
-        """This method is called when the default attribute access fails.
-
-        We choose to access the attribute `name` of self.app.
-        Thus, any method of otbApplication can be used transparently on otbObject objects,
-        e.g. SetParameterOutputImagePixelType() or ExportImage() work
-
-        Args:
-            name: attribute name
-
-        Returns:
-            attribute
-
-        Raises:
-            AttributeError: when `name` is not an attribute of self.app
-
-        """
-        try:
-            res = getattr(self.app, name)
-            return res
-        except AttributeError as e:
-            raise AttributeError(f'{self.name}: could not find attribute `{name}`') from e
-
-    def __add__(self, other):
-        """Overrides the default addition and flavours it with BandMathX.
-
-        Args:
-            other: the other member of the operation
-
-        Returns:
-             self + other
-
-        """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return Operation('+', self, other)
-
-    def __sub__(self, other):
-        """Overrides the default subtraction and flavours it with BandMathX.
-
-        Args:
-            other: the other member of the operation
-
-        Returns:
-             self - other
+            app summary, same as `pyotb.summarize()`
 
         """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return Operation('-', self, other)
-
-    def __mul__(self, other):
-        """Overrides the default subtraction and flavours it with BandMathX.
+        return summarize(self, *args, **kwargs)
 
-        Args:
-            other: the other member of the operation
-
-        Returns:
-             self * other
+    def get_info(self) -> dict[str, (str, float, list[float])]:
+        """Return a dict output of ReadImageInfo for the first image output."""
+        return App("ReadImageInfo", self, quiet=True).data
 
-        """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return Operation('*', self, other)
+    def get_statistics(self) -> dict[str, (str, float, list[float])]:
+        """Return a dict output of ComputeImagesStatistics for the first image output."""
+        return App("ComputeImagesStatistics", self, quiet=True).data
 
-    def __truediv__(self, other):
-        """Overrides the default subtraction and flavours it with BandMathX.
+    def get_values_at_coords(
+        self, row: int, col: int, bands: int | list[int] = None
+    ) -> list[float] | float:
+        """Get pixel value(s) at a given YX coordinates.
 
         Args:
-            other: the other member of the operation
+            row: index along Y / latitude axis
+            col: index along X / longitude axis
+            bands: band number(s) to fetch values from
 
         Returns:
-             self / other
-
-        """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return Operation('/', self, other)
-
-    def __radd__(self, other):
-        """Overrides the default reverse addition and flavours it with BandMathX.
+            single numerical value or a list of values for each band
 
-        Args:
-            other: the other member of the operation
-
-        Returns:
-             other + self
+        Raises:
+            TypeError: if bands is not a slice or list
 
         """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return Operation('+', other, self)
-
-    def __rsub__(self, other):
-        """Overrides the default subtraction and flavours it with BandMathX.
+        channels = []
+        app = App("PixelValue", self, coordx=col, coordy=row, frozen=True, quiet=True)
+        if bands is not None:
+            if isinstance(bands, int):
+                if bands < 0:
+                    bands = self.shape[2] + bands
+                channels = [bands]
+            elif isinstance(bands, slice):
+                channels = self.channels_list_from_slice(bands)
+            elif not isinstance(bands, list):
+                raise TypeError(
+                    f"{self.name}: type '{type(bands)}' cannot be interpreted as a valid slicing"
+                )
+            if channels:
+                app.app.Execute()
+                app.set_parameters({"cl": [f"Channel{n + 1}" for n in channels]})
+        app.execute()
+        data = literal_eval(app.app.GetParameterString("value"))
+        return data[0] if len(channels) == 1 else data
+
+    def channels_list_from_slice(self, bands: slice) -> list[int]:
+        """Get list of channels to read values at, from a slice.
 
         Args:
-            other: the other member of the operation
+            bands: slice obtained when using app[:]
 
         Returns:
-             other - self
+            list of channels to select
 
-        """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return Operation('-', other, self)
-
-    def __rmul__(self, other):
-        """Overrides the default multiplication and flavours it with BandMathX.
-
-        Args:
-            other: the other member of the operation
-
-        Returns:
-             other * self
+        Raises:
+            ValueError: if the slice is malformed
 
         """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return Operation('*', other, self)
-
-    def __rtruediv__(self, other):
-        """Overrides the default division and flavours it with BandMathX.
+        nb_channels = self.shape[2]
+        start, stop, step = bands.start, bands.stop, bands.step
+        start = nb_channels + start if isinstance(start, int) and start < 0 else start
+        stop = nb_channels + stop if isinstance(stop, int) and stop < 0 else stop
+        step = 1 if step is None else step
+        if start is not None and stop is not None:
+            return list(range(start, stop, step))
+        if start is not None and stop is None:
+            return list(range(start, nb_channels, step))
+        if start is None and stop is not None:
+            return list(range(0, stop, step))
+        if start is None and stop is None:
+            return list(range(0, nb_channels, step))
+        raise ValueError(
+            f"{self.name}: '{bands}' cannot be interpreted as valid slicing."
+        )
+
+    def export(
+        self, key: str = None, preserve_dtype: bool = True
+    ) -> dict[str, dict[str, np.ndarray]]:
+        """Export a specific output image as numpy array and store it in object exports_dic.
 
         Args:
-            other: the other member of the operation
-
-        Returns:
-             other / self
-
-        """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return Operation('/', other, self)
-
-    def __abs__(self):
-        """Overrides the default abs operator and flavours it with BandMathX.
+            key: parameter key to export, if None then the default one will be used
+            preserve_dtype: convert the array to the same pixel type as the App first output
 
         Returns:
-            abs(self)
+            the exported numpy array
 
         """
-        return Operation('abs', self)
+        if key is None:
+            key = self.output_image_key
+        if key not in self.exports_dic:
+            self.exports_dic[key] = self.app.ExportImage(key)
+        if preserve_dtype:
+            self.exports_dic[key]["array"] = self.exports_dic[key]["array"].astype(
+                self.dtype
+            )
+        return self.exports_dic[key]
+
+    def to_numpy(
+        self, key: str = None, preserve_dtype: bool = True, copy: bool = False
+    ) -> np.ndarray:
+        """Export a pyotb object to numpy array.
 
-    def __ge__(self, other):
-        """Overrides the default greater or equal and flavours it with BandMathX.
+        A copy is avoided by default, but may be required if preserve_dtype is False
+         and the source app reference is lost.
 
         Args:
-            other: the other member of the operation
+            key: the output parameter name to export as numpy array
+            preserve_dtype:  convert the array to the same pixel type as the App first output
+            copy: whether to copy the output array instead of returning a reference
 
         Returns:
-             self >= other
+            a numpy array that may already have been cached in self.exports_dic
 
         """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return logicalOperation('>=', self, other)
-
-    def __le__(self, other):
-        """Overrides the default less or equal and flavours it with BandMathX.
+        data = self.export(key, preserve_dtype)
+        return data["array"].copy() if copy else data["array"]
 
-        Args:
-            other: the other member of the operation
+    def to_rasterio(self) -> tuple[np.ndarray, dict[str, Any]]:
+        """Export image as a numpy array and its metadata compatible with rasterio.
 
         Returns:
-             self <= other
+          array : a numpy array in the (bands, height, width) order
+          profile: a metadata dict required to write image using rasterio
 
         """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return logicalOperation('<=', self, other)
+        profile = {}
+        array = self.to_numpy(preserve_dtype=True, copy=False)
+        proj = self.app.GetImageProjection(self.output_image_key)
+        profile.update({"crs": proj, "dtype": array.dtype, "transform": self.transform})
+        height, width, count = array.shape
+        profile.update({"count": count, "height": height, "width": width})
+        return np.moveaxis(array, 2, 0), profile
 
-    def __gt__(self, other):
-        """Overrides the default greater operator and flavours it with BandMathX.
+    def get_rowcol_from_xy(self, x: float, y: float) -> tuple[int, int]:
+        """Find (row, col) index using (x, y) projected coordinates - image CRS is expected.
 
         Args:
-            other: the other member of the operation
+            x: longitude or projected X
+            y: latitude or projected Y
 
         Returns:
-             self > other
+            pixel index as (row, col)
 
         """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return logicalOperation('>', self, other)
+        spacing_x, _, origin_x, _, spacing_y, origin_y = self.transform
+        row, col = (origin_y - y) / spacing_y, (x - origin_x) / spacing_x
+        return abs(int(row)), int(col)
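+    # Worked example: with transform = (10.0, 0, 500000.0, 0, -10.0, 4500000.0),
+    # get_rowcol_from_xy(500100.0, 4499950.0) gives row = abs((4500000 - 4499950) / -10) = 5
+    # and col = (500100 - 500000) / 10 = 10, i.e. (5, 10).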
 
-    def __lt__(self, other):
-        """Overrides the default less operator and flavours it with BandMathX.
+    @staticmethod
+    def __create_operator(op_cls, name, x, y) -> Operation:
+        """Create an operator.
 
         Args:
-            other: the other member of the operation
+            op_cls: Operator class
+            name: operator expression
+            x: first element
+            y: second element
 
         Returns:
-             self < other
+            an Operation object instance
 
         """
-        if isinstance(other, (np.ndarray, np.generic)):
+        if isinstance(y, (np.ndarray, np.generic)):
             return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return logicalOperation('<', self, other)
+        return op_cls(name, x, y)
 
-    def __eq__(self, other):
-        """Overrides the default eq operator and flavours it with BandMathX.
+    def __add__(self, other: OTBObject | str | float) -> Operation:
+        """Addition."""
+        return self.__create_operator(Operation, "+", self, other)
 
-        Args:
-            other: the other member of the operation
+    def __sub__(self, other: OTBObject | str | float) -> Operation:
+        """Subtraction."""
+        return self.__create_operator(Operation, "-", self, other)
 
-        Returns:
-             self == other
+    def __mul__(self, other: OTBObject | str | float) -> Operation:
+        """Multiplication."""
+        return self.__create_operator(Operation, "*", self, other)
 
-        """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return logicalOperation('==', self, other)
+    def __truediv__(self, other: OTBObject | str | float) -> Operation:
+        """Division."""
+        return self.__create_operator(Operation, "/", self, other)
 
-    def __ne__(self, other):
-        """Overrides the default different operator and flavours it with BandMathX.
+    def __radd__(self, other: OTBObject | str | float) -> Operation:
+        """Right addition."""
+        return self.__create_operator(Operation, "+", other, self)
 
-        Args:
-            other: the other member of the operation
+    def __rsub__(self, other: OTBObject | str | float) -> Operation:
+        """Right subtraction."""
+        return self.__create_operator(Operation, "-", other, self)
 
-        Returns:
-             self != other
+    def __rmul__(self, other: OTBObject | str | float) -> Operation:
+        """Right multiplication."""
+        return self.__create_operator(Operation, "*", other, self)
 
-        """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return logicalOperation('!=', self, other)
+    def __rtruediv__(self, other: OTBObject | str | float) -> Operation:
+        """Right division."""
+        return self.__create_operator(Operation, "/", other, self)
 
-    def __or__(self, other):
-        """Overrides the default or operator and flavours it with BandMathX.
+    def __abs__(self) -> Operation:
+        """Absolute value."""
+        return Operation("abs", self)
 
-        Args:
-            other: the other member of the operation
+    def __ge__(self, other: OTBObject | str | float) -> LogicalOperation:
+        """Greater of equal than."""
+        return self.__create_operator(LogicalOperation, ">=", self, other)
 
-        Returns:
-             self || other
+    def __le__(self, other: OTBObject | str | float) -> LogicalOperation:
+        """Lower of equal than."""
+        return self.__create_operator(LogicalOperation, "<=", self, other)
 
-        """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return logicalOperation('||', self, other)
+    def __gt__(self, other: OTBObject | str | float) -> LogicalOperation:
+        """Greater than."""
+        return self.__create_operator(LogicalOperation, ">", self, other)
 
-    def __and__(self, other):
-        """Overrides the default and operator and flavours it with BandMathX.
+    def __lt__(self, other: OTBObject | str | float) -> LogicalOperation:
+        """Lower than."""
+        return self.__create_operator(LogicalOperation, "<", self, other)
 
-        Args:
-            other: the other member of the operation
-
-        Returns:
-             self && other
+    def __eq__(self, other: OTBObject | str | float) -> LogicalOperation:
+        """Equality."""
+        return self.__create_operator(LogicalOperation, "==", self, other)
 
-        """
-        if isinstance(other, (np.ndarray, np.generic)):
-            return NotImplemented  # this enables to fallback on numpy emulation thanks to __array_ufunc__
-        return logicalOperation('&&', self, other)
+    def __ne__(self, other: OTBObject | str | float) -> LogicalOperation:
+        """Inequality."""
+        return self.__create_operator(LogicalOperation, "!=", self, other)
 
-    # TODO: other operations ?
-    #  e.g. __pow__... cf https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types
+    def __or__(self, other: OTBObject | str | float) -> LogicalOperation:
+        """Logical or."""
+        return self.__create_operator(LogicalOperation, "||", self, other)
 
-    def __hash__(self):
-        """Override the default behaviour of the hash function.
+    def __and__(self, other: OTBObject | str | float) -> LogicalOperation:
+        """Logical and."""
+        return self.__create_operator(LogicalOperation, "&&", self, other)
 
-        Returns:
-            self hash
-
-        """
-        return id(self)
+    # Some other operations could be implemented with the same pattern
+    # e.g. __pow__... cf https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types
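+    # Usage sketch (input1 and input2 are hypothetical pyotb objects): these
+    # overloads turn Python expressions into in-memory BandMath operations, e.g.
+    #   res = (input1 + 2 * input2) > 0   # LogicalOperation instance
+    #   res.write("mask.tif")             # hypothetical output path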
 
-    def __array__(self):
+    def __array__(self) -> np.ndarray:
         """This is called when running np.asarray(pyotb_object).
 
         Returns:
@@ -483,222 +374,580 @@ class otbObject:
         """
         return self.to_numpy()
 
-    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs) -> App:
         """This is called whenever a numpy function is called on a pyotb object.
 
         Operation is performed in numpy, then imported back to pyotb with the same georeference as input.
+        At least one of the inputs has to be an OTBObject.
 
         Args:
             ufunc: numpy function
             method: an internal numpy argument
-            inputs: inputs, at least one being pyotb object. If there are several pyotb objects, they must all have
-                    the same georeference and pixel size.
+            inputs: operands, which must have equal shapes if several images / OTBObject are passed
             **kwargs: kwargs of the numpy function
 
         Returns:
             a pyotb object
 
         """
-        if method == '__call__':
+        if method == "__call__":
             # Converting potential pyotb inputs to arrays
             arrays = []
             image_dic = None
             for inp in inputs:
                 if isinstance(inp, (float, int, np.ndarray, np.generic)):
                     arrays.append(inp)
-                elif isinstance(inp, otbObject):
-                    image_dic = inp.app.ExportImage(self.output_param)
-                    array = image_dic['array']
-                    arrays.append(array)
+                elif isinstance(inp, OTBObject):
+                    image_dic = inp.export()
+                    arrays.append(image_dic["array"])
                 else:
-                    print(type(self))
+                    logger.debug(type(self))
                     return NotImplemented
-
             # Performing the numpy operation
             result_array = ufunc(*arrays, **kwargs)
             result_dic = image_dic
-            result_dic['array'] = result_array
-
-            # Importing back to OTB
-            app = App('ExtractROI', frozen=True, image_dic=result_dic)  # pass the result_dic just to keep reference
+            result_dic["array"] = result_array
+            # Import the numpy result back to OTB, re-using result_dic to keep the georeference
+            pyotb_app = App("ExtractROI", frozen=True, quiet=True)
             if result_array.shape[2] == 1:
-                app.ImportImage('in', result_dic)
+                pyotb_app.app.ImportImage("in", result_dic)
             else:
-                app.ImportVectorImage('in', result_dic)
-            app.execute()
-            return app
-
+                pyotb_app.app.ImportVectorImage("in", result_dic)
+            pyotb_app.execute()
+            return pyotb_app
         return NotImplemented
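+    # Usage sketch: numpy ufuncs applied to a pyotb object come back as an
+    # in-memory pyotb App carrying the same georeference, e.g.
+    #   rescaled = np.sqrt(app)  # pyotb App wrapping the numpy result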
 
-    def summarize(self):
-        """Return a nested dictionary summarizing the otbObject.
+    def __hash__(self) -> int:
+        """Override the default behaviour of the hash function.
 
         Returns:
-            Nested dictionary summarizing the otbObject
+            self hash
 
         """
-        params = self.parameters
-        for k, p in params.items():
-            # In the following, we replace each parameter which is an otbObject, with its summary.
-            if isinstance(p, otbObject):  # single parameter
-                params[k] = p.summarize()
-            elif isinstance(p, list):  # parameter list
-                params[k] = [pi.summarize() if isinstance(pi, otbObject) else pi for pi in p]
+        return id(self)
 
-        return {"name": self.name, "parameters": params}
+    def __getattr__(self, item: str):
+        """Provides depreciation of old methods to access the OTB application values.
 
+        This function will be removed completely in future releases.
 
-class App(otbObject):
-    """Class of an OTB app."""
-    def __init__(self, appname, *args, frozen=False, quiet=False,
-                 preserve_dtype=False, image_dic=None, **kwargs):
-        """Enables to init an OTB application as a oneliner. Handles in-memory connection between apps.
+        Args:
+            item: attribute name
+
+        """
+        note = (
+            "Since pyotb 2.0.0, OTBObject instances have stopped to forward "
+            "attributes to their own internal otbApplication instance. "
+            "`App.app` can be used to call otbApplications methods."
+        )
+        hint = None
+
+        if item in dir(self.app):
+            hint = f"Maybe try `pyotb_app.app.{item}` instead of `pyotb_app.{item}`? "
+            if item.startswith("GetParameter"):
+                hint += (
+                    "Note: `pyotb_app.app.GetParameterValue('paramname')` can be "
+                    "shorten with `pyotb_app['paramname']` to access parameters "
+                    "values."
+                )
+        elif item in self.parameters_keys:
+            # Because in pyotb 1.5.4, app outputs were added as instance attributes
+            hint = (
+                "Note: `pyotb_app.paramname` is no longer supported. Starting "
+                "from pyotb 2.0.0, `pyotb_app['paramname']` can be used to "
+                "access parameters values. "
+            )
+        if hint:
+            depreciation_warning(f"{note} {hint}")
+        raise AttributeError(
+            f"'{type(self).__name__}' object has no attribute '{item}'"
+        )
+
+    def __getitem__(self, key) -> Any | list[float] | float | Slicer:
+        """Override the default __getitem__ behaviour.
+
+        This function enables two things:
+            - slicing, i.e. selecting a ROI and/or bands
+            - accessing pixel value(s) at a specified (row, col) index
 
         Args:
-            appname: name of the app, e.g. 'Smoothing'
-            *args: used for passing application parameters. Can be :
-                           - dictionary containing key-arguments enumeration. Useful when a key is python-reserved
-                             (e.g. "in") or contains reserved characters such as a point (e.g."mode.extent.unit")
-                           - string, App or Output, useful when the user wants to specify the input "in"
-                           - list, useful when the user wants to specify the input list 'il'
-            frozen: freeze OTB app in order to use execute() later and avoid blocking process during __init___
-            quiet: whether to print logs of the OTB app
-            preserve_dtype: propagate the pixel type from inputs to output. If several inputs, the type of an
-                                  arbitrary input is considered. If several outputs, all will have the same type.
-            image_dic: enables to keep a reference to image_dic. image_dic is a dictionary, such as
-                       the result of app.ExportImage(). Use it when the app takes a numpy array as input.
-                       See this related issue for why it is necessary to keep reference of object:
-                       https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/issues/1824
-            **kwargs: used for passing application parameters.
-                      e.g. il=['input1.tif', App_object2, App_object3.out], out='output.tif'
+            key: attribute key
+
+        Returns:
+            a list of pixel values for a multi-band image, a single pixel value, or a Slicer
+
+        Raises:
+            ValueError: if key is not a valid pixel index or slice
 
         """
-        self.appname = appname
-        self.frozen = frozen
-        self.quiet = quiet
-        self.preserve_dtype = preserve_dtype
-        self.image_dic = image_dic
-        if self.quiet:
-            self.app = otb.Registry.CreateApplicationWithoutLogger(appname)
-        else:
-            self.app = otb.Registry.CreateApplication(appname)
-        self.description = self.app.GetDocLongDescription()
-        self.output_parameters_keys = self.__get_output_parameters_keys()
-        if self.output_parameters_keys:
-            self.output_param = self.output_parameters_keys[0]
-
-        self.parameters = {}
-        if (args or kwargs):
+        # Accessing pixel value(s) using Y/X coordinates
+        if isinstance(key, tuple) and len(key) >= 2:
+            row, col = key[0], key[1]
+            if isinstance(row, int) and isinstance(col, int):
+                if row < 0 or col < 0:
+                    raise ValueError(
+                        f"{self.name} cannot read pixel value at negative coordinates ({row}, {col})"
+                    )
+                channels = key[2] if len(key) == 3 else None
+                return self.get_values_at_coords(row, col, channels)
+        # Slicing
+        if not isinstance(key, tuple) or (
+            isinstance(key, tuple) and (len(key) < 2 or len(key) > 3)
+        ):
+            raise ValueError(
+                f'"{key}" cannot be interpreted as valid slicing. Slicing should be 2D or 3D.'
+            )
+        if isinstance(key, tuple) and len(key) == 2:
+            key = key + (slice(None, None, None),)  # adding 3rd dimension
+        return Slicer(self, *key)
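+    # Usage sketch (app is a hypothetical pyotb object):
+    #   app[10, 20]         -> pixel value(s) at row 10, col 20
+    #   app[10, 20, 0]      -> value of the first band at that pixel
+    #   app[:, :, 0]        -> Slicer selecting the first band
+    #   app[0:100, 0:100]   -> Slicer selecting a 100x100 ROI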
+
+    def __repr__(self) -> str:
+        """Return a string representation with object id.
+
+        This is used as key to store image ref in Operation dicts.
+
+        """
+        return f"<pyotb.{self.__class__.__name__} object, id {id(self)}>"
+
+
+class App(OTBObject):
+    """Wrapper around otb.Application to handle settings and execution.
+
+    Base class gathering common operations for an OTB application's lifetime (settings, exec, export, etc.).
+    Any app parameter may be passed either using a dict of parameters or keyword arguments.
+
+    The first argument can be:
+        - filepath or OTBObject, the main input parameter name is automatically used
+        - list of inputs, useful when the user wants to specify the input list `il`
+        - dictionary of parameters, useful when a key is python-reserved (e.g. `in`, `map`)
+    Any key except "in" or "map" can also be passed via kwargs, replace "." with "_" e.g `map_epsg_code=4326`
+
+    Args:
+        appname: name of the OTB application to initialize, e.g. 'BandMath'
+        *args: can be a filepath, an OTB object, or a dict of parameters; several dicts will be merged in **kwargs
+        frozen: freeze the OTB app in order to avoid blocking during __init__
+        quiet: whether to print logs of the OTB app and the default progress bar
+        name: custom name that will show up in logs, appname will be used if not provided
+        **kwargs: any OTB application parameter key is accepted except "in" or "map"
+
+    """
+
+    INPUT_IMAGE_TYPES = [
+        otb.ParameterType_InputImage,
+        otb.ParameterType_InputImageList,
+    ]
+    INPUT_PARAM_TYPES = INPUT_IMAGE_TYPES + [
+        otb.ParameterType_InputVectorData,
+        otb.ParameterType_InputVectorDataList,
+        otb.ParameterType_InputFilename,
+        otb.ParameterType_InputFilenameList,
+    ]
+    OUTPUT_IMAGE_TYPES = [otb.ParameterType_OutputImage]
+    OUTPUT_PARAM_TYPES = OUTPUT_IMAGE_TYPES + [
+        otb.ParameterType_OutputVectorData,
+        otb.ParameterType_OutputFilename,
+    ]
+    INPUT_IMAGES_LIST_TYPES = [
+        otb.ParameterType_InputImageList,
+        otb.ParameterType_InputFilenameList,
+    ]
+    INPUT_LIST_TYPES = INPUT_IMAGES_LIST_TYPES + [
+        otb.ParameterType_StringList,
+        otb.ParameterType_ListView,
+        otb.ParameterType_InputVectorDataList,
+        otb.ParameterType_Band,
+    ]
+
+    def __init__(
+        self,
+        appname: str,
+        *args,
+        frozen: bool = False,
+        quiet: bool = False,
+        name: str = "",
+        **kwargs,
+    ):
+        """Common constructor for OTB applications, automatically handles in-memory connections."""
+        # Attributes and data structures used by properties
+        create = (
+            otb.Registry.CreateApplicationWithoutLogger
+            if quiet
+            else otb.Registry.CreateApplication
+        )
+        self._app = create(appname)
+        self._name = name or appname
+        self._exports_dic = {}
+        self._settings, self._auto_parameters = {}, {}
+        self._time_start, self._time_end = 0.0, 0.0
+        self.data, self.outputs = {}, {}
+        self.quiet, self.frozen = quiet, frozen
+
+        # Param keys and types
+        self.parameters_keys = tuple(self.app.GetParametersKeys())
+        self._all_param_types = {
+            key: self.app.GetParameterType(key) for key in self.parameters_keys
+        }
+        self._out_param_types = {
+            key: val
+            for key, val in self._all_param_types.items()
+            if val in self.OUTPUT_PARAM_TYPES
+        }
+        self._key_choices = {
+            key: [f"{key}.{choice}" for choice in self.app.GetChoiceKeys(key)]
+            for key in self.parameters_keys
+            if self.app.GetParameterType(key) == otb.ParameterType_Choice
+        }
+
+        # Init, execute and write (auto flush only when output param was provided)
+        if args or kwargs:
             self.set_parameters(*args, **kwargs)
+        # Create Output image objects
+        for key in (
+            key
+            for key, param in self._out_param_types.items()
+            if param == otb.ParameterType_OutputImage
+        ):
+            self.outputs[key] = Output(self, key, self._settings.get(key))
+
         if not self.frozen:
             self.execute()
+            if any(key in self._settings for key in self._out_param_types):
+                self.flush()
+        else:
+            self.__sync_parameters()  # since not called during execute()
+
+    @property
+    def name(self) -> str:
+        """Returns appname by default, or a custom name if passed during App init."""
+        return self._name
+
+    @property
+    def app(self) -> otb.Application:
+        """Reference to this app otb.Application instance."""
+        return self._app
+
+    @property
+    def parameters(self):
+        """Return used application parameters: automatic values or set by user."""
+        return {**self._auto_parameters, **self._settings}
+
+    @property
+    def exports_dic(self) -> dict[str, dict]:
+        """Reference to an internal dict object that contains numpy array exports."""
+        return self._exports_dic
+
+    def __is_one_of_types(self, key: str, param_types: list[int]) -> bool:
+        """Helper to check the type of a parameter."""
+        if key not in self._all_param_types:
+            raise KeyError(f"key {key} not found in the application parameters types")
+        return self._all_param_types[key] in param_types
+
+    def __is_multi_output(self):
+        """Check if app has multiple outputs to ensure re-execution during write()."""
+        return len(self.outputs) > 1
+
+    def is_input(self, key: str) -> bool:
+        """Returns True if the parameter key is an input."""
+        return self.__is_one_of_types(key=key, param_types=self.INPUT_PARAM_TYPES)
+
+    def is_output(self, key: str) -> bool:
+        """Returns True if the parameter key is an output."""
+        return self.__is_one_of_types(key=key, param_types=self.OUTPUT_PARAM_TYPES)
+
+    def is_key_list(self, key: str) -> bool:
+        """Check if a parameter key is an input parameter list."""
+        return self.app.GetParameterType(key) in self.INPUT_LIST_TYPES
+
+    def is_key_images_list(self, key: str) -> bool:
+        """Check if a parameter key is an input parameter image list."""
+        return self.app.GetParameterType(key) in self.INPUT_IMAGES_LIST_TYPES
+
+    def get_first_key(self, param_types: list[int]) -> str:
+        """Get the first param key for specific file types, try each list in args."""
+        for param_type in param_types:
+            # Return the first key, from the alphabetically sorted keys of the
+            # application, which has the parameter type matching param_type.
+            for key, value in sorted(self._all_param_types.items()):
+                if value == param_type:
+                    return key
+        raise TypeError(
+            f"{self.name}: could not find any key matching the provided types"
+        )
+
+    @property
+    def input_key(self) -> str:
+        """Get the name of first input parameter, raster > vector > file."""
+        return self.get_first_key(self.INPUT_PARAM_TYPES)
+
+    @property
+    def input_image_key(self) -> str:
+        """Name of the first input image parameter."""
+        return self.get_first_key(self.INPUT_IMAGE_TYPES)
+
+    @property
+    def output_key(self) -> str:
+        """Name of the first output parameter, raster > vector > file."""
+        return self.get_first_key(self.OUTPUT_PARAM_TYPES)
+
+    @property
+    def output_image_key(self) -> str:
+        """Get the name of first output image parameter."""
+        return self.get_first_key(self.OUTPUT_IMAGE_TYPES)
+
+    @property
+    def elapsed_time(self) -> float:
+        """Get elapsed time between app init and end of exec or file writing."""
+        return self._time_end - self._time_start
 
     def set_parameters(self, *args, **kwargs):
-        """Set some parameters of the app.
+        """Set parameters, using the right OTB API function depending on the key and type.
 
+        Parameters with dots may be passed as keyword arguments using "_", e.g. map_epsg_code=4326.
+        Additional checks are done for input and output (in-memory objects, remote filepaths, etc.).
         When useful, e.g. for images list, this function appends the parameters
         instead of overwriting them. Handles any parameters, i.e. in-memory & filepaths
 
         Args:
-            *args: Can be : - dictionary containing key-arguments enumeration. Useful when a key is python-reserved
-                              (e.g. "in") or contains reserved characters such as a point (e.g."mode.extent.unit")
-                            - string, App or Output, useful when the user implicitly wants to set the param "in"
-                            - list, useful when the user implicitly wants to set the param "il"
-            **kwargs: keyword arguments e.g. il=['input1.tif', oApp_object2, App_object3.out], out='output.tif'
+            *args: any input OTBObject, filepath or images list, or a dict of parameters
+            **kwargs: app parameters, with "_" instead of dots e.g. io_in="image.tif"
 
         Raises:
-            Exception: when the setting of a parameter failed
+            KeyError: when the parameter name wasn't recognized
+            RuntimeError: failed to set parameter value
 
         """
         parameters = kwargs
         parameters.update(self.__parse_args(args))
         # Going through all arguments
-        for param, obj in parameters.items():
-            if param not in self.app.GetParametersKeys():
-                raise Exception(f"{self.name}: parameter '{param}' was not recognized. "
-                                f"Available keys are {self.app.GetParametersKeys()}")
+        for key, obj in parameters.items():
+            if "_" in key:
+                key = key.replace("_", ".")
+            if key not in self.parameters_keys:
+                raise KeyError(
+                    f"{self.name}: parameter '{key}' was not recognized."
+                    f" Available keys are {self.parameters_keys}"
+                )
             # When the parameter expects a list, if needed, change the value to list
-            if self.__is_key_list(param) and not isinstance(obj, (list, tuple)):
-                parameters[param] = [obj]
+            if self.is_key_list(key) and not isinstance(obj, (list, tuple)):
                 obj = [obj]
-                logger.warning('%s: argument for parameter "%s" was converted to list', self.name, param)
+                logger.info(
+                    '%s: argument for parameter "%s" was converted to list',
+                    self.name,
+                    key,
+                )
             try:
-                # This is when we actually call self.app.SetParameter*
-                self.__set_param(param, obj)
+                if self.is_input(key):
+                    obj = self.__check_input_param(obj)
+                elif self.is_output(key):
+                    obj = self.__check_output_param(obj)
+                self.__set_param(key, obj)
             except (RuntimeError, TypeError, ValueError, KeyError) as e:
-                raise Exception(f"{self.name}: something went wrong before execution "
-                                f"(while setting parameter '{param}' to '{obj}')") from e
-        # Update _parameters using values from OtbApplication object
-        otb_params = self.app.GetParameters().items()
-        otb_params = {k: str(v) if isinstance(v, otb.ApplicationProxy) else v for k, v in otb_params}
-        self.parameters.update({**parameters, **otb_params})
-        # Update output images pixel types
-        if self.preserve_dtype:
-            self.__propagate_pixel_type()
+                raise RuntimeError(
+                    f"{self.name}: error before execution,"
+                    f" while setting '{key}' to '{obj}': {e})"
+                ) from e
+            # Save / update setting value and update the Output object initialized in __init__ without a filepath
+            self._settings[key] = obj
+            if key in self.outputs:
+                self.outputs[key].filepath = obj
+            if key in self._auto_parameters:
+                del self._auto_parameters[key]
+
+    def propagate_dtype(self, target_key: str = None, dtype: int = None):
+        """Propagate a pixel type from main input to every outputs, or to a target output key only.
+
+        With multiple inputs (if dtype is not provided), the type of the first input is considered.
+        With multiple outputs (if target_key is not provided), all outputs will be converted to the same pixel type.
+
+        Args:
+            target_key: output param key to change pixel type
+            dtype: data type to use
+
+        """
+        if not dtype:
+            param = self._settings.get(self.input_image_key)
+            if not param:
+                logger.warning(
+                    "%s: could not propagate pixel type from inputs to output",
+                    self.name,
+                )
+                return
+            if isinstance(param, (list, tuple)):
+                param = param[0]  # first image in "il"
+            try:
+                dtype = get_pixel_type(param)
+            except (TypeError, RuntimeError):
+                logger.warning(
+                    '%s: unable to identify pixel type of key "%s"', self.name, param
+                )
+                return
+        if target_key:
+            keys = [target_key]
+        else:
+            keys = [
+                k
+                for k, v in self._out_param_types.items()
+                if v == otb.ParameterType_OutputImage
+            ]
+        for key in keys:
+            self.app.SetParameterOutputImagePixelType(key, dtype)
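+    # Usage sketch: force a single output key to uint8 using an otbApplication
+    # pixel type constant (assuming "out" is an output image key of the app):
+    #   app.propagate_dtype(target_key="out", dtype=otb.ImagePixelType_uint8)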
 
     def execute(self):
         """Execute and write to disk if any output parameter has been set during init."""
         logger.debug("%s: run execute() with parameters=%s", self.name, self.parameters)
+        self._time_start = perf_counter()
         try:
             self.app.Execute()
         except (RuntimeError, FileNotFoundError) as e:
-            raise Exception(f'{self.name}: error during during app execution') from e
+            raise RuntimeError(
+                f"{self.name}: error during during app execution ({e}"
+            ) from e
         self.frozen = False
+        self._time_end = perf_counter()
         logger.debug("%s: execution ended", self.name)
-        if self.__has_output_param_key():
-            logger.debug('%s: flushing data to disk', self.name)
-            self.app.WriteOutput()
-        self.__save_objects()
+        self.__sync_parameters()
+
+    def flush(self):
+        """Flush data to disk, this is when WriteOutput is actually called."""
+        logger.debug("%s: flushing data to disk", self.name)
+        self.app.WriteOutput()
+        self._time_end = perf_counter()
+
+    @deprecated_alias(filename_extension="ext_fname")
+    def write(
+        self,
+        path: str | Path | dict[str, str] = None,
+        pixel_type: dict[str, str] | str = None,
+        preserve_dtype: bool = False,
+        ext_fname: dict[str, str] | str = None,
+        **kwargs,
+    ) -> bool:
+        """Set output pixel type and write the output raster files.
+
+        The first argument is expected to be:
+            - filepath, useful when there is only one output, e.g. 'output.tif'
+            - dictionary containing output filepath
+            - None if output file was passed during App init
+
+        In case of multiple outputs, pixel_type may also be a dictionary with parameter names as keys.
+        Accepted pixel types: uint8, uint16, uint32, int16, int32, float, double, cint16, cint32, cfloat, cdouble
 
-    def find_output(self):
-        """Find output files on disk using path found in parameters.
+        Args:
+            path: output filepath or dict of filepath with param keys
+            pixel_type: pixel type string representation
+            preserve_dtype: propagate main input pixel type to outputs, in case pixel_type is None
+            ext_fname: an OTB extended filename, will be applied to every output
+                (but won't overwrite existing keys in output filepath)
+            **kwargs: keyword arguments e.g. out='output.tif' or io_out='output.tif'
 
         Returns:
-            list of files found on disk
-
-        """
-        files = []
-        missing = []
-        outputs = [p for p in self.output_parameters_keys if p in self.parameters]
-        for param in outputs:
-            filename = self.parameters[param]
-            # Remove filename extension
-            if '?' in filename:
-                filename = filename.split('?')[0]
-            path = Path(filename)
-            if path.exists():
-                files.append(str(path.absolute()))
-            else:
-                missing.append(str(path.absolute()))
-        if missing:
-            missing = tuple(missing)
-            for filename in missing:
-                logger.error("%s: execution seems to have failed, %s does not exist", self.name, filename)
-
-        return files
-
-    # Private functions
-    def __get_output_parameters_keys(self):
-        """Get raster output parameter keys.
+            True if all files are found on disk
 
-        Returns:
-            output parameters keys
         """
-        return [param for param in self.app.GetParametersKeys()
-                if self.app.GetParameterType(param) == otb.ParameterType_OutputImage]
-
-    def __has_output_param_key(self):
-        """Check if App has any output parameter key."""
-        if not self.output_param:
-            return True  # apps like ReadImageInfo with no filetype output param still needs to WriteOutput
-        types = (otb.ParameterType_OutputFilename, otb.ParameterType_OutputImage, otb.ParameterType_OutputVectorData)
-        outfile_params = [param for param in self.app.GetParametersKeys() if self.app.GetParameterType(param) in types]
-        return any(key in self.parameters for key in outfile_params)
-
-    @staticmethod
-    def __parse_args(args):
+        # Gather all input arguments in kwargs dict
+        if isinstance(path, dict):
+            kwargs.update(path)
+        elif isinstance(path, str) and kwargs:
+            logger.warning(
+                '%s: keyword arguments specified, ignoring argument "%s"',
+                self.name,
+                path,
+            )
+        elif isinstance(path, (str, Path)) and self.output_key:
+            kwargs[self.output_key] = str(path)
+        elif not path and self.output_key in self.parameters:
+            kwargs[self.output_key] = self.parameters[self.output_key]
+        elif path is not None:
+            raise TypeError(f"{self.name}: unsupported filepath type ({type(path)})")
+        if not (kwargs or any(k in self._settings for k in self._out_param_types)):
+            raise KeyError(
+                f"{self.name}: at least one filepath is required, if not provided during App init"
+            )
+        parameters = kwargs.copy()
+
+        # Append filename extension to filenames
+        if ext_fname:
+            if not isinstance(ext_fname, (dict, str)):
+                raise ValueError("Extended filename must be a str or a dict")
+
+            def _str2dict(ext_str):
+                """Function that converts str to dict."""
+                splits = [pair.split("=") for pair in ext_str.split("&")]
+                return dict(split for split in splits if len(split) == 2)
+
+            if isinstance(ext_fname, str):
+                ext_fname = _str2dict(ext_fname)
+            logger.debug("%s: extended filename for all outputs:", self.name)
+            for key, ext in ext_fname.items():
+                logger.debug("%s: %s", key, ext)
+
+            for key, filepath in kwargs.items():
+                if self._out_param_types[key] == otb.ParameterType_OutputImage:
+                    new_ext_fname = ext_fname.copy()
+                    # Grab already set extended filename key/values
+                    if "?&" in filepath:
+                        filepath, already_set_ext = filepath.split("?&", 1)
+                        # Extensions in filepath prevail over `new_ext_fname`
+                        new_ext_fname.update(_str2dict(already_set_ext))
+                    # Transform the dict back to a str
+                    ext_fname_str = "&".join(
+                        [f"{key}={value}" for key, value in new_ext_fname.items()]
+                    )
+                    parameters[key] = f"{filepath}?&{ext_fname_str}"
+
+        # Manage output pixel types
+        data_types = {}
+        if pixel_type:
+            if isinstance(pixel_type, str):
+                dtype = parse_pixel_type(pixel_type)
+                type_name = self.app.ConvertPixelTypeToNumpy(dtype)
+                logger.debug(
+                    '%s: output(s) will be written with type "%s"', self.name, type_name
+                )
+                for key in parameters:
+                    if self._out_param_types[key] == otb.ParameterType_OutputImage:
+                        data_types[key] = dtype
+            elif isinstance(pixel_type, dict):
+                data_types = {
+                    key: parse_pixel_type(dtype) for key, dtype in pixel_type.items()
+                }
+        elif preserve_dtype:
+            self.propagate_dtype()
+
+        # Set parameters and flush to disk
+        for key, filepath in parameters.items():
+            if Path(filepath.split("?")[0]).exists():
+                logger.warning("%s: overwriting file %s", self.name, filepath)
+            if key in data_types:
+                self.propagate_dtype(key, data_types[key])
+            self.set_parameters({key: filepath})
+        # TODO: drop multioutput special case when fixed on the OTB side. See discussion in MR !102
+        if self.frozen or self.__is_multi_output():
+            self.execute()
+        self.flush()
+        if not parameters:
+            return True
+
+        # Search and log missing files
+        files, missing = [], []
+        for key, filepath in parameters.items():
+            if not filepath.startswith("/vsi"):
+                filepath = Path(filepath.split("?")[0])
+                dest = files if filepath.exists() else missing
+                dest.append(str(filepath.absolute()))
+        for filename in missing:
+            logger.error(
+                "%s: execution seems to have failed, %s does not exist",
+                self.name,
+                filename,
+            )
+        return bool(files) and not missing
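+    # Usage sketch ("output.tif" is a hypothetical path), assuming a single output image:
+    #   app.write("output.tif", pixel_type="uint16", ext_fname="nodata=0")
+    # writes "output.tif?&nodata=0" encoded as uint16 and returns True if the file exists.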
+
+    def __parse_args(self, args: list[str | OTBObject | dict | list]) -> dict[str, Any]:
         """Gather all input arguments in kwargs dict.
 
+        Args:
+            args: the list of arguments passed to set_parameters (__init__ *args)
+
         Returns:
             a dictionary with the right keyword depending on the object
 
@@ -707,238 +956,255 @@ class App(otbObject):
         for arg in args:
             if isinstance(arg, dict):
                 kwargs.update(arg)
-            elif isinstance(arg, (str, otbObject)):
-                kwargs.update({'in': arg})
-            elif isinstance(arg, list):
-                kwargs.update({'il': arg})
+            elif (
+                isinstance(arg, (str, OTBObject))
+                or isinstance(arg, list)
+                and self.is_key_list(self.input_key)
+            ):
+                kwargs.update({self.input_key: arg})
         return kwargs
 
-    def __set_param(self, param, obj):
+    def __check_input_param(
+        self, obj: list | tuple | OTBObject | str | Path
+    ) -> list | OTBObject | str:
+        """Check the type and value of an input parameter, add vsi prefixes if needed."""
+        if isinstance(obj, (list, tuple)):
+            return [self.__check_input_param(o) for o in obj]
+        if isinstance(obj, OTBObject):
+            return obj
+        if isinstance(obj, Path):
+            obj = str(obj)
+        if isinstance(obj, str):
+            if not obj.startswith("/vsi"):
+                # Remote file. TODO: add support for S3 / GS / AZ
+                if obj.startswith(("https://", "http://", "ftp://")):
+                    obj = "/vsicurl/" + obj
+                prefixes = {
+                    ".tar": "vsitar",
+                    ".tar.gz": "vsitar",
+                    ".tgz": "vsitar",
+                    ".gz": "vsigzip",
+                    ".7z": "vsi7z",
+                    ".zip": "vsizip",
+                    ".rar": "vsirar",
+                }
+                expr = r"(.*?)(\.7z|\.zip|\.rar|\.tar\.gz|\.tgz|\.tar|\.gz)(.*)"
+                parts = re.match(expr, obj)
+                if parts:
+                    file, ext = parts.group(1), parts.group(2)
+                    if not Path(file + ext).is_dir():
+                        obj = f"/{prefixes[ext]}/{obj}"
+            return obj
+        raise TypeError(f"{self.name}: wrong input parameter type ({type(obj)})")
+
+    def __check_output_param(self, obj: list | tuple | str | Path) -> list | str:
+        """Check the type and value of an output parameter."""
+        if isinstance(obj, (list, tuple)):
+            return [self.__check_output_param(o) for o in obj]
+        if isinstance(obj, Path):
+            obj = str(obj)
+        if isinstance(obj, str):
+            return obj
+        raise TypeError(f"{self.name}: wrong output parameter type ({type(obj)})")
+
+    def __set_param(
+        self, key: str, obj: str | float | list | tuple | OTBObject | otb.Application
+    ):
         """Set one parameter, decide which otb.Application method to use depending on target object."""
-        if obj is not None:
-            # Single-parameter cases
-            if isinstance(obj, otbObject):
-                self.app.ConnectImage(param, obj.app, obj.output_param)
-            elif isinstance(obj, otb.Application):  # this is for backward comp with plain OTB
-                outparamkey = [param for param in obj.GetParametersKeys()
-                               if obj.GetParameterType(param) == otb.ParameterType_OutputImage][0]
-                self.app.ConnectImage(param, obj, outparamkey)
-            elif param == 'ram':  # SetParameterValue in OTB<7.4 doesn't work for ram parameter cf gitlab OTB issue 2200
-                self.app.SetParameterInt('ram', int(obj))
-            elif not isinstance(obj, list):  # any other parameters (str, int...)
-                self.app.SetParameterValue(param, obj)
-            # Images list
-            elif self.__is_key_images_list(param):
-                # To enable possible in-memory connections, we go through the list and set the parameters one by one
-                for inp in obj:
-                    if isinstance(inp, otbObject):
-                        self.app.ConnectImage(param, inp.app, inp.output_param)
-                    elif isinstance(inp, otb.Application):  # this is for backward comp with plain OTB
-                        outparamkey = [param for param in inp.GetParametersKeys() if
-                                       inp.GetParameterType(param) == otb.ParameterType_OutputImage][0]
-                        self.app.ConnectImage(param, inp, outparamkey)
-                    else:  # here `input` should be an image filepath
-                        # Append `input` to the list, do not overwrite any previously set element of the image list
-                        self.app.AddParameterStringList(param, inp)
-            # List of any other types (str, int...)
-            else:
-                self.app.SetParameterValue(param, obj)
-
-    def __propagate_pixel_type(self):
-        """Propagate the pixel type from inputs to output.
-
-        For several inputs, or with an image list, the type of the first input is considered.
-        If several outputs, all outputs will have the same type.
-
-        """
-        pixel_type = None
-        for key, param in self.parameters.items():
-            if self.__is_key_input_image(key):
-                if not param:
-                    continue
-                if isinstance(param, list):
-                    param = param[0]  # first image in "il"
-                try:
-                    pixel_type = get_pixel_type(param)
-                    type_name = self.app.ConvertPixelTypeToNumpy(pixel_type)
-                    logger.debug('%s: output(s) will be written with type "%s"', self.name, type_name)
-                    for out_key in self.output_parameters_keys:
-                        self.app.SetParameterOutputImagePixelType(out_key, pixel_type)
-                    return
-                except TypeError:
-                    pass
-
-        logger.warning("%s: could not propagate pixel type from inputs to output, no valid input found", self.name)
+        if obj is None or (isinstance(obj, (list, tuple)) and not obj):
+            self.app.ClearValue(key)
+            return
+        # Single-parameter cases
+        if isinstance(obj, OTBObject):
+            self.app.ConnectImage(key, obj.app, obj.output_image_key)
+        elif isinstance(obj, otb.Application):
+            self.app.ConnectImage(key, obj, get_out_images_param_keys(obj)[0])
+        elif key == "ram":
+            # SetParameterValue in OTB<7.4 doesn't work for ram parameter cf OTB issue 2200
+            self.app.SetParameterInt("ram", int(obj))
+        # Any other parameters (str, int...)
+        elif not isinstance(obj, (list, tuple)):
+            self.app.SetParameterValue(key, obj)
+        # Images list
+        elif self.is_key_images_list(key):
+            for inp in obj:
+                if isinstance(inp, OTBObject):
+                    self.app.ConnectImage(key, inp.app, inp.output_image_key)
+                elif isinstance(inp, otb.Application):
+                    self.app.ConnectImage(key, inp, get_out_images_param_keys(inp)[0])
+                # Here inp is either str or Path, already checked by __check_*_param
+                else:
+                    # Append it to the list, do not overwrite any previously set element of the image list
+                    self.app.AddParameterStringList(key, inp)
+        # List of any other types (str, int...)
+        elif self.is_key_list(key):
+            self.app.SetParameterValue(key, obj)
+        else:
+            raise TypeError(
+                f"{self.name}: wrong parameter type ({type(obj)}) for '{key}'"
+            )
 
-    def __save_objects(self):
-        """Saving app parameters and outputs as attributes, so that they can be accessed with `obj.key`.
+    def __sync_parameters(self):
+        """Save app parameters in _auto_parameters or data dict.
 
-        This is useful when the key contains reserved characters such as a point eg "io.out"
+        This is always called during init or after execution, to ensure the
+        parameters property of the App is in sync with the otb.Application instance.
         """
-        for key in self.app.GetParametersKeys():
-            if key == 'parameters':  # skip forbidden attribute since it is already used by the App class
+        skip = [
+            k for k in self.parameters_keys if k.split(".")[-1] in ("ram", "default")
+        ]
+        # Prune unused choices child params
+        for key in self._key_choices:
+            choices = self._key_choices[key].copy()
+            choices.remove(f"{key}.{self.app.GetParameterValue(key)}")
+            skip.extend(
+                [k for k in self.parameters_keys if k.startswith(tuple(choices))]
+            )
+
+        self._auto_parameters.clear()
+        for key in self.parameters_keys:
+            if key in skip or key in self._settings or not self.app.HasValue(key):
                 continue
-            value = None
-            if key in self.output_parameters_keys:  # raster outputs
-                value = Output(self, key)
-            elif key in self.parameters:  # user or default app parameters
-                value = self.parameters[key]
-            else:  # any other app attribute (e.g. ReadImageInfo results)
+            value = self.app.GetParameterValue(key)
+            if isinstance(value, otb.ApplicationProxy):
                 try:
-                    value = self.app.GetParameterValue(key)
+                    value = str(value)
                 except RuntimeError:
-                    pass  # this is when there is no value for key
-            if value is not None:
-                setattr(self, key, value)
-
-    def __is_key_input_image(self, key):
-        """Check if a key of the App is an input parameter image list."""
-        return self.app.GetParameterType(key) in (otb.ParameterType_InputImage, otb.ParameterType_InputImageList)
-
-    def __is_key_list(self, key):
-        """Check if a key of the App is an input parameter list."""
-        return self.app.GetParameterType(key) in (otb.ParameterType_InputImageList, otb.ParameterType_StringList,
-                                                  otb.ParameterType_InputFilenameList, otb.ParameterType_ListView,
-                                                  otb.ParameterType_InputVectorDataList)
-
-    def __is_key_images_list(self, key):
-        """Check if a key of the App is an input parameter image list."""
-        return self.app.GetParameterType(key) in (otb.ParameterType_InputImageList, otb.ParameterType_InputFilenameList)
-
-    # Special methods
-    def __str__(self):
-        """Return a nice string representation with object id."""
-        return f'<pyotb.App {self.appname} object id {id(self)}>'
+                    continue
+            # Keep False or 0 values, but make sure to skip empty collections or str
+            if hasattr(value, "__iter__") and not value:
+                continue
+            # Here we should use AND self.app.IsParameterEnabled(key) but it's broken
+            if self.app.GetParameterRole(key) == 0 and (
+                self.app.HasAutomaticValue(key) or self.app.IsParameterEnabled(key)
+            ):
+                self._auto_parameters[key] = value
+            # Save static output data (ReadImageInfo, ComputeImageStatistics, etc.)
+            elif self.app.GetParameterRole(key) == 1:
+                if isinstance(value, str):
+                    try:
+                        value = literal_eval(value)
+                    except (ValueError, SyntaxError):
+                        pass
+                self.data[key] = value
+
+    # Special functions
+    def __getitem__(self, key: str | tuple) -> Any | list[float] | float | Slicer:
+        """This function is called when we use App()[...].
+
+        If the key is a string, return the data, output or parameter value;
+        otherwise call OTBObject.__getitem__ to read pixel values or build a Slicer.
+        """
+        if isinstance(key, tuple):
+            return super().__getitem__(key)  # to read pixel values, or slice
+        if isinstance(key, str):
+            if key in self.data:
+                return self.data[key]
+            if key in self.outputs:
+                return self.outputs[key]
+            if key in self.parameters:
+                return self.parameters[key]
+            raise KeyError(f"{self.name}: unknown or undefined parameter '{key}'")
+        raise TypeError(
+            f"{self.name}: cannot access object item or slice using {type(key)} object"
+        )
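+    # Usage sketch ("image.tif" is a hypothetical path):
+    #   info = App("ReadImageInfo", "image.tif", quiet=True)
+    #   info["sizex"]       # static output data
+    #   info["in"]          # parameter value
+    #   info[:100, :100]    # Slicer, delegated to OTBObject.__getitem__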
 
 
 class Slicer(App):
-    """Slicer objects i.e. when we call something like raster[:, :, 2] from Python."""
+    """Slicer objects, automatically created when using slicing e.g. app[:, :, 2].
 
-    def __init__(self, x, rows, cols, channels):
-        """Create a slicer object, that can be used directly for writing or inside a BandMath.
+    Can be used to select a subset of pixels and/or bands in the image.
+    This is a shortcut to an ExtractROI app that can be written to disk or used in pipelines.
 
-        It contains :
-        - an ExtractROI app that handles extracting bands and ROI and can be written to disk or used in pipelines
-        - in case the user only wants to extract one band, an expression such as "im1b#"
+    Args:
+        obj: input
+        rows: slice along Y / Latitude axis
+        cols: slice along X / Longitude axis
+        channels: bands to extract
 
-        Args:
-            x: input
-            rows: slice along Y / Latitude axis
-            cols: slice along X / Longitude axis
-            channels: channels, can be slicing, list or int
+    Raises:
+        TypeError: if channels param isn't slice, list or int
 
-        """
-        # Initialize the app that will be used for writing the slicer
-        self.name = 'Slicer'
+    """
+
+    def __init__(
+        self,
+        obj: OTBObject,
+        rows: slice,
+        cols: slice,
+        channels: slice | list[int] | int,
+    ):
+        """Create a slicer object, that can be used directly for writing or inside a BandMath."""
+        super().__init__(
+            "ExtractROI",
+            obj,
+            mode="extent",
+            quiet=True,
+            frozen=True,
+            name=f"Slicer from {obj.name}",
+        )
+        self.rows, self.cols = rows, cols
+        parameters = {}
 
-        self.output_parameter_key = 'out'
-        parameters = {'in': x, 'mode': 'extent'}
-        super().__init__('ExtractROI', parameters, preserve_dtype=True, frozen=True)
         # Channel slicing
         if channels != slice(None, None, None):
-            # Trigger source app execution if needed
-            nb_channels = get_nbchannels(x)
+            nb_channels = get_nbchannels(obj)
             self.app.Execute()  # this is needed by ExtractROI for setting the `cl` parameter
-            # if needed, converting int to list
             if isinstance(channels, int):
                 channels = [channels]
-            # if needed, converting slice to list
             elif isinstance(channels, slice):
-                channels_start = channels.start if channels.start is not None else 0
-                channels_start = channels_start if channels_start >= 0 else nb_channels + channels_start
-                channels_end = channels.stop if channels.stop is not None else nb_channels
-                channels_end = channels_end if channels_end >= 0 else nb_channels + channels_end
-                channels_step = channels.step if channels.step is not None else 1
-                channels = range(channels_start, channels_end, channels_step)
+                channels = self.channels_list_from_slice(channels)
             elif isinstance(channels, tuple):
                 channels = list(channels)
             elif not isinstance(channels, list):
-                raise ValueError(f'Invalid type for channels, should be int, slice or list of bands. : {channels}')
-
+                raise TypeError(
+                    f"Invalid type for channels ({type(channels)})."
+                    f" Should be int, slice or list of bands."
+                )
             # Change the potential negative index values to reverse index
             channels = [c if c >= 0 else nb_channels + c for c in channels]
-            parameters.update({'cl': [f'Channel{i + 1}' for i in channels]})
+            parameters.update({"cl": [f"Channel{i + 1}" for i in channels]})
 
         # Spatial slicing
         spatial_slicing = False
-        # TODO: handle PixelValue app so that accessing value is possible, e.g. raster[120, 200, 0]
-        # TODO TBD: handle the step value in the slice so that NN undersampling is possible ? e.g. raster[::2, ::2]
         if rows.start is not None:
-            parameters.update({'mode.extent.uly': rows.start})
+            parameters.update({"mode.extent.uly": rows.start})
             spatial_slicing = True
         if rows.stop is not None and rows.stop != -1:
-            parameters.update(
-                {'mode.extent.lry': rows.stop - 1})  # subtract 1 to be compliant with python convention
+            # Subtract 1 to respect python convention
+            parameters.update({"mode.extent.lry": rows.stop - 1})
             spatial_slicing = True
         if cols.start is not None:
-            parameters.update({'mode.extent.ulx': cols.start})
+            parameters.update({"mode.extent.ulx": cols.start})
             spatial_slicing = True
         if cols.stop is not None and cols.stop != -1:
-            parameters.update(
-                {'mode.extent.lrx': cols.stop - 1})  # subtract 1 to be compliant with python convention
+            # Subtract 1 to respect python convention
+            parameters.update({"mode.extent.lrx": cols.stop - 1})
             spatial_slicing = True
-        # Execute app
-        self.set_parameters(**parameters)
-        self.execute()
-
-        # These are some attributes when the user simply wants to extract *one* band to be used in an Operation
+        # When the user simply wants to extract *one* band to be used in an Operation
         if not spatial_slicing and isinstance(channels, list) and len(channels) == 1:
-            self.one_band_sliced = channels[0] + 1  # OTB convention: channels start at 1
-            self.input = x
-
-
-class Input(App):
-    """Class for transforming a filepath to pyOTB object."""
-
-    def __init__(self, filepath):
-        """Constructor for an Input object.
-
-        Args:
-            filepath: raster file path
-
-        """
-        self.filepath = filepath
-        super().__init__('ExtractROI', {'in': self.filepath}, preserve_dtype=True)
-
-    def __str__(self):
-        """Return a nice string representation with input file path."""
-        return f'<pyotb.Input object from {self.filepath}>'
+            # OTB convention: channels start at 1
+            self.one_band_sliced = channels[0] + 1
+            self.input = obj
 
-
-class Output(otbObject):
-    """Class for output of an app."""
-
-    def __init__(self, app, output_parameter_key):
-        """Constructor for an Output object.
-
-        Args:
-            app: The pyotb App
-            output_parameter_key: Output parameter key
-
-        """
-        # Keeping the OTB app and the pyotb app
-        self.pyotb_app, self.app = app, app.app
-        self.parameters = self.pyotb_app.parameters
-        self.output_param = output_parameter_key
-        self.name = f'Output {output_parameter_key} from {self.app.GetName()}'
-
-    def summarize(self):
-        """Return the summary of the pipeline that generates the Output object.
-
-        Returns:
-            Nested dictionary summarizing the pipeline that generates the Output object.
-
-        """
-        return self.pyotb_app.summarize()
-
-    def __str__(self):
-        """Return a nice string representation with object id."""
-        return f'<pyotb.Output {self.app.GetName()} object, id {id(self)}>'
+        # Execute app
+        self.set_parameters(parameters)
+        self.propagate_dtype()
+        self.execute()
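+        # Illustrative sketch (editorial, not library code): assuming a 4-band raster
+        # `img`, the slice `img[10:100, 20:200, :2]` builds a Slicer roughly equal to
+        # ExtractROI(img, mode="extent") with mode.extent.uly=10, mode.extent.lry=99,
+        # mode.extent.ulx=20, mode.extent.lrx=199 and cl=["Channel1", "Channel2"],
+        # the stop indices being exclusive as in regular python slicing.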
 
 
 class Operation(App):
     """Class for arithmetic/math operations done in Python.
 
+    Given some inputs and an operator, this object translates the python operator into a BandMath operation.
+    Operations generally involve 2 inputs (+, -...). They can have a single input for the `abs` operator,
+    or 3 inputs for the ternary operator `cond ? x : y`.
+
+    Args:
+        operator: (str) one of +, -, *, /, >, <, >=, <=, ==, !=, &, |, abs, ?
+        *inputs: operands of the expression to build
+        nb_bands: optionally specify the output nb of bands - used only internally by pyotb.where
+        name: override the default Operation name
+
     Example:
         Consider the python expression (input1 + 2 * input2)  >  0.
         This class enables to create a BandMathX app, with expression such as (im2 + 2 * im1) > 0 ? 1 : 0
@@ -957,120 +1223,130 @@ class Operation(App):
 
     """
 
-    def __init__(self, operator, *inputs, nb_bands=None):
-        """Given some inputs and an operator, this function enables to transform this into an OTB application.
-
-        Operations generally involve 2 inputs (+, -...). It can have only 1 input for `abs` operator.
-        It can have 3 inputs for the ternary operator `cond ? x : y`.
-
-        Args:
-            operator: (str) one of +, -, *, /, >, <, >=, <=, ==, !=, &, |, abs, ?
-            *inputs: inputs. Can be App, Output, Input, Operation, Slicer, filepath, int or float
-            nb_bands: to specify the output nb of bands. Optional. Used only internally by pyotb.where
-
-        """
+    def __init__(self, operator: str, *inputs, nb_bands: int = None, name: str = None):
+        """Operation constructor, one part of the logic is handled by App.__create_operator."""
         self.operator = operator
-        # We first create a 'fake' expression. E.g for the operation `input1 + input2` , we create a fake expression
-        # that is like "str(input1) + str(input2)"
+        # We first create a 'fake' expression. E.g for the operation `input1 + input2`
+        # we create a fake expression like "str(input1) + str(input2)"
         self.inputs = []
         self.nb_channels = {}
         self.fake_exp_bands = []
-        self.logical_fake_exp_bands = []
-
-        self.create_fake_exp(operator, inputs, nb_bands=nb_bands)
-
+        self.build_fake_expressions(operator, inputs, nb_bands=nb_bands)
         # Transforming images to the adequate im#, e.g. `input1` to "im1"
-        # creating a dictionary that is like {str(input1): 'im1', 'image2.tif': 'im2', ...}.
+        # using a dictionary : {str(input1): 'im1', 'image2.tif': 'im2', ...}.
         # NB: the keys of the dictionary are strings-only, instead of 'complex' objects, to enable easy serialization
         self.im_dic = {}
         self.im_count = 1
-        mapping_str_to_input = {}  # to be able to retrieve the real python object from its string representation
+        # To be able to retrieve the real python object from its string representation
+        map_repr_to_input = {}
         for inp in self.inputs:
             if not isinstance(inp, (int, float)):
                 if str(inp) not in self.im_dic:
-                    self.im_dic[str(inp)] = f'im{self.im_count}'
-                    mapping_str_to_input[str(inp)] = inp
+                    self.im_dic[repr(inp)] = f"im{self.im_count}"
+                    map_repr_to_input[repr(inp)] = inp
                     self.im_count += 1
+        # Getting unique image inputs, in the order im1, im2, im3 ...
+        self.unique_inputs = [
+            map_repr_to_input[id_str]
+            for id_str in sorted(self.im_dic, key=self.im_dic.get)
+        ]
+        self.exp_bands, self.exp = self.get_real_exp(self.fake_exp_bands)
+        appname = "BandMath" if len(self.exp_bands) == 1 else "BandMathX"
+        name = name or f'Operation exp="{self.exp}"'
+        super().__init__(
+            appname, il=self.unique_inputs, exp=self.exp, quiet=True, name=name
+        )
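+        # Illustrative walk-through (editorial assumption): for `input1 + input2` with
+        # two single-band rasters, build_fake_expressions() yields the fake expression
+        # "(<repr(input1)>b1 + <repr(input2)>b1)"; get_real_exp() then substitutes each
+        # repr with its "im#" alias, giving exp == "(im1b1 + im2b1)", and a BandMath
+        # app is created with il=[input1, input2] and that expression.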
 
-        # getting unique image inputs, in the order im1, im2, im3 ...
-        self.unique_inputs = [mapping_str_to_input[str_input] for str_input in sorted(self.im_dic, key=self.im_dic.get)]
-        self.output_param = 'out'
+    def get_nb_bands(self, inputs: list[OTBObject | str | float]) -> int:
+        """Guess the number of bands of the output image, from the inputs.
 
-        # Computing the BandMath or BandMathX app
-        self.exp_bands, self.exp = self.get_real_exp(self.fake_exp_bands)
-        self.name = f'Operation exp="{self.exp}"'
+        Args:
+            inputs: the Operation operands
 
-        appname = 'BandMath' if len(self.exp_bands) == 1 else 'BandMathX'
-        super().__init__(appname, il=self.unique_inputs, exp=self.exp)
+        Raises:
+            ValueError: if all inputs don't have the same number of bands
 
-    def create_fake_exp(self, operator, inputs, nb_bands=None):
-        """Create a 'fake' expression.
+        """
+        if any(
+            isinstance(inp, Slicer) and hasattr(inp, "one_band_sliced")
+            for inp in inputs
+        ):
+            return 1
+        # Check that all inputs have the same band count
+        nb_bands_list = [
+            get_nbchannels(inp) for inp in inputs if not isinstance(inp, (float, int))
+        ]
+        all_same = all(x == nb_bands_list[0] for x in nb_bands_list)
+        if len(nb_bands_list) > 1 and not all_same:
+            raise ValueError("All images do not have the same number of bands")
+        return nb_bands_list[0]
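+        # Illustrative examples (editorial): with inputs (raster_4b, 2.0, raster_4b)
+        # this returns 4, scalars being ignored; with a one-band Slicer such as
+        # raster_4b[:, :, 0] among the inputs, it returns 1.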
+
+    def build_fake_expressions(
+        self,
+        operator: str,
+        inputs: list[OTBObject | str | float],
+        nb_bands: int = None,
+    ):
+        """Create a list of 'fake' expressions, one for each band.
 
         E.g for the operation input1 + input2, we create a fake expression that is like "str(input1) + str(input2)"
 
         Args:
-            operator: (str) one of +, -, *, /, >, <, >=, <=, ==, !=, &, |, abs, ?
-            inputs: inputs. Can be App, Output, Input, Operation, Slicer, filepath, int or float
+            operator: one of +, -, *, /, >, <, >=, <=, ==, !=, &, |, abs, ?
+            inputs: inputs. Can be OTBObject, filepath, int or float
             nb_bands: to specify the output nb of bands. Optional. Used only internally by pyotb.where
 
+        Raises:
+            ValueError: if all inputs don't have the same number of bands
+
         """
         self.inputs.clear()
         self.nb_channels.clear()
-
         logger.debug("%s, %s", operator, inputs)
-        # this is when we use the ternary operator with `pyotb.where` function. The output nb of bands is already known
-        if operator == '?' and nb_bands:
+        # When we use the ternary operator with `pyotb.where` function, the output nb of bands is already known
+        if operator == "?" and nb_bands:
             pass
         # For any other operations, the output number of bands is the same as inputs
         else:
-            if any(isinstance(inp, Slicer) and hasattr(inp, 'one_band_sliced') for inp in inputs):
-                nb_bands = 1
-            else:
-                nb_bands_list = [get_nbchannels(inp) for inp in inputs if not isinstance(inp, (float, int))]
-                # check that all inputs have the same nb of bands
-                if len(nb_bands_list) > 1:
-                    if not all(x == nb_bands_list[0] for x in nb_bands_list):
-                        raise Exception('All images do not have the same number of bands')
-                nb_bands = nb_bands_list[0]
-
+            nb_bands = self.get_nb_bands(inputs)
         # Create a list of fake expressions, each item of the list corresponding to one band
         self.fake_exp_bands.clear()
         for i, band in enumerate(range(1, nb_bands + 1)):
-            fake_exps = []
+            expressions = []
             for k, inp in enumerate(inputs):
-                # Generating the fake expression of the current input
+                # Generating the fake expression of the current input,
                 # this is a special case for the condition of the ternary operator `cond ? x : y`
                 if len(inputs) == 3 and k == 0:
-                    # when cond is monoband whereas the result is multiband, we expand the cond to multiband
-                    if nb_bands != inp.shape[2]:
-                        cond_band = 1
-                    else:
-                        cond_band = band
-                    fake_exp, corresponding_inputs, nb_channels = self.create_one_input_fake_exp(inp, cond_band,
-                                                                                                 keep_logical=True)
-                # any other input
+                    # When cond is monoband whereas the result is multiband, we expand the cond to multiband
+                    cond_band = 1 if nb_bands != inp.shape[2] else band
+                    fake_exp, corresp_inputs, nb_channels = self.make_fake_exp(
+                        inp, cond_band, keep_logical=True
+                    )
                 else:
-                    fake_exp, corresponding_inputs, nb_channels = self.create_one_input_fake_exp(inp, band,
-                                                                                                 keep_logical=False)
-                fake_exps.append(fake_exp)
+                    # Any other input
+                    fake_exp, corresp_inputs, nb_channels = self.make_fake_exp(
+                        inp, band, keep_logical=False
+                    )
+                expressions.append(fake_exp)
                 # Reference the inputs and nb of channels (only on first pass in the loop to avoid duplicates)
-                if i == 0 and corresponding_inputs and nb_channels:
-                    self.inputs.extend(corresponding_inputs)
+                if i == 0 and corresp_inputs and nb_channels:
+                    self.inputs.extend(corresp_inputs)
                     self.nb_channels.update(nb_channels)
 
             # Generating the fake expression of the whole operation
-            if len(inputs) == 1:  # this is only for 'abs'
-                fake_exp = f'({operator}({fake_exps[0]}))'
+            if len(inputs) == 1:
+                # This is only for 'abs()'
+                fake_exp = f"({operator}({expressions[0]}))"
             elif len(inputs) == 2:
                 # We create here the "fake" expression. For example, for a BandMathX expression such as '2 * im1 + im2',
                 # the false expression stores the expression 2 * str(input1) + str(input2)
-                fake_exp = f'({fake_exps[0]} {operator} {fake_exps[1]})'
-            elif len(inputs) == 3 and operator == '?':  # this is only for ternary expression
-                fake_exp = f'({fake_exps[0]} ? {fake_exps[1]} : {fake_exps[2]})'
-
+                fake_exp = f"({expressions[0]} {operator} {expressions[1]})"
+            elif len(inputs) == 3 and operator == "?":
+                # This is only for ternary expression
+                fake_exp = f"({expressions[0]} ? {expressions[1]} : {expressions[2]})"
             self.fake_exp_bands.append(fake_exp)
 
-    def get_real_exp(self, fake_exp_bands):
+    def get_real_exp(self, fake_exp_bands: list[str]) -> tuple[list[str], str]:
         """Generates the BandMathX expression.
 
         Args:
@@ -1086,28 +1362,27 @@ class Operation(App):
         for one_band_fake_exp in fake_exp_bands:
             one_band_exp = one_band_fake_exp
             for inp in self.inputs:
-                # replace the name of in-memory object (e.g. '<pyotb.App object>b1' by 'im1b1')
-                one_band_exp = one_band_exp.replace(str(inp), self.im_dic[str(inp)])
+                # Replace the name of in-memory object (e.g. '<pyotb.App object>b1' by 'im1b1')
+                one_band_exp = one_band_exp.replace(repr(inp), self.im_dic[repr(inp)])
             exp_bands.append(one_band_exp)
-
         # Form the final expression (e.g. 'im1b1 + 1; im1b2 + 1')
-        exp = ';'.join(exp_bands)
-
-        return exp_bands, exp
+        return exp_bands, ";".join(exp_bands)
 
     @staticmethod
-    def create_one_input_fake_exp(x, band, keep_logical=False):
-        """This an internal function, only to be used by `create_fake_exp`.
+    def make_fake_exp(
+        x: OTBObject | str, band: int, keep_logical: bool = False
+    ) -> tuple[str, list[OTBObject], int]:
+        """This an internal function, only to be used by `build_fake_expressions`.
 
         Enable to create a fake expression just for one input and one band.
+        Regarding the "keep_logical" param:
+            - if True, for `input1 > input2`, returned fake expression is "str(input1) > str(input2)"
+            - if False, for `input1 > input2`, returned fake expression is "str(input1) > str(input2) ? 1 : 0". Defaults to False.
 
         Args:
             x: input
             band: which band to consider (bands start at 1)
             keep_logical: whether to keep the logical expressions "as is" in case the input is a logical operation.
-                          ex: if True, for `input1 > input2`, returned fake expression is "str(input1) > str(input2)"
-                          if False, for `input1 > input2`, returned fake exp is "str(input1) > str(input2) ? 1 : 0".
-                          Default False
 
         Returns:
             fake_exp: the fake expression for this band and input
@@ -1116,182 +1391,385 @@ class Operation(App):
 
         """
         # Special case for one-band slicer
-        if isinstance(x, Slicer) and hasattr(x, 'one_band_sliced'):
-            if keep_logical and isinstance(x.input, logicalOperation):
+        if isinstance(x, Slicer) and hasattr(x, "one_band_sliced"):
+            if keep_logical and isinstance(x.input, LogicalOperation):
                 fake_exp = x.input.logical_fake_exp_bands[x.one_band_sliced - 1]
-                inputs = x.input.inputs
-                nb_channels = x.input.nb_channels
+                inputs, nb_channels = x.input.inputs, x.input.nb_channels
             elif isinstance(x.input, Operation):
-                # keep only one band of the expression
+                # Keep only one band of the expression
                 fake_exp = x.input.fake_exp_bands[x.one_band_sliced - 1]
-                inputs = x.input.inputs
-                nb_channels = x.input.nb_channels
+                inputs, nb_channels = x.input.inputs, x.input.nb_channels
             else:
                 # Add the band number (e.g. replace '<pyotb.App object>' by '<pyotb.App object>b1')
-                fake_exp = str(x.input) + f'b{x.one_band_sliced}'
-                inputs = [x.input]
-                nb_channels = {x.input: 1}
-        # For logicalOperation, we save almost the same attributes as an Operation
-        elif keep_logical and isinstance(x, logicalOperation):
+                fake_exp = f"{repr(x.input)}b{x.one_band_sliced}"
+                inputs, nb_channels = [x.input], {repr(x.input): 1}
+        # For LogicalOperation, we save almost the same attributes as an Operation
+        elif keep_logical and isinstance(x, LogicalOperation):
             fake_exp = x.logical_fake_exp_bands[band - 1]
-            inputs = x.inputs
-            nb_channels = x.nb_channels
+            inputs, nb_channels = x.inputs, x.nb_channels
         elif isinstance(x, Operation):
             fake_exp = x.fake_exp_bands[band - 1]
-            inputs = x.inputs
-            nb_channels = x.nb_channels
+            inputs, nb_channels = x.inputs, x.nb_channels
         # For int or float input, we just need to save their value
         elif isinstance(x, (int, float)):
             fake_exp = str(x)
-            inputs = None
-            nb_channels = None
+            inputs, nb_channels = None, None
         # We go on with other inputs, i.e. pyotb objects, filepaths...
         else:
-            nb_channels = {x: get_nbchannels(x)}
-            inputs = [x]
             # Add the band number (e.g. replace '<pyotb.App object>' by '<pyotb.App object>b1')
-            fake_exp = str(x) + f'b{band}'
+            fake_exp = f"{repr(x)}b{band}"
+            inputs, nb_channels = [x], {repr(x): get_nbchannels(x)}
 
         return fake_exp, inputs, nb_channels
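+        # Illustrative examples of returned triplets (editorial):
+        #   make_fake_exp(3.5, band=2)    -> ("3.5", None, None)
+        #   make_fake_exp(raster, band=2) -> (f"{repr(raster)}b2", [raster],
+        #                                     {repr(raster): get_nbchannels(raster)})
+        #   a one-band Slicer of an Operation reuses the fake expression already built
+        #   for that band, together with the Operation inputs and channel counts.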
 
-    def __str__(self):
-        """Return a nice string representation with object id."""
-        return f'<pyotb.Operation `{self.operator}` object, id {id(self)}>'
-
+    def __repr__(self) -> str:
+        """Return a nice string representation with operator and object id."""
+        return f"<pyotb.Operation `{self.operator}` object, id {id(self)}>"
 
-class logicalOperation(Operation):
-    """A specialization of Operation class for boolean logical operations i.e. >, <, >=, <=, ==, !=, `&` and `|`.
 
-    The only difference is that not only the BandMath expression is saved (e.g. "im1b1 > 0 ? 1 : 0"), but also the
-    logical expression (e.g. "im1b1 > 0")
+class LogicalOperation(Operation):
+    """A specialization of Operation class for boolean logical operations.
 
-    """
+    Supported operators are >, <, >=, <=, ==, !=, `&` and `|`.
+    The only difference is that not only the BandMath expression is saved
+     (e.g. "im1b1 > 0 ? 1 : 0"), but also the logical expression (e.g. "im1b1 > 0")
 
-    def __init__(self, operator, *inputs, nb_bands=None):
-        """Constructor for a logicalOperation object.
-
-        Args:
-            operator: string operator (one of >, <, >=, <=, ==, !=, &, |)
-            *inputs: inputs
-            nb_bands: to specify the output nb of bands. Optional. Used only internally by pyotb.where
-
-        """
-        super().__init__(operator, *inputs, nb_bands=nb_bands)
-        self.logical_exp_bands, self.logical_exp = self.get_real_exp(self.logical_fake_exp_bands)
+    Args:
+        operator: string operator (one of >, <, >=, <=, ==, !=, &, |)
+        *inputs: inputs
+        nb_bands: optionally specify the output nb of bands - used only by pyotb.where
 
-    def create_fake_exp(self, operator, inputs, nb_bands=None):
-        """Create a 'fake' expression.
+    """
 
-        E.g for the operation input1 > input2, we create a fake expression that is like
-        "str(input1) > str(input2) ? 1 : 0" and a logical fake expression that is like "str(input1) > str(input2)"
+    def __init__(self, operator: str, *inputs, nb_bands: int = None):
+        """Constructor for a LogicalOperation object."""
+        self.logical_fake_exp_bands = []
+        super().__init__(operator, *inputs, nb_bands=nb_bands, name="LogicalOperation")
+        self.logical_exp_bands, self.logical_exp = self.get_real_exp(
+            self.logical_fake_exp_bands
+        )
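+        # Illustrative note (editorial): for `input1 > input2` on single-band rasters,
+        # self.exp holds "((im1b1 > im2b1) ? 1 : 0)" while self.logical_exp keeps the
+        # bare comparison "(im1b1 > im2b1)", so chained expressions such as
+        # (a > b) & (c > d) combine comparisons without nesting ternaries.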
+
+    def build_fake_expressions(
+        self,
+        operator: str,
+        inputs: list[OTBObject | str | float],
+        nb_bands: int = None,
+    ):
+        """Create a list of 'fake' expressions, one for each band.
+
+        For the operation input1 > input2, we create a fake expression like `str(input1) > str(input2) ? 1 : 0`
+         and a logical fake expression like `str(input1) > str(input2)`
 
         Args:
             operator: str (one of >, <, >=, <=, ==, !=, &, |)
-            inputs: Can be App, Output, Input, Operation, Slicer, filepath, int or float
-            nb_bands: to specify the output nb of bands. Optional. Used only internally by pyotb.where
+            inputs: Can be OTBObject, filepath, int or float
+            nb_bands: optionally specify the output nb of bands - used only internally by pyotb.where
 
         """
-        # For any other operations, the output number of bands is the same as inputs
-        if any(isinstance(inp, Slicer) and hasattr(inp, 'one_band_sliced') for inp in inputs):
-            nb_bands = 1
-        else:
-            nb_bands_list = [get_nbchannels(inp) for inp in inputs if not isinstance(inp, (float, int))]
-            # check that all inputs have the same nb of bands
-            if len(nb_bands_list) > 1:
-                if not all(x == nb_bands_list[0] for x in nb_bands_list):
-                    raise Exception('All images do not have the same number of bands')
-            nb_bands = nb_bands_list[0]
-
         # Create a list of fake exp, each item of the list corresponding to one band
-        for i, band in enumerate(range(1, nb_bands + 1)):
-            fake_exps = []
+        for i, band in enumerate(range(1, self.get_nb_bands(inputs) + 1)):
+            expressions = []
             for inp in inputs:
-                fake_exp, corresponding_inputs, nb_channels = super().create_one_input_fake_exp(inp, band,
-                                                                                                keep_logical=True)
-                fake_exps.append(fake_exp)
+                fake_exp, corresp_inputs, nb_channels = super().make_fake_exp(
+                    inp, band, keep_logical=True
+                )
+                expressions.append(fake_exp)
                 # Reference the inputs and nb of channels (only on first pass in the loop to avoid duplicates)
-                if i == 0 and corresponding_inputs and nb_channels:
-                    self.inputs.extend(corresponding_inputs)
+                if i == 0 and corresp_inputs and nb_channels:
+                    self.inputs.extend(corresp_inputs)
                     self.nb_channels.update(nb_channels)
-
             # We create here the "fake" expression. For example, for a BandMathX expression such as 'im1 > im2',
             # the logical fake expression stores the expression "str(input1) > str(input2)"
-            logical_fake_exp = f'({fake_exps[0]} {operator} {fake_exps[1]})'
-
+            logical_fake_exp = f"({expressions[0]} {operator} {expressions[1]})"
             # We keep the logical expression, useful if later combined with other logical operations
             self.logical_fake_exp_bands.append(logical_fake_exp)
             # We create a valid BandMath expression, e.g. "str(input1) > str(input2) ? 1 : 0"
-            fake_exp = f'({logical_fake_exp} ? 1 : 0)'
+            fake_exp = f"({logical_fake_exp} ? 1 : 0)"
             self.fake_exp_bands.append(fake_exp)
 
 
-def get_nbchannels(inp):
+class Input(App):
+    """Class for transforming a filepath to pyotb object.
+
+    Args:
+        filepath: Anything supported by GDAL (local file on the filesystem, remote resource, etc.)
+
+    """
+
+    def __init__(self, filepath: str):
+        """Initialize an ExtractROI OTB app from a filepath, set dtype and store filepath."""
+        super().__init__("ExtractROI", {"in": filepath}, quiet=True, frozen=True)
+        self._name = f"Input from {filepath}"
+        if not filepath.startswith(("/vsi", "http://", "https://", "ftp://")):
+            filepath = Path(filepath)
+        self.filepath = filepath
+        self.propagate_dtype()
+        self.execute()
+
+    def __repr__(self) -> str:
+        """Return a string representation with file path, used in Operation to store file ref."""
+        return f"<pyotb.Input object, from {self.filepath}>"
+
+
+class Output(OTBObject):
+    """Object that behave like a pointer to a specific application in-memory output or file.
+
+    Args:
+        pyotb_app: The pyotb App to store reference from
+        param_key: Output parameter key of the target app
+        filepath: path of the output file (if not memory)
+        mkdir: create missing parent directories
+
+    """
+
+    _filepath: str | Path = None
+
+    @deprecated_alias(app="pyotb_app", output_parameter_key="param_key")
+    def __init__(
+        self,
+        pyotb_app: App,
+        param_key: str = None,
+        filepath: str = None,
+        mkdir: bool = True,
+    ):
+        """Constructor for an Output object, initialized during App.__init__."""
+        self.parent_pyotb_app = pyotb_app  # keep trace of parent app
+        self.param_key = param_key
+        self.filepath = filepath
+        if mkdir and filepath is not None:
+            self.make_parent_dirs()
+
+    @property
+    def name(self) -> str:
+        """Return Output name containing filepath."""
+        return f"Output {self.param_key} from {self.parent_pyotb_app.name}"
+
+    @property
+    def app(self) -> otb.Application:
+        """Reference to the parent pyotb otb.Application instance."""
+        return self.parent_pyotb_app.app
+
+    @property
+    @deprecated_attr(replacement="parent_pyotb_app")
+    def pyotb_app(self) -> App:
+        """Reference to the parent pyotb App (deprecated)."""
+
+    @property
+    def exports_dic(self) -> dict[str, dict]:
+        """Reference to parent _exports_dic object that contains np array exports."""
+        return self.parent_pyotb_app.exports_dic
+
+    @property
+    def output_image_key(self) -> str:
+        """Force the right key to be used when accessing the OTBObject."""
+        return self.param_key
+
+    @property
+    def filepath(self) -> str | Path:
+        """Property to manage output URL."""
+        if self._filepath is None:
+            raise ValueError("Filepath is not set")
+        return self._filepath
+
+    @filepath.setter
+    def filepath(self, path: str):
+        if isinstance(path, str):
+            if path and not path.startswith(("/vsi", "http://", "https://", "ftp://")):
+                path = Path(path.split("?")[0])
+            self._filepath = path
+
+    def exists(self) -> bool:
+        """Check if the output file exist on disk.
+
+        Raises:
+            ValueError: if filepath is not set or is remote URL
+
+        """
+        if not isinstance(self.filepath, Path):
+            raise ValueError("Filepath is not set or points to a remote URL")
+        return self.filepath.exists()
+
+    def make_parent_dirs(self):
+        """Create missing parent directories.
+
+        Raises:
+            ValueError: if filepath is not set or is remote URL
+
+        """
+        if not isinstance(self.filepath, Path):
+            raise ValueError("Filepath is not set or points to a remote URL")
+        self.filepath.parent.mkdir(parents=True, exist_ok=True)
+
+    def write(self, filepath: None | str | Path = None, **kwargs) -> bool:
+        """Write output to disk, filepath is not required if it was provided to parent App during init.
+
+        Args:
+            filepath: path of the output file, can be None if a value was passed during app init
+
+        """
+        if filepath is None:
+            return self.parent_pyotb_app.write(
+                {self.output_image_key: self.filepath}, **kwargs
+            )
+        return self.parent_pyotb_app.write({self.output_image_key: filepath}, **kwargs)
+
+    def __str__(self) -> str:
+        """Return string representation of Output filepath."""
+        return str(self.filepath)
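+    # Hypothetical usage sketch (editorial): an Output keeps a pointer to its parent
+    # App, so `out.write("result.tif")` (or `out.write()` when a filepath was given at
+    # app creation) forwards to the parent app's write() for that output key, while
+    # `out.exists()` and `out.make_parent_dirs()` only apply to local, non-URL paths.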
+
+
+def get_nbchannels(inp: str | Path | OTBObject) -> int:
     """Get the nb of bands of input image.
 
     Args:
-        inp: can be filepath or pyotb object
+        inp: input file or OTBObject
 
     Returns:
         number of bands in image
 
+    Raises:
+        TypeError: if inp band count cannot be retrieved
+
     """
-    if isinstance(inp, otbObject):
-        nb_channels = inp.shape[-1]
-    else:
+    if isinstance(inp, OTBObject):
+        return inp.shape[-1]
+    if isinstance(inp, (str, Path)):
         # Executing the app, without printing its log
         try:
             info = App("ReadImageInfo", inp, quiet=True)
-            nb_channels = info.GetParameterInt("numberbands")
-        except Exception as e:  # this happens when we pass a str that is not a filepath
-            raise TypeError(f'Could not get the number of channels of `{inp}`. Not a filepath or wrong filepath') from e
-    return nb_channels
+            return info["numberbands"]
+        except RuntimeError as info_err:  # e.g. file is missing
+            raise TypeError(
+                f"Could not get the number of channels file '{inp}' ({info_err})"
+            ) from info_err
+    raise TypeError(f"Can't read number of channels of type '{type(inp)}' object {inp}")
+
 
+def get_pixel_type(inp: str | Path | OTBObject) -> str:
+    """Get the encoding of input image pixels as integer enum.
 
-def get_pixel_type(inp):
-    """Get the encoding of input image pixels.
+    OTB enum, e.g. `otbApplication.ImagePixelType_uint8`.
+    For an OTBObject with several outputs, only the pixel type of the first output is returned.
 
     Args:
-        inp: can be filepath or pyotb object
+        inp: input file or OTBObject
 
     Returns:
-        pixel_type: OTB enum e.g. `otbApplication.ImagePixelType_uint8', which actually is an int.
-                    For an App with several outputs, only the pixel type of the first output is returned
+        OTB enum
+
+    Raises:
+        TypeError: if inp pixel type cannot be retrieved
 
     """
-    if isinstance(inp, str):
-        # Executing the app, without printing its log
+    if isinstance(inp, OTBObject):
+        return inp.app.GetParameterOutputImagePixelType(inp.output_image_key)
+    if isinstance(inp, (str, Path)):
         try:
             info = App("ReadImageInfo", inp, quiet=True)
-        except Exception as info_err:  # this happens when we pass a str that is not a filepath
-            raise TypeError(f'Could not get the pixel type of `{inp}`. Not a filepath or wrong filepath') from info_err
-        datatype = info.GetParameterString("datatype")  # which is such as short, float...
-        if not datatype:
-            raise Exception(f'Unable to read pixel type of image {inp}')
-        datatype_to_pixeltype = {'unsigned_char': 'uint8', 'short': 'int16', 'unsigned_short': 'uint16',
-                                 'int': 'int32', 'unsigned_int': 'uint32', 'long': 'int32', 'ulong': 'uint32',
-                                 'float': 'float', 'double': 'double'}
-        pixel_type = datatype_to_pixeltype[datatype]
-        pixel_type = getattr(otb, f'ImagePixelType_{pixel_type}')
-    elif isinstance(inp, (otbObject)):
-        pixel_type = inp.GetParameterOutputImagePixelType(inp.output_param)
-    else:
-        raise TypeError(f'Could not get the pixel type. Not supported type: {inp}')
-
-    return pixel_type
-
-
-def parse_pixel_type(pixel_type):
+            datatype = info["datatype"]  # which is such as short, float...
+        except RuntimeError as info_err:
+            # This happens when we pass a str that is not a filepath
+            raise TypeError(
+                f"Could not get the pixel type of `{inp}` ({info_err})"
+            ) from info_err
+        if datatype:
+            return parse_pixel_type(datatype)
+    raise TypeError(f"Could not get the pixel type of {type(inp)} object {inp}")
+
+
+def parse_pixel_type(pixel_type: str | int) -> int:
     """Convert one str pixel type to OTB integer enum if necessary.
 
     Args:
-        pixel_type: pixel type. can be str, int or dict
+        pixel_type: pixel type to parse
 
     Returns:
-        pixel_type integer value
+        pixel_type OTB enum integer value
+
+    Raises:
+        KeyError: if pixel_type name is unknown
+        TypeError: if type(pixel_type) isn't int or str
 
     """
-    if isinstance(pixel_type, str):  # this correspond to 'uint8' etc...
-        return getattr(otb, f'ImagePixelType_{pixel_type}')
-    if isinstance(pixel_type, int):
+    if isinstance(pixel_type, int):  # normal OTB int enum
         return pixel_type
-    raise ValueError(f'Bad pixel type specification ({pixel_type})')
+    if isinstance(pixel_type, str):  # correspond to 'uint8' etc...
+        datatype_to_pixeltype = {
+            "unsigned_char": "uint8",
+            "short": "int16",
+            "unsigned_short": "uint16",
+            "int": "int32",
+            "unsigned_int": "uint32",
+            "long": "int32",
+            "ulong": "uint32",
+            "float": "float",
+            "double": "double",
+        }
+        if pixel_type in datatype_to_pixeltype.values():
+            return getattr(otb, f"ImagePixelType_{pixel_type}")
+        if pixel_type in datatype_to_pixeltype:
+            return getattr(otb, f"ImagePixelType_{datatype_to_pixeltype[pixel_type]}")
+        raise KeyError(
+            f"Unknown dtype `{pixel_type}`. Available ones: {datatype_to_pixeltype}"
+        )
+    raise TypeError(
+        f"Bad pixel type specification ({pixel_type} of type {type(pixel_type)})"
+    )
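+# Illustrative examples (editorial, assuming an OTB python environment):
+# parse_pixel_type("uint8"), parse_pixel_type("unsigned_char") and
+# parse_pixel_type(otb.ImagePixelType_uint8) all resolve to the same integer enum
+# value, otb.ImagePixelType_uint8.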
+
+
+def get_out_images_param_keys(otb_app: otb.Application) -> list[str]:
+    """Return every output parameter keys of a bare OTB app."""
+    return [
+        key
+        for key in otb_app.GetParametersKeys()
+        if otb_app.GetParameterType(key) == otb.ParameterType_OutputImage
+    ]
+
+
+def summarize(
+    obj: App | Output | str | float | list,
+    strip_inpath: bool = False,
+    strip_outpath: bool = False,
+) -> dict[str, dict | Any] | str | float | list:
+    """Recursively summarize parameters of an App or Output object and its parents.
+
+    At the deepest recursion level, this function just returns the parameter value,
+     path stripped if needed, or the app summarized in case of a pipeline.
+    If strip_inpath (resp. strip_outpath) is enabled, input (resp. output) paths are
+     truncated after the first "?" character. This can be useful to remove URL tokens
+     from inputs (e.g. SAS or S3 credentials), or extended filenames from outputs.
+
+    Args:
+        obj: input object / parameter value to summarize
+        strip_inpath: strip all input paths
+        strip_outpath: strip all output paths
+
+    Returns:
+        nested dictionary containing name and parameters of an app and its parents
+
+    """
+    if isinstance(obj, list):
+        return [summarize(o) for o in obj]
+    if isinstance(obj, Output):
+        return summarize(obj.parent_pyotb_app)
+    # => This is the deepest recursion level
+    if not isinstance(obj, App):
+        return obj
+
+    def strip_path(param: str | Any):
+        if isinstance(param, list):
+            return [strip_path(p) for p in param]
+        if not isinstance(param, str):
+            return summarize(param)
+        return param.split("?")[0]
+
+    # Call / top level of recursion: obj is an App
+    parameters = {}
+    # We need to return parameter values, summarized if the param is an App
+    for key, param in obj.parameters.items():
+        if strip_inpath and obj.is_input(key) or strip_outpath and obj.is_output(key):
+            parameters[key] = strip_path(param)
+        else:
+            parameters[key] = summarize(param)
+    return {"name": obj.app.GetName(), "parameters": parameters}
diff --git a/pyotb/depreciation.py b/pyotb/depreciation.py
new file mode 100644
index 0000000000000000000000000000000000000000..6794373dbf621315a9d3eaaa6263efe532d2ca23
--- /dev/null
+++ b/pyotb/depreciation.py
@@ -0,0 +1,114 @@
+"""Helps with deprecated classes and methods.
+
+Taken from https://stackoverflow.com/questions/49802412/how-to-implement-deprecation-in-python-with-argument-alias
+"""
+from typing import Callable, Dict, Any
+import functools
+import warnings
+
+
+WARN = "\033[91m"
+ENDC = "\033[0m"
+OKAY = "\033[92m"
+
+
+def depreciation_warning(message: str):
+    """Shows a warning message.
+
+    Args:
+        message: message to log
+
+    """
+    warnings.warn(
+        message=message,
+        category=DeprecationWarning,
+        stacklevel=3,
+    )
+
+
+def deprecated_alias(**aliases: str) -> Callable:
+    """Decorator for deprecated function and method arguments.
+
+    Use as follows:
+
+    @deprecated_alias(old_arg='new_arg')
+    def myfunc(new_arg):
+        ...
+
+    Args:
+        **aliases: aliases
+
+    Returns:
+        wrapped function
+
+    """
+
+    def deco(func: Callable):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            rename_kwargs(func.__name__, kwargs, aliases)
+            return func(*args, **kwargs)
+
+        return wrapper
+
+    return deco
+
+
+def rename_kwargs(func_name: str, kwargs: Dict[str, Any], aliases: Dict[str, str]):
+    """Helper function for deprecating function arguments.
+
+    Args:
+        func_name: name of the function
+        kwargs: keyword args
+        aliases: aliases
+
+    Raises:
+        ValueError: if both old and new arguments are provided
+
+    """
+    for alias, new in aliases.items():
+        if alias in kwargs:
+            if new in kwargs:
+                raise ValueError(
+                    f"{func_name} received both {alias} and {new} as arguments!"
+                    f" {alias} is deprecated, use {new} instead."
+                )
+            message = (
+                f"{WARN}`{alias}`{ENDC} is deprecated as an argument to "
+                f"`{func_name}`; use {OKAY}`{new}`{ENDC} instead."
+            )
+            depreciation_warning(message)
+            kwargs[new] = kwargs.pop(alias)
+
+
+def deprecated_attr(replacement: str) -> Callable:
+    """Decorator for deprecated attr.
+
+    Use as follows:
+
+    @deprecated_attr(replacement='new_attr')
+    def old_attr(...):
+        ...
+
+    Args:
+        replacement: name of the new attr (method or attribute)
+
+    Returns:
+        wrapped function
+
+    """
+
+    def deco(attr: Any):
+        @functools.wraps(attr)
+        def wrapper(self, *args, **kwargs):
+            depreciation_warning(
+                f"{WARN}`{attr.__name__}`{ENDC} will be removed in future "
+                f"releases. Please replace {WARN}`{attr.__name__}`{ENDC} with "
+                f"{OKAY}`{replacement}`{ENDC}."
+            )
+            out = getattr(self, replacement)
+            return out(*args, **kwargs) if isinstance(out, Callable) else out
+
+        return wrapper
+
+    return deco
diff --git a/pyotb/functions.py b/pyotb/functions.py
index eee401a3ba6638df6e86f9efe96cc8238c9a4655..fe93641f663dff43f45b749a922a8feb8eaa31d7 100644
--- a/pyotb/functions.py
+++ b/pyotb/functions.py
@@ -1,28 +1,35 @@
-# -*- coding: utf-8 -*-
 """This module provides a set of functions for pyotb."""
+from __future__ import annotations
+
 import inspect
 import os
+import subprocess
 import sys
 import textwrap
 import uuid
 from collections import Counter
+from pathlib import Path
 
-from .core import (otbObject, App, Input, Operation, logicalOperation, get_nbchannels)
+from .core import App, Input, LogicalOperation, Operation, get_nbchannels
 from .helpers import logger
 
 
-def where(cond, x, y):
+def where(cond: App | str, x: App | str | float, y: App | str | float) -> Operation:
     """Functionally similar to numpy.where. Where cond is True (!=0), returns x. Else returns y.
 
+    If cond is monoband whereas x or y are multiband, cond channels are expanded to match x & y ones.
+
     Args:
-        cond: condition, must be a raster (filepath, App, Operation...). If cond is monoband whereas x or y are
-              multiband, cond channels are expanded to match x & y ones.
-        x: value if cond is True. Can be float, int, App, filepath, Operation...
-        y: value if cond is False. Can be float, int, App, filepath, Operation...
+        cond: condition, must be a raster (filepath, App, Operation...).
+        x: value if cond is True. Can be: float, int, App, filepath, Operation...
+        y: value if cond is False. Can be: float, int, App, filepath, Operation...
 
     Returns:
         an output where pixels are x if cond is True, else y
 
+    Raises:
+        ValueError: if x and y have different number of bands
+
     """
     # Checking the number of bands of rasters. Several cases :
     # - if cond is monoband, x and y can be multibands. Then cond will adapt to match x and y nb of bands
@@ -32,62 +39,61 @@ def where(cond, x, y):
         x_nb_channels = get_nbchannels(x)
     if not isinstance(y, (int, float)):
         y_nb_channels = get_nbchannels(y)
-
     if x_nb_channels and y_nb_channels:
         if x_nb_channels != y_nb_channels:
-            raise ValueError('X and Y images do not have the same number of bands. '
-                             f'X has {x_nb_channels} bands whereas Y has {y_nb_channels} bands')
+            raise ValueError(
+                "X and Y images do not have the same number of bands. "
+                f"X has {x_nb_channels} bands whereas Y has {y_nb_channels} bands"
+            )
 
     x_or_y_nb_channels = x_nb_channels if x_nb_channels else y_nb_channels
     cond_nb_channels = get_nbchannels(cond)
-
-    # Get the number of bands of the result
-    if x_or_y_nb_channels:  # if X or Y is a raster
-        out_nb_channels = x_or_y_nb_channels
-    else:  # if only cond is a raster
-        out_nb_channels = cond_nb_channels
-
-    if cond_nb_channels != 1 and x_or_y_nb_channels and cond_nb_channels != x_or_y_nb_channels:
-        raise ValueError('Condition and X&Y do not have the same number of bands. Condition has '
-                         f'{cond_nb_channels} bands whereas X&Y have {x_or_y_nb_channels} bands')
-
+    if (
+        cond_nb_channels != 1
+        and x_or_y_nb_channels
+        and cond_nb_channels != x_or_y_nb_channels
+    ):
+        raise ValueError(
+            "Condition and X&Y do not have the same number of bands. Condition has "
+            f"{cond_nb_channels} bands whereas X&Y have {x_or_y_nb_channels} bands"
+        )
     # If needed, duplicate the single band binary mask to multiband to match the dimensions of x & y
     if cond_nb_channels == 1 and x_or_y_nb_channels and x_or_y_nb_channels != 1:
-        logger.info('The condition has one channel whereas X/Y has/have %s channels. Expanding number'
-                    ' of channels of condition to match the number of channels of X/Y', x_or_y_nb_channels)
-
-    operation = Operation('?', cond, x, y, nb_bands=out_nb_channels)
+        logger.info(
+            "The condition has one channel whereas X/Y has/have %s channels. Expanding number"
+            " of channels of condition to match the number of channels of X/Y",
+            x_or_y_nb_channels,
+        )
+    # Get the number of bands of the result
+    out_nb_channels = x_or_y_nb_channels or cond_nb_channels
 
-    return operation
+    return Operation("?", cond, x, y, nb_bands=out_nb_channels)
 
 
-def clip(a, a_min, a_max):
+def clip(image: App | str, v_min: App | str | float, v_max: App | str | float):
     """Clip values of image in a range of values.
 
     Args:
-        a: input raster, can be filepath or any pyotb object
-        a_min: minimum value of the range
-        a_max: maximum value of the range
+        image: input raster, can be filepath or any pyotb object
+        v_min: minimum value of the range
+        v_max: maximum value of the range
 
     Returns:
         raster whose values are clipped in the range
 
     """
-    if isinstance(a, str):
-        a = Input(a)
-
-    res = where(a <= a_min, a_min,
-                where(a >= a_max, a_max, a))
-    return res
+    if isinstance(image, (str, Path)):
+        image = Input(image)
+    return where(image <= v_min, v_min, where(image >= v_max, v_max, image))
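+    # Hypothetical usage sketch (editorial): `clip("image.tif", 0, 255)` is equivalent
+    # to `where(img <= 0, 0, where(img >= 255, 255, img))`, i.e. values are saturated
+    # at both ends of the range.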
 
 
 def all(*inputs):  # pylint: disable=redefined-builtin
     """Check if value is different than 0 everywhere along the band axis.
 
-    For only one image, this function checks that all bands of the image are True (i.e. !=0) and outputs
-    a singleband boolean raster
+    For only one image, this function checks that all bands of the image are True (i.e. !=0)
+     and outputs a singleband boolean raster
     For several images, this function checks that all images are True (i.e. !=0) and outputs
-    a boolean raster, with as many bands as the inputs
+     a boolean raster, with as many bands as the inputs
 
     Args:
         inputs: inputs can be 1) a single image or 2) several images, either passed as separate arguments
@@ -100,40 +106,40 @@ def all(*inputs):  # pylint: disable=redefined-builtin
     # If necessary, flatten inputs
     if len(inputs) == 1 and isinstance(inputs[0], (list, tuple)):
         inputs = inputs[0]
-
     # Add support for generator inputs (to have the same behavior as built-in `all` function)
-    if isinstance(inputs, tuple) and len(inputs) == 1 and inspect.isgenerator(inputs[0]):
+    if (
+        isinstance(inputs, tuple)
+        and len(inputs) == 1
+        and inspect.isgenerator(inputs[0])
+    ):
         inputs = list(inputs[0])
-
     # Transforming potential filepaths to pyotb objects
     inputs = [Input(inp) if isinstance(inp, str) else inp for inp in inputs]
 
     # Checking that all bands of the single image are True
     if len(inputs) == 1:
         inp = inputs[0]
-        if isinstance(inp, logicalOperation):
+        if isinstance(inp, LogicalOperation):
             res = inp[:, :, 0]
         else:
-            res = (inp[:, :, 0] != 0)
-
+            res = inp[:, :, 0] != 0
         for band in range(1, inp.shape[-1]):
-            if isinstance(inp, logicalOperation):
+            if isinstance(inp, LogicalOperation):
                 res = res & inp[:, :, band]
             else:
                 res = res & (inp[:, :, band] != 0)
+        return res
 
     # Checking that all images are True
+    if isinstance(inputs[0], LogicalOperation):
+        res = inputs[0]
     else:
-        if isinstance(inputs[0], logicalOperation):
-            res = inputs[0]
+        res = inputs[0] != 0
+    for inp in inputs[1:]:
+        if isinstance(inp, LogicalOperation):
+            res = res & inp
         else:
-            res = (inputs[0] != 0)
-        for inp in inputs[1:]:
-            if isinstance(inp, logicalOperation):
-                res = res & inp
-            else:
-                res = res & (inp != 0)
-
+            res = res & (inp != 0)
     return res
 
 
@@ -148,6 +154,7 @@ def any(*inputs):  # pylint: disable=redefined-builtin
     Args:
         inputs: inputs can be 1) a single image or 2) several images, either passed as separate arguments
                 or inside a list
+
     Returns:
         OR intersection
 
@@ -155,40 +162,41 @@ def any(*inputs):  # pylint: disable=redefined-builtin
     # If necessary, flatten inputs
     if len(inputs) == 1 and isinstance(inputs[0], (list, tuple)):
         inputs = inputs[0]
-
     # Add support for generator inputs (to have the same behavior as built-in `any` function)
-    if isinstance(inputs, tuple) and len(inputs) == 1 and inspect.isgenerator(inputs[0]):
+    if (
+        isinstance(inputs, tuple)
+        and len(inputs) == 1
+        and inspect.isgenerator(inputs[0])
+    ):
         inputs = list(inputs[0])
-
     # Transforming potential filepaths to pyotb objects
     inputs = [Input(inp) if isinstance(inp, str) else inp for inp in inputs]
 
     # Checking that at least one band of the image is True
     if len(inputs) == 1:
         inp = inputs[0]
-        if isinstance(inp, logicalOperation):
+        if isinstance(inp, LogicalOperation):
             res = inp[:, :, 0]
         else:
-            res = (inp[:, :, 0] != 0)
+            res = inp[:, :, 0] != 0
 
         for band in range(1, inp.shape[-1]):
-            if isinstance(inp, logicalOperation):
+            if isinstance(inp, LogicalOperation):
                 res = res | inp[:, :, band]
             else:
                 res = res | (inp[:, :, band] != 0)
+        return res
 
     # Checking that at least one image is True
+    if isinstance(inputs[0], LogicalOperation):
+        res = inputs[0]
     else:
-        if isinstance(inputs[0], logicalOperation):
-            res = inputs[0]
+        res = inputs[0] != 0
+    for inp in inputs[1:]:
+        if isinstance(inp, LogicalOperation):
+            res = res | inp
         else:
-            res = (inputs[0] != 0)
-        for inp in inputs[1:]:
-            if isinstance(inp, logicalOperation):
-                res = res | inp
-            else:
-                res = res | (inp != 0)
-
+            res = res | (inp != 0)
     return res
 
 
@@ -212,13 +220,19 @@ def run_tf_function(func):
     Returns:
         wrapper: a function that returns a pyotb object
 
+    Raises:
+        SystemError: if OTBTF apps are missing
+
     """
     try:
-        from .apps import TensorflowModelServe
-    except ImportError:
-        logger.error('Could not run Tensorflow function: failed to import TensorflowModelServe.'
-                     'Check that you have OTBTF configured (https://github.com/remicres/otbtf#how-to-install)')
-        raise
+        from .apps import (  # pylint: disable=import-outside-toplevel
+            TensorflowModelServe,
+        )
+    except ImportError as err:
+        raise SystemError(
+            "Could not run Tensorflow function: failed to import TensorflowModelServe."
+            "Check that you have OTBTF configured (https://github.com/remicres/otbtf#how-to-install)"
+        ) from err
 
     def get_tf_pycmd(output_dir, channels, scalar_inputs):
         """Create a string containing all python instructions necessary to create and save the Keras model.
@@ -233,14 +247,14 @@ def run_tf_function(func):
 
         """
         # Getting the string definition of the tf function (e.g. "def multiply(x1, x2):...")
-        # TODO: maybe not entirely foolproof, maybe we should use dill instead? but it would add a dependency
+        # Maybe not entirely foolproof; we could use dill instead, but it would add a dependency
         func_def_str = inspect.getsource(func)
         func_name = func.__name__
 
         create_and_save_model_str = func_def_str
-
         # Adding the instructions to create the model and save it to output dir
-        create_and_save_model_str += textwrap.dedent(f"""
+        create_and_save_model_str += textwrap.dedent(
+            f"""
             import tensorflow as tf
 
             model_inputs = []
@@ -260,11 +274,12 @@ def run_tf_function(func):
             # Create and save the .pb model
             model = tf.keras.Model(inputs=model_inputs, outputs=output)
             model.save("{output_dir}")
-            """)
+            """
+        )
 
         return create_and_save_model_str
 
-    def wrapper(*inputs, tmp_dir='/tmp'):
+    def wrapper(*inputs, tmp_dir="/tmp"):
         """For the user point of view, this function simply applies some TensorFlow operations to some rasters.
 
         Implicitly, it saves a .pb model that describe the TF operations, then creates an OTB ModelServe application
@@ -284,45 +299,64 @@ def run_tf_function(func):
         raster_inputs = []
         for inp in inputs:
             try:
-                # this is for raster input
+                # This is for raster input
                 channel = get_nbchannels(inp)
                 channels.append(channel)
                 scalar_inputs.append(None)
                 raster_inputs.append(inp)
             except TypeError:
-                # this is for other inputs (float, int)
+                # This is for other inputs (float, int)
                 channels.append(None)
                 scalar_inputs.append(inp)
 
         # Create and save the model. This is executed **inside an independent process** because (as of 2022-03),
         # tensorflow python library and OTBTF are incompatible
-        out_savedmodel = os.path.join(tmp_dir, f'tmp_otbtf_model_{uuid.uuid4()}')
+        out_savedmodel = os.path.join(tmp_dir, f"tmp_otbtf_model_{uuid.uuid4()}")
         pycmd = get_tf_pycmd(out_savedmodel, channels, scalar_inputs)
         cmd_args = [sys.executable, "-c", pycmd]
+        # TODO: remove subprocess execution since this issue has been fixed with OTBTF 4.0
         try:
-            import subprocess
-            subprocess.run(cmd_args, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
+            subprocess.run(
+                cmd_args,
+                env=os.environ,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                check=True,
+            )
         except subprocess.SubprocessError:
             logger.debug("Failed to call subprocess")
         if not os.path.isdir(out_savedmodel):
             logger.info("Failed to save the model")
 
         # Initialize the OTBTF model serving application
-        model_serve = TensorflowModelServe({'model.dir': out_savedmodel, 'optim.disabletiling': 'on',
-                                            'model.fullyconv': 'on'}, n_sources=len(raster_inputs), frozen=True)
+        model_serve = TensorflowModelServe(
+            {
+                "model.dir": out_savedmodel,
+                "optim.disabletiling": "on",
+                "model.fullyconv": "on",
+            },
+            n_sources=len(raster_inputs),
+            frozen=True,
+        )
         # Set parameters and execute
         for i, inp in enumerate(raster_inputs):
-            model_serve.set_parameters({f'source{i + 1}.il': [inp]})
+            model_serve.set_parameters({f"source{i + 1}.il": [inp]})
         model_serve.execute()
-        # TODO: handle the deletion of the temporary model ?
+        # Possible ENH: handle the deletion of the temporary model ?
 
         return model_serve
 
     return wrapper
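+# Hypothetical usage sketch (editorial, requires an OTBTF-enabled environment):
+#
+#   def scaled_sum(x1, x2):
+#       import tensorflow as tf
+#       return tf.add(x1, 0.5 * x2)
+#
+#   result = pyotb.run_tf_function(scaled_sum)("img1.tif", "img2.tif")
+#   # `result` is a TensorflowModelServe pyotb app that can be written or chained.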
 
 
-def define_processing_area(*args, window_rule='intersection', pixel_size_rule='minimal', interpolator='nn',
-                           reference_window_input=None, reference_pixel_size_input=None):
+def define_processing_area(
+    *args,
+    window_rule: str = "intersection",
+    pixel_size_rule: str = "minimal",
+    interpolator: str = "nn",
+    reference_window_input: dict = None,
+    reference_pixel_size_input: str = None,
+) -> list[App]:
     """Given several inputs, this function handles the potential resampling and cropping to same extent.
 
     WARNING: Not fully implemented / tested
@@ -346,142 +380,168 @@ def define_processing_area(*args, window_rule='intersection', pixel_size_rule='m
             inputs.extend(arg)
         else:
             inputs.append(arg)
-
     # Getting metadatas of inputs
     metadatas = {}
     for inp in inputs:
         if isinstance(inp, str):  # this is for filepaths
-            metadata = Input(inp).GetImageMetaData('out')
-        elif isinstance(inp, otbObject):
-            metadata = inp.GetImageMetaData(inp.output_param)
+            metadata = Input(inp).app.GetImageMetaData("out")
+        elif isinstance(inp, App):
+            metadata = inp.app.GetImageMetaData(inp.output_param)
         else:
             raise TypeError(f"Wrong input : {inp}")
         metadatas[inp] = metadata
 
     # Get a metadata of an arbitrary image. This is just to compare later with other images
     any_metadata = next(iter(metadatas.values()))
-
     # Checking if all images have the same projection
-    if not all(metadata['ProjectionRef'] == any_metadata['ProjectionRef']
-               for metadata in metadatas.values()):
-        logger.warning('All images may not have the same CRS, which might cause unpredictable results')
+    if not all(
+        metadata["ProjectionRef"] == any_metadata["ProjectionRef"]
+        for metadata in metadatas.values()
+    ):
+        logger.warning(
+            "All images may not have the same CRS, which might cause unpredictable results"
+        )
 
     # Handling different spatial footprints
-    # TODO: there seems to have a bug, ImageMetaData is not updated when running an app,
+    # TODO: find possible bug - ImageMetaData is not updated when running an app
     #  cf https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/issues/2234. Should we use ImageOrigin instead?
-    if not all(metadata['UpperLeftCorner'] == any_metadata['UpperLeftCorner']
-               and metadata['LowerRightCorner'] == any_metadata['LowerRightCorner']
-               for metadata in metadatas.values()):
+    if not all(
+        md["UpperLeftCorner"] == any_metadata["UpperLeftCorner"]
+        and md["LowerRightCorner"] == any_metadata["LowerRightCorner"]
+        for md in metadatas.values()
+    ):
         # Retrieving the bounding box that will be common for all inputs
-        if window_rule == 'intersection':
+        if window_rule == "intersection":
             # The coordinates depend on the orientation of the axis of projection
-            if any_metadata['GeoTransform'][1] >= 0:
-                ulx = max(metadata['UpperLeftCorner'][0] for metadata in metadatas.values())
-                lrx = min(metadata['LowerRightCorner'][0] for metadata in metadatas.values())
+            if any_metadata["GeoTransform"][1] >= 0:
+                ulx = max(md["UpperLeftCorner"][0] for md in metadatas.values())
+                lrx = min(md["LowerRightCorner"][0] for md in metadatas.values())
             else:
-                ulx = min(metadata['UpperLeftCorner'][0] for metadata in metadatas.values())
-                lrx = max(metadata['LowerRightCorner'][0] for metadata in metadatas.values())
-            if any_metadata['GeoTransform'][-1] >= 0:
-                lry = min(metadata['LowerRightCorner'][1] for metadata in metadatas.values())
-                uly = max(metadata['UpperLeftCorner'][1] for metadata in metadatas.values())
+                ulx = min(md["UpperLeftCorner"][0] for md in metadatas.values())
+                lrx = max(md["LowerRightCorner"][0] for md in metadatas.values())
+            if any_metadata["GeoTransform"][-1] >= 0:
+                lry = min(md["LowerRightCorner"][1] for md in metadatas.values())
+                uly = max(md["UpperLeftCorner"][1] for md in metadatas.values())
             else:
-                lry = max(metadata['LowerRightCorner'][1] for metadata in metadatas.values())
-                uly = min(metadata['UpperLeftCorner'][1] for metadata in metadatas.values())
-
-        elif window_rule == 'same_as_input':
-            ulx = metadatas[reference_window_input]['UpperLeftCorner'][0]
-            lrx = metadatas[reference_window_input]['LowerRightCorner'][0]
-            lry = metadatas[reference_window_input]['LowerRightCorner'][1]
-            uly = metadatas[reference_window_input]['UpperLeftCorner'][1]
-        elif window_rule == 'specify':
-            pass
-            # TODO : it is when the user explicitly specifies the bounding box -> add some arguments in the function
-        elif window_rule == 'union':
-            pass
-            # TODO : it is when the user wants the final bounding box to be the union of all bounding box
+                lry = max(md["LowerRightCorner"][1] for md in metadatas.values())
+                uly = min(md["UpperLeftCorner"][1] for md in metadatas.values())
+
+        elif window_rule == "same_as_input":
+            ulx = metadatas[reference_window_input]["UpperLeftCorner"][0]
+            lrx = metadatas[reference_window_input]["LowerRightCorner"][0]
+            lry = metadatas[reference_window_input]["LowerRightCorner"][1]
+            uly = metadatas[reference_window_input]["UpperLeftCorner"][1]
+        elif window_rule == "specify":
+            # When the user explicitly specifies the bounding box -> add some arguments in the function
+            ...
+        elif window_rule == "union":
+            # When the user wants the final bounding box to be the union of all bounding boxes
             #  It should replace any 'outside' pixel by some NoData -> add `fillvalue` argument in the function
-
-        logger.info('Cropping all images to extent Upper Left (%s, %s), Lower Right (%s, %s)', ulx, uly, lrx, lry)
+            ...
 
         # Applying this bounding box to all inputs
+        bounds = (ulx, uly, lrx, lry)
+        logger.info(
+            "Cropping all images to extent Upper Left (%s, %s), Lower Right (%s, %s)",
+            *bounds,
+        )
         new_inputs = []
         for inp in inputs:
             try:
                 params = {
-                    'in': inp, 'mode': 'extent', 'mode.extent.unit': 'phy',
-                    'mode.extent.ulx': ulx, 'mode.extent.uly': lry,  # bug in OTB <= 7.3 :
-                    'mode.extent.lrx': lrx, 'mode.extent.lry': uly,  # ULY/LRY are inverted
+                    "in": inp,
+                    "mode": "extent",
+                    "mode.extent.unit": "phy",
+                    "mode.extent.ulx": ulx,
+                    "mode.extent.uly": uly,
+                    "mode.extent.lrx": lrx,
+                    "mode.extent.lry": lry,
                 }
-                new_input = App('ExtractROI', params)
-                # TODO: OTB 7.4 fixes this bug, how to handle different versions of OTB?
+                new_input = App("ExtractROI", params, quiet=True)
                 new_inputs.append(new_input)
                 # Potentially update the reference inputs for later resampling
-                if str(inp) == str(reference_pixel_size_input):  # we use comparison of string because calling '=='
+                if str(inp) == str(reference_pixel_size_input):
+                    # We compare strings because calling '=='
                     # on pyotb objects implicitly calls BandMathX application, which is not desirable
                     reference_pixel_size_input = new_input
-            except RuntimeError as e:
-                logger.error('Cannot define the processing area for input %s: %s', inp, e)
-                raise
+            except RuntimeError as err:
+                raise ValueError(
+                    f"Cannot define the processing area for input {inp}"
+                ) from err
         inputs = new_inputs
-
         # Update metadatas
-        metadatas = {input: input.GetImageMetaData('out') for input in inputs}
+        metadatas = {inp: inp.app.GetImageMetaData("out") for inp in inputs}
 
     # Get a metadata of an arbitrary image. This is just to compare later with other images
     any_metadata = next(iter(metadatas.values()))
-
     # Handling different pixel sizes
-    if not all(metadata['GeoTransform'][1] == any_metadata['GeoTransform'][1]
-               and metadata['GeoTransform'][5] == any_metadata['GeoTransform'][5]
-               for metadata in metadatas.values()):
+    if not all(
+        md["GeoTransform"][1] == any_metadata["GeoTransform"][1]
+        and md["GeoTransform"][5] == any_metadata["GeoTransform"][5]
+        for md in metadatas.values()
+    ):
         # Retrieving the pixel size that will be common for all inputs
-        if pixel_size_rule == 'minimal':
+        if pixel_size_rule == "minimal":
             # selecting the input with the smallest x pixel size
-            reference_input = min(metadatas, key=lambda x: metadatas[x]['GeoTransform'][1])
-        if pixel_size_rule == 'maximal':
+            reference_input = min(
+                metadatas, key=lambda x: metadatas[x]["GeoTransform"][1]
+            )
+        if pixel_size_rule == "maximal":
             # selecting the input with the highest x pixel size
-            reference_input = max(metadatas, key=lambda x: metadatas[x]['GeoTransform'][1])
-        elif pixel_size_rule == 'same_as_input':
+            reference_input = max(
+                metadatas, key=lambda x: metadatas[x]["GeoTransform"][1]
+            )
+        elif pixel_size_rule == "same_as_input":
             reference_input = reference_pixel_size_input
-        elif pixel_size_rule == 'specify':
-            pass
-            # TODO : when the user explicitly specify the pixel size -> add argument inside the function
-        pixel_size = metadatas[reference_input]['GeoTransform'][1]
-        logger.info('Resampling all inputs to resolution: %s', pixel_size)
+        elif pixel_size_rule == "specify":
+            # When the user explicitly specifies the pixel size -> add argument inside the function
+            ...
+
+        pixel_size = metadatas[reference_input]["GeoTransform"][1]
 
         # Perform resampling on inputs that do not comply with the target pixel size
+        logger.info("Resampling all inputs to resolution: %s", pixel_size)
         new_inputs = []
         for inp in inputs:
-            if metadatas[inp]['GeoTransform'][1] != pixel_size:
-                superimposed = App('Superimpose', inr=reference_input, inm=inp, interpolator=interpolator)
+            if metadatas[inp]["GeoTransform"][1] != pixel_size:
+                superimposed = App(
+                    "Superimpose",
+                    inr=reference_input,
+                    inm=inp,
+                    interpolator=interpolator,
+                )
                 new_inputs.append(superimposed)
             else:
                 new_inputs.append(inp)
         inputs = new_inputs
-
-        # Update metadatas
-        metadatas = {inp: inp.GetImageMetaData('out') for inp in inputs}
+        metadatas = {inp: inp.app.GetImageMetaData("out") for inp in inputs}
 
     # Final superimposition to be sure to have the exact same image sizes
-    # Getting the sizes of images
     image_sizes = {}
     for inp in inputs:
         if isinstance(inp, str):
             inp = Input(inp)
         image_sizes[inp] = inp.shape[:2]
-
     # Selecting the most frequent image size. It will be used as reference.
     most_common_image_size, _ = Counter(image_sizes.values()).most_common(1)[0]
-    same_size_images = [inp for inp, image_size in image_sizes.items() if image_size == most_common_image_size]
+    same_size_images = [
+        inp
+        for inp, image_size in image_sizes.items()
+        if image_size == most_common_image_size
+    ]
 
     # Superimposition for images that do not have the same size as the others
     new_inputs = []
     for inp in inputs:
         if image_sizes[inp] != most_common_image_size:
-            superimposed = App('Superimpose', inr=same_size_images[0], inm=inp, interpolator=interpolator)
+            superimposed = App(
+                "Superimpose",
+                inr=same_size_images[0],
+                inm=inp,
+                interpolator=interpolator,
+            )
             new_inputs.append(superimposed)
         else:
             new_inputs.append(inp)
-    inputs = new_inputs
 
-    return inputs
+    return new_inputs
diff --git a/pyotb/helpers.py b/pyotb/helpers.py
index 742fee18be4bfbf6951d8e754cb102d470b16b4a..0e6ea2a06eee8b1b7e8cb7502d03b95861c4ef7f 100644
--- a/pyotb/helpers.py
+++ b/pyotb/helpers.py
@@ -1,24 +1,30 @@
-# -*- coding: utf-8 -*-
-"""This module provides some helpers to properly initialize pyotb."""
+"""This module ensure we properly initialize pyotb, or raise SystemExit in case of broken install."""
+import logging
 import os
 import sys
-import logging
+import sysconfig
 from pathlib import Path
 from shutil import which
 
+from .install import install_otb, interactive_config
 
 # Allow user to switch between OTB directories without setting every env variable
 OTB_ROOT = os.environ.get("OTB_ROOT")
+DOCS_URL = "https://www.orfeo-toolbox.org/CookBook/Installation.html"
 
 # Logging
 # User can also get logger with `logging.getLogger("pyOTB")`
 # then use pyotb.set_logger_level() to adjust logger verbosity
-logger = logging.getLogger("pyOTB")
+logger = logging.getLogger("pyotb")
 logger_handler = logging.StreamHandler(sys.stdout)
-formatter = logging.Formatter(fmt="%(asctime)s (%(levelname)-4s) [pyOTB] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
+formatter = logging.Formatter(
+    fmt="%(asctime)s (%(levelname)-4s) [pyotb] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
+)
 logger_handler.setFormatter(formatter)
-# Search for PYOTB_LOGGER_LEVEL, else use OTB_LOGGER_LEVEL as pyOTB level, or fallback to INFO
-LOG_LEVEL = os.environ.get("PYOTB_LOGGER_LEVEL") or os.environ.get("OTB_LOGGER_LEVEL") or "INFO"
+# Search for PYOTB_LOGGER_LEVEL, else use OTB_LOGGER_LEVEL as pyotb level, or fallback to INFO
+LOG_LEVEL = (
+    os.environ.get("PYOTB_LOGGER_LEVEL") or os.environ.get("OTB_LOGGER_LEVEL") or "INFO"
+)
 logger.setLevel(getattr(logging, LOG_LEVEL))
 # Here it would be possible to use a different level for a specific handler
 # A more verbose one can go to text file while print only errors to stdout
@@ -26,7 +32,7 @@ logger_handler.setLevel(getattr(logging, LOG_LEVEL))
 logger.addHandler(logger_handler)
 
 
-def set_logger_level(level):
+def set_logger_level(level: str):
     """Allow user to change the current logging level.
 
     Args:
@@ -36,22 +42,28 @@ def set_logger_level(level):
     logger_handler.setLevel(getattr(logging, level))
 
 
-def find_otb(prefix=OTB_ROOT, scan=True, scan_userdir=True):
-    """Try to load OTB bindings or scan system, help user in case of failure, set env variables.
+def find_otb(prefix: str = OTB_ROOT, scan: bool = True):
+    """Try to load OTB bindings or scan system, help user in case of failure, set env.
 
-    Path precedence :                                OTB_ROOT > python bindings directory
-        OR search for releases installations    :    HOME
-        OR (for linux)                          :    /opt/otbtf > /opt/otb > /usr/local > /usr
-        OR (for windows)                        :    C:/Program Files
+    If in an interactive prompt, the user will be asked whether to install OTB.
+    The OTB_ROOT variable allows one to override the default OTB version, with auto env setting.
+    Path precedence: $OTB_ROOT > location of the python bindings
+    Then, if OTB is not found:
+        search for release installations: $HOME/Applications
+        OR (for Linux): /opt/otbtf > /opt/otb > /usr/local > /usr
+        OR (for Windows): C:/Program Files
 
     Args:
         prefix: prefix to search OTB in (Default value = OTB_ROOT)
         scan: find otb in system known locations (Default value = True)
-        scan_userdir: search for OTB release in user's home directory (Default value = True)
 
     Returns:
         otbApplication module
 
+    Raises:
+        SystemError: if OTB is not found (when using interactive mode)
+        SystemExit: if OTB is not found, since pyotb won't be usable
+
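+    Example:
+        A minimal sketch of manual use, assuming OTB lives in one of the
+        locations listed above (the prefix below is purely illustrative):
+
+            # Usually triggered implicitly by `import pyotb`
+            from pyotb.helpers import find_otb
+            otb = find_otb()            # scan known locations
+            otb = find_otb("/opt/otb")  # or force a specific prefix
+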
     """
     otb = None
     # Try OTB_ROOT env variable first (allow override default OTB version)
@@ -59,51 +71,67 @@ def find_otb(prefix=OTB_ROOT, scan=True, scan_userdir=True):
         try:
             set_environment(prefix)
             import otbApplication as otb  # pylint: disable=import-outside-toplevel
+
             return otb
-        except EnvironmentError as e:
+        except SystemError as e:
             raise SystemExit(f"Failed to import OTB with prefix={prefix}") from e
         except ImportError as e:
             __suggest_fix_import(str(e), prefix)
             raise SystemExit("Failed to import OTB. Exiting.") from e
     # Else try import from actual Python path
     try:
-        # Here, we can't properly set env variables before OTB import. We assume user did this before running python
+        # Here, we can't properly set env variables before OTB import.
+        # We assume user did this before running python
         # For LD_LIBRARY_PATH problems, use OTB_ROOT instead of PYTHONPATH
         import otbApplication as otb  # pylint: disable=import-outside-toplevel
+
         if "OTB_APPLICATION_PATH" not in os.environ:
             lib_dir = __find_lib(otb_module=otb)
             apps_path = __find_apps_path(lib_dir)
             otb.Registry.SetApplicationPath(apps_path)
         return otb
     except ImportError as e:
-        PYTHONPATH = os.environ.get("PYTHONPATH")
+        pythonpath = os.environ.get("PYTHONPATH")
         if not scan:
-            raise SystemExit(f"Failed to import OTB with env PYTHONPATH={PYTHONPATH}") from e
+            raise SystemExit(
+                f"Failed to import OTB with env PYTHONPATH={pythonpath}"
+            ) from e
     # Else search system
     logger.info("Failed to import OTB. Searching for it...")
-    prefix = __find_otb_root(scan_userdir)
-    # Try to import one last time before raising error
+    prefix = __find_otb_root()
+    # Try auto install if shell is interactive
+    if not prefix and hasattr(sys, "ps1"):
+        if input("OTB is missing. Do you want to install it ? (y/n): ") == "y":
+            return find_otb(install_otb(*interactive_config()))
+        raise SystemError("OTB libraries not found on disk. ")
+    if not prefix:
+        raise SystemExit(
+            "OTB libraries not found on disk. "
+            "To install it, open an interactive python shell and 'import pyotb'"
+        )
+    # If OTB was found on disk, set env and try to import one last time
     try:
         set_environment(prefix)
         import otbApplication as otb  # pylint: disable=import-outside-toplevel
+
         return otb
-    except EnvironmentError as e:
+    except SystemError as e:
         raise SystemExit("Auto setup for OTB env failed. Exiting.") from e
-    # Unknown error
-    except ModuleNotFoundError as e:
-        raise SystemExit("Can't run without OTB installed. Exiting.") from e
     # Help user to fix this
     except ImportError as e:
         __suggest_fix_import(str(e), prefix)
         raise SystemExit("Failed to import OTB. Exiting.") from e
 
 
-def set_environment(prefix):
+def set_environment(prefix: str):
     """Set environment variables (before OTB import), raise error if anything is wrong.
 
     Args:
         prefix: path to OTB root directory
 
+    Raises:
+        SystemError: if OTB or GDAL is not found
+
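+    Note:
+        After a successful call, the variables set below include (values depend
+        on the prefix): PATH, OTB_APPLICATION_PATH, LC_NUMERIC, GDAL_DATA,
+        PROJ_LIB, GDAL_DRIVER_PATH, and LD_LIBRARY_PATH for Linux source builds.
+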
     """
     logger.info("Preparing environment for OTB in %s", prefix)
     # OTB root directory
@@ -111,33 +139,36 @@ def set_environment(prefix):
     if not prefix.exists():
         raise FileNotFoundError(str(prefix))
     built_from_source = False
-    if not (prefix / 'README').exists():
+    if not (prefix / "README").exists():
         built_from_source = True
     # External libraries
     lib_dir = __find_lib(prefix)
     if not lib_dir:
-        raise EnvironmentError("Can't find OTB external libraries")
-    # This does not seems to work
+        raise SystemError("Can't find OTB external libraries")
+    # LD library path: this does not seem to work
     if sys.platform == "linux" and built_from_source:
         new_ld_path = f"{lib_dir}:{os.environ.get('LD_LIBRARY_PATH') or ''}"
         os.environ["LD_LIBRARY_PATH"] = new_ld_path
+
     # Add python bindings directory first in PYTHONPATH
     otb_api = __find_python_api(lib_dir)
     if not otb_api:
-        raise EnvironmentError("Can't find OTB Python API")
+        raise SystemError("Can't find OTB Python API")
     if otb_api not in sys.path:
         sys.path.insert(0, otb_api)
-    # Add /bin first in PATH, in order to avoid conflicts with another GDAL install when using os.system()
+
+    # Add /bin first in PATH, in order to avoid conflicts with another GDAL install
     os.environ["PATH"] = f"{prefix / 'bin'}{os.pathsep}{os.environ['PATH']}"
-    # Applications path  (this can be tricky since OTB import will succeed even without apps)
+    # Ensure OTB_APPLICATION_PATH is set
     apps_path = __find_apps_path(lib_dir)
     if Path(apps_path).exists():
         os.environ["OTB_APPLICATION_PATH"] = apps_path
     else:
-        raise EnvironmentError("Can't find OTB applications directory")
-
+        raise SystemError("Can't find OTB applications directory")
     os.environ["LC_NUMERIC"] = "C"
     os.environ["GDAL_DRIVER_PATH"] = "disable"
+
+    # Find GDAL libs
     if (prefix / "share/gdal").exists():
         # Local GDAL (OTB Superbuild, .run, .exe)
         gdal_data = str(prefix / "share/gdal")
@@ -150,25 +181,26 @@ def set_environment(prefix):
         gdal_data = str(prefix / "share/data")
         proj_lib = str(prefix / "share/proj")
     else:
-        raise EnvironmentError(f"Can't find GDAL location with current OTB prefix '{prefix}' or in /usr")
-
+        raise SystemError(
+            f"Can't find GDAL location with current OTB prefix '{prefix}' or in /usr"
+        )
     os.environ["GDAL_DATA"] = gdal_data
     os.environ["PROJ_LIB"] = proj_lib
 
 
-def __find_lib(prefix=None, otb_module=None):
+def __find_lib(prefix: str = None, otb_module=None):
     """Try to find OTB external libraries directory.
 
     Args:
         prefix: try with OTB root directory
-        otb_module: try with OTB python module (otbApplication) library path if found, else None
+        otb_module: try with otbApplication library path if found, else None
 
     Returns:
-        lib path
+        lib path, or None if not found
 
     """
     if prefix is not None:
-        lib_dir = prefix / 'lib'
+        lib_dir = prefix / "lib"
         if lib_dir.exists():
             return lib_dir.absolute()
     if otb_module is not None:
@@ -187,14 +219,14 @@ def __find_lib(prefix=None, otb_module=None):
     return None
 
 
-def __find_python_api(lib_dir):
+def __find_python_api(lib_dir: Path):
     """Try to find the python path.
 
     Args:
         prefix: prefix
 
     Returns:
-        python API path if found, else None
+        OTB python API path, or None if not found
 
     """
     otb_api = lib_dir / "python"
@@ -206,14 +238,14 @@ def __find_python_api(lib_dir):
     return None
 
 
-def __find_apps_path(lib_dir):
+def __find_apps_path(lib_dir: Path):
     """Try to find the OTB applications path.
 
     Args:
         lib_dir: library path
 
     Returns:
-        application path if found, else empty string
+        application path, or empty string if not found
 
     """
     if lib_dir.exists():
@@ -225,14 +257,11 @@ def __find_apps_path(lib_dir):
     return ""
 
 
-def __find_otb_root(scan_userdir=False):
+def __find_otb_root():
     """Search for OTB root directory in well known locations.
 
-    Args:
-        scan_userdir: search with glob in $HOME directory
-
     Returns:
-        str path of the OTB directory
+        str path of the OTB directory, or None if not found
 
     """
     prefix = None
@@ -253,57 +282,72 @@ def __find_otb_root(scan_userdir=False):
                 prefix = path.parent.parent.parent
             else:
                 prefix = path.parent.parent
-            prefix = prefix.absolute()
     elif sys.platform == "win32":
-        for path in Path("c:/Program Files").glob("**/OTB-*/lib"):
+        for path in sorted(Path("c:/Program Files").glob("**/OTB-*/lib")):
             logger.info("Found %s", path.parent)
-            prefix = path.parent.absolute()
-    elif sys.platform == "darwin":
-        # TODO: find OTB in macOS
-        pass
-
-    # If possible, use OTB found in user's HOME tree (this may take some time)
-    if scan_userdir:
-        for path in Path().home().glob("**/OTB-*/lib"):
-            logger.info("Found %s", path.parent)
-            prefix = path.parent.absolute()
-
-    return prefix
+            prefix = path.parent
+    # Search for pyotb OTB install, or default on macOS
+    apps = Path.home() / "Applications"
+    for path in sorted(apps.glob("OTB-*/lib/")):
+        logger.info("Found %s", path.parent)
+        prefix = path.parent
+    # Return the latest prefix found (and version), see precedence in the find_otb() docstring
+    if isinstance(prefix, Path):
+        return prefix.absolute()
+    return None
 
 
-def __suggest_fix_import(error_message, prefix):
+def __suggest_fix_import(error_message: str, prefix: str):
     """Help user to fix the OTB installation with appropriate log messages."""
     logger.critical("An error occurred while importing OTB Python API")
     logger.critical("OTB error message was '%s'", error_message)
-    if sys.platform == "linux":
-        if error_message.startswith('libpython3.'):
-            logger.critical("It seems like you need to symlink or recompile python bindings")
-            if sys.executable.startswith('/usr/bin'):
-                lib = f"/usr/lib/x86_64-linux-gnu/libpython3.{sys.version_info.minor}.so"
-                if which('ctest'):
-                    logger.critical("To recompile python bindings, use 'cd %s ; source otbenv.profile ; "
-                                    "ctest -S share/otb/swig/build_wrapping.cmake -VV'", prefix)
-                elif Path(lib).exists():
-                    expect_minor = int(error_message[11])
-                    if expect_minor != sys.version_info.minor:
-                        logger.critical("Python library version mismatch (OTB was expecting 3.%s) : "
-                                        "a simple symlink may not work, depending on your python version", expect_minor)
-                    target_lib = f"{prefix}/lib/libpython3.{expect_minor}.so.rh-python3{expect_minor}-1.0"
-                    logger.critical("Use 'ln -s %s %s'", lib, target_lib)
-                else:
-                    logger.critical("You may need to install cmake in order to recompile python bindings")
-            else:
-                logger.critical("Unable to automatically locate python dynamic library of %s", sys.executable)
-            return
-    elif sys.platform == "win32":
+    if sys.platform == "win32":
         if error_message.startswith("DLL load failed"):
             if sys.version_info.minor != 7:
-                logger.critical("You need Python 3.5 (OTB releases 6.4 to 7.4) or Python 3.7 (since OTB 8)")
-                issue_link = "https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/issues/2010"
-                logger.critical("Another workaround is to recompile Python bindings with cmake, see %s", issue_link)
+                logger.critical(
+                    "You need Python 3.5 (OTB 6.4 to 7.4) or Python 3.7 (since OTB 8)"
+                )
             else:
-                logger.critical("It seems that your env variables aren't properly set,"
-                                " first use 'call otbenv.bat' then try to import pyotb once again")
+                logger.critical(
+                    "It seems that your env variables aren't properly set,"
+                    " first use 'call otbenv.bat' then try to import pyotb once again"
+                )
+    elif error_message.startswith("libpython3."):
+        logger.critical(
+            "It seems like you need to symlink or recompile python bindings"
+        )
+        if (
+            sys.executable.startswith("/usr/bin")
+            and which("ctest")
+            and which("python3-config")
+        ):
+            logger.critical(
+                "To compile, use 'cd %s ; source otbenv.profile ; "
+                "ctest -S share/otb/swig/build_wrapping.cmake -VV'",
+                prefix,
+            )
             return
-    docs_link = "https://www.orfeo-toolbox.org/CookBook/Installation.html"
-    logger.critical("You can verify installation requirements for your OS at %s", docs_link)
+        logger.critical(
+            "You may need to install cmake, python3-dev and mesa's libgl"
+            " in order to recompile python bindings"
+        )
+        expected = int(error_message[11])
+        if expected != sys.version_info.minor:
+            logger.critical(
+                "Python library version mismatch (OTB expected 3.%s) : "
+                "a symlink may not work, depending on your python version",
+                expected,
+            )
+        lib_dir = sysconfig.get_config_var("LIBDIR")
+        lib = f"{lib_dir}/libpython3.{sys.version_info.minor}.so"
+        if Path(lib).exists():
+            target = f"{prefix}/lib/libpython3.{expected}.so.1.0"
+            logger.critical("If using OTB>=8.0, try 'ln -sf %s %s'", lib, target)
+    logger.critical(
+        "You can verify installation requirements for your OS at %s", DOCS_URL
+    )
+
+
+# This part of pyotb is the first imported during __init__ and checks if OTB is found
+# If OTB isn't found, a SystemExit is raised to prevent execution of the core module
+find_otb()
diff --git a/pyotb/install.py b/pyotb/install.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cf139e474495d182dcfb504e99a8b92711f4990
--- /dev/null
+++ b/pyotb/install.py
@@ -0,0 +1,208 @@
+"""This module contains functions for interactive auto installation of OTB."""
+from __future__ import annotations
+
+import json
+import os
+import re
+import subprocess
+import sys
+import sysconfig
+import tempfile
+import urllib.request
+import zipfile
+from pathlib import Path
+from shutil import which
+
+
+def interactive_config():
+    """Prompt user to configure installation variables."""
+    version = input("Choose a version to download (default is latest): ")
+    default_dir = Path.home() / "Applications"
+    path = input(f"Parent directory for installation (default is {default_dir}): ")
+    env = input("Permanently change user's environment variables ? (y/n): ") == "y"
+    return version, path, env
+
+
+def otb_latest_release_tag():
+    """Use gitlab API to find latest release tag name, but skip pre-releases."""
+    api_endpoint = "https://gitlab.orfeo-toolbox.org/api/v4/projects/53/repository/tags"
+    vers_regex = re.compile(r"^\d\.\d\.\d$")  # we ignore rc-* or alpha-*
+    with urllib.request.urlopen(api_endpoint) as stream:
+        data = json.loads(stream.read())
+    releases = sorted(
+        [tag["name"] for tag in data if vers_regex.match(tag["name"])],
+    )
+    return releases[-1]
+
+
+def check_versions(sysname: str, python_minor: int, otb_major: int) -> tuple[bool, int]:
+    """Verify if python version is compatible with major OTB version.
+
+    Args:
+        sysname: OTB's system name convention (Linux64, Darwin64, Win64)
+        python_minor: minor version of python
+        otb_major: major version of OTB to be installed
+
+    Returns:
+        (True, 0) if compatible or (False, expected_version) in case of conflicts
+
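+    Example:
+        Values follow the mapping implemented below, e.g. OTB 8 on Linux
+        expects python 3.8:
+
+            >>> check_versions("Linux64", python_minor=8, otb_major=8)
+            (True, 0)
+            >>> check_versions("Linux64", python_minor=10, otb_major=8)
+            (False, 8)
+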
+    """
+    if sysname == "Win64":
+        expected = 5 if otb_major in (6, 7) else 7
+        if python_minor == expected:
+            return True, 0
+    elif sysname == "Darwin64":
+        expected = 7
+        if python_minor == expected:
+            return True, 0
+    elif sysname == "Linux64":
+        expected = 5 if otb_major in (6, 7) else 8
+        if python_minor == expected:
+            return True, 0
+    return False, expected
+
+
+def env_config_unix(otb_path: Path):
+    """Update env profile for current user with new otb_env.profile call.
+
+    Args:
+        otb_path: the path of the new OTB installation
+
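+    Example:
+        This simply appends one line to ~/.profile, e.g. (path shown is
+        illustrative):
+
+            . "/home/user/Applications/OTB-8.1.2-Linux64/otbenv.profile"
+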
+    """
+    profile = Path.home() / ".profile"
+    with profile.open("a", encoding="utf-8") as buf:
+        buf.write(f'\n. "{otb_path}/otbenv.profile"\n')
+        print(f"##### Added new environment variables to {profile}")
+
+
+def env_config_windows(otb_path: Path):
+    """Update user's registry hive with new OTB_ROOT env variable.
+
+    Args:
+        otb_path: path of the new OTB installation
+
+    """
+    import winreg  # pylint: disable=import-error,import-outside-toplevel
+
+    with winreg.OpenKeyEx(
+        winreg.HKEY_CURRENT_USER, "Environment", 0, winreg.KEY_SET_VALUE
+    ) as reg_key:
+        winreg.SetValueEx(reg_key, "OTB_ROOT", 0, winreg.REG_EXPAND_SZ, str(otb_path))
+        print(
+            "##### Environment variable 'OTB_ROOT' added to user's registry. "
+            "You'll need to login / logout to apply this change."
+        )
+        reg_cmd = "reg.exe delete HKEY_CURRENT_USER\\Environment /v OTB_ROOT /f"
+        print(f"To undo this, you may use '{reg_cmd}'")
+
+
+def install_otb(version: str = "latest", path: str = "", edit_env: bool = True):
+    """Install pre-compiled OTB binaries in path, use latest release by default.
+
+    Args:
+        version: OTB version tag, e.g. '8.1.2'
+        path: installation directory, default is $HOME/Applications
+        edit_env: whether or not to permanently modify user's environment variables
+
+    Returns:
+        full path of the new installation
+
+    Raises:
+        SystemExit: if python version is not compatible with major OTB version
+        SystemError: if automatic env config failed
+
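+    Example:
+        A minimal sketch of a non-interactive install (version and path are
+        illustrative, any recent release tag should work):
+
+            from pyotb.install import install_otb
+            prefix = install_otb(version="8.1.2", path="", edit_env=False)
+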
+    """
+    # Read env config
+    if sys.version_info.major == 2:
+        raise SystemExit("Python 3 is required for OTB bindings.")
+    python_minor = sys.version_info.minor
+    if not version or version == "latest":
+        version = otb_latest_release_tag()
+    name_corresp = {"linux": "Linux64", "darwin": "Darwin64", "win32": "Win64"}
+    sysname = name_corresp[sys.platform]
+    ext = "zip" if sysname == "Win64" else "run"
+    cmd = which("zsh") or which("bash") or which("sh")
+    otb_major = int(version[0])
+    check, expected = check_versions(sysname, python_minor, otb_major)
+    if sysname == "Win64" and not check:
+        raise SystemExit(
+            f"Python 3.{expected} is required to import bindings on Windows."
+        )
+
+    # Fetch archive and run installer
+    filename = f"OTB-{version}-{sysname}.{ext}"
+    url = f"https://www.orfeo-toolbox.org/packages/archives/OTB/{filename}"
+    tmpdir = tempfile.gettempdir()
+    tmpfile = Path(tmpdir) / filename
+    print(f"##### Downloading {url}")
+    urllib.request.urlretrieve(url, tmpfile)
+    if path:
+        default_path = False
+        path = Path(path)
+    else:
+        default_path = True
+        path = Path.home() / "Applications" / tmpfile.stem
+    if sysname == "Win64":
+        with zipfile.ZipFile(tmpfile) as zipf:
+            print("##### Extracting zip file")
+            # Unzip will always create a dir with OTB-version name
+            zipf.extractall(path.parent if default_path else path)
+    else:
+        install_cmd = f"{cmd} {tmpfile} --target {path} --accept"
+        print(f"##### Executing '{install_cmd}'\n")
+        subprocess.run(install_cmd, shell=True, check=True)
+    tmpfile.unlink()  # cleaning
+
+    # Add env variable to profile
+    if edit_env:
+        if sysname == "Win64":
+            env_config_windows(path)
+        else:
+            env_config_unix(path)
+    elif not default_path:
+        ext = "bat" if sysname == "Win64" else "profile"
+        print(
+            f"Remember to call '{path}{os.sep}otbenv.{ext}' before importing pyotb, "
+            f"or add 'OTB_ROOT=\"{path}\"' to your env variables."
+        )
+    # Requirements are met, no recompilation or symlink required
+    if check:
+        return str(path)
+
+    # Else try recompile bindings : can fail because of OpenGL
+    # Here we check for /usr/bin because CMake's will find_package() only there
+    if (
+        sys.executable.startswith("/usr/bin")
+        and which("ctest")
+        and which("python3-config")
+    ):
+        try:
+            print("\n!!!!! Python version mismatch, trying to recompile bindings")
+            ctest_cmd = (
+                ". ./otbenv.profile && ctest -S share/otb/swig/build_wrapping.cmake -V"
+            )
+            print(f"##### Executing '{ctest_cmd}'")
+            subprocess.run(ctest_cmd, cwd=path, check=True, shell=True)
+            return str(path)
+        except subprocess.CalledProcessError:
+            print("\nCompilation failed.")
+    # TODO: support for sudo auto build deps install using apt, pacman/yay, brew...
+    print(
+        "You need cmake, python3-dev and libgl1-mesa-dev installed."
+        "\nTrying to symlink libraries instead - this may fail with newest versions."
+    )
+
+    # Finally try with cross version python symlink (only tested on Ubuntu)
+    suffix = "so.1.0" if otb_major >= 8 else f"so.rh-python3{expected}-1.0"
+    target_lib = f"{path}/lib/libpython3.{expected}.{suffix}"
+    lib_dir = sysconfig.get_config_var("LIBDIR")
+    lib = f"{lib_dir}/libpython3.{sys.version_info.minor}.so"
+    if Path(lib).exists():
+        print(f"##### Creating symbolic link: {lib} -> {target_lib}")
+        ln_cmd = f'ln -sf "{lib}" "{target_lib}"'
+        subprocess.run(ln_cmd, executable=cmd, shell=True, check=True)
+        return str(path)
+    raise SystemError(
+        f"Unable to automatically locate library for executable '{sys.executable}', "
+        f"you could manually create a symlink from that file to {target_lib}"
+    )
diff --git a/pyproject.toml b/pyproject.toml
index ddb1465f3a361addd19954bd24367980b66ec754..fdaa0a35167bea61244a4d4cfb500c2bbf7cd7d2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,7 +1,75 @@
 [build-system]
-requires = [
-    "setuptools>=42",
-    "wheel",
-    "numpy>=1.13,<1.23"
-]
+requires = ["setuptools >= 61.0", "wheel"]
 build-backend = "setuptools.build_meta"
+
+[project]
+name = "pyotb"
+description = "Library to enable easy use of the Orfeo ToolBox (OTB) in Python"
+authors = [
+    { name = "Rémi Cresson", email = "remi.cresson@inrae.fr" },
+    { name = "Nicolas Narçon" },
+    { name = "Vincent Delbar" },
+]
+requires-python = ">=3.7"
+keywords = ["gis", "remote sensing", "otb", "orfeotoolbox", "orfeo toolbox"]
+dependencies = ["numpy>=1.16"]
+readme = "README.md"
+license = { text = "Apache-2.0" }
+dynamic = ["version"]
+classifiers = [
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.7",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Topic :: Scientific/Engineering :: GIS",
+    "Topic :: Scientific/Engineering :: Image Processing",
+    "License :: OSI Approved :: Apache Software License",
+    "Operating System :: OS Independent",
+]
+
+[project.optional-dependencies]
+dev = [
+    "pytest",
+    "pytest-cov",
+    "pylint",
+    "codespell",
+    "pydocstyle",
+    "tomli",
+    "requests",
+]
+
+[project.urls]
+documentation = "https://pyotb.readthedocs.io"
+homepage = "https://github.com/orfeotoolbox/pyotb"
+repository = "https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb"
+
+[tool.setuptools]
+packages = ["pyotb"]
+
+[tool.setuptools.dynamic]
+version = { attr = "pyotb.__version__" }
+
+[tool.pylint]
+max-line-length = 88
+max-module-lines = 2000
+good-names = ["x", "y", "i", "j", "k", "e"]
+disable = [
+    "line-too-long",
+    "too-many-locals",
+    "too-many-branches",
+    "too-many-statements",
+    "too-many-instance-attributes",
+]
+
+[tool.pydocstyle]
+convention = "google"
+
+[tool.black]
+line-length = 88
+
+[tool.pytest.ini_options]
+minversion = "7.0"
+addopts = "--color=yes --cov=pyotb --no-cov-on-fail --cov-report term"
+testpaths = ["tests"]
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 778d2740a0ec3dd1d980878d620f444953ee0dee..0000000000000000000000000000000000000000
--- a/setup.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-[metadata]
-description-file=README.md
-
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 519ba5d8e87a593b9d5caa06e23a39131766b655..0000000000000000000000000000000000000000
--- a/setup.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# -*- coding: utf-8 -*-
-import setuptools
-
-with open("README.md", "r", encoding="utf-8") as fh:
-    long_description = fh.read()
-
-setuptools.setup(
-    name="pyotb",
-    version="1.5.4",
-    author="Nicolas Narçon",
-    author_email="nicolas.narcon@gmail.com",
-    description="Library to enable easy use of the Orfeo Tool Box (OTB) in Python",
-    long_description=long_description,
-    long_description_content_type="text/markdown",
-    url="https://gitlab.orfeo-toolbox.org/nicolasnn/pyotb/",
-    classifiers=[
-        "Programming Language :: Python :: 3",
-        "Programming Language :: Python :: 3.6",
-        "Programming Language :: Python :: 3.7",
-        "Programming Language :: Python :: 3.8",
-        "Programming Language :: Python :: 3.9",
-        "Topic :: Scientific/Engineering :: GIS",
-        "Topic :: Scientific/Engineering :: Image Processing",
-        "License :: OSI Approved :: Apache Software License",
-        "Operating System :: OS Independent",
-    ],
-    packages=setuptools.find_packages(),
-    python_requires=">=3.6",
-    keywords="remote sensing, otb, orfeotoolbox, orfeo toolbox",
-)
-#package_dir={"": "src"},
diff --git a/tests/pipeline_summary.json b/tests/pipeline_summary.json
new file mode 100644
index 0000000000000000000000000000000000000000..443f9ef09cf9676debe889a9d775c43924fde596
--- /dev/null
+++ b/tests/pipeline_summary.json
@@ -0,0 +1,121 @@
+{
+  "SIMPLE": {
+    "name": "ManageNoData",
+    "parameters": {
+      "usenan": false,
+      "mode": "buildmask",
+      "mode.buildmask.inv": 1.0,
+      "mode.buildmask.outv": 0.0,
+      "in": {
+        "name": "BandMath",
+        "parameters": {
+          "il": [
+            {
+              "name": "OrthoRectification",
+              "parameters": {
+                "map": "utm",
+                "map.utm.zone": 31,
+                "map.utm.northhem": true,
+                "outputs.mode": "auto",
+                "outputs.ulx": 560000.8382510637,
+                "outputs.uly": 5495732.692591702,
+                "outputs.sizex": 251,
+                "outputs.sizey": 304,
+                "outputs.spacingx": 5.997312290795521,
+                "outputs.spacingy": -5.997312290795521,
+                "outputs.lrx": 561506.1636360534,
+                "outputs.lry": 5493909.5096553,
+                "outputs.isotropic": true,
+                "interpolator": "bco",
+                "interpolator.bco.radius": 2,
+                "opt.rpc": 10,
+                "opt.gridspacing": 4.0,
+                "io.in": "/vsicurl/https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/raw/develop/Data/Input/SP67_FR_subset_1.tif"
+              }
+            }
+          ],
+          "exp": "im1b1"
+        }
+      }
+    }
+  },
+  "DIAMOND": {
+    "name": "BandMathX",
+    "parameters": {
+      "il": [
+        {
+          "name": "OrthoRectification",
+          "parameters": {
+            "map": "utm",
+            "map.utm.zone": 31,
+            "map.utm.northhem": true,
+            "outputs.mode": "auto",
+            "outputs.ulx": 560000.8382510637,
+            "outputs.uly": 5495732.692591702,
+            "outputs.sizex": 251,
+            "outputs.sizey": 304,
+            "outputs.spacingx": 5.997312290795521,
+            "outputs.spacingy": -5.997312290795521,
+            "outputs.lrx": 561506.1636360534,
+            "outputs.lry": 5493909.5096553,
+            "outputs.isotropic": true,
+            "interpolator": "bco",
+            "interpolator.bco.radius": 2,
+            "opt.rpc": 10,
+            "opt.gridspacing": 4.0,
+            "io.in": {
+              "name": "BandMath",
+              "parameters": {
+                "il": [
+                  "/vsicurl/https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/raw/develop/Data/Input/SP67_FR_subset_1.tif"
+                ],
+                "exp": "im1b1"
+              }
+            }
+          }
+        },
+        {
+          "name": "ManageNoData",
+          "parameters": {
+            "usenan": false,
+            "mode": "buildmask",
+            "mode.buildmask.inv": 1.0,
+            "mode.buildmask.outv": 0.0,
+            "in": {
+              "name": "OrthoRectification",
+              "parameters": {
+                "map": "utm",
+                "map.utm.zone": 31,
+                "map.utm.northhem": true,
+                "outputs.mode": "auto",
+                "outputs.ulx": 560000.8382510637,
+                "outputs.uly": 5495732.692591702,
+                "outputs.sizex": 251,
+                "outputs.sizey": 304,
+                "outputs.spacingx": 5.997312290795521,
+                "outputs.spacingy": -5.997312290795521,
+                "outputs.lrx": 561506.1636360534,
+                "outputs.lry": 5493909.5096553,
+                "outputs.isotropic": true,
+                "interpolator": "bco",
+                "interpolator.bco.radius": 2,
+                "opt.rpc": 10,
+                "opt.gridspacing": 4.0,
+                "io.in": {
+                  "name": "BandMath",
+                  "parameters": {
+                    "il": [
+                      "/vsicurl/https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/raw/develop/Data/Input/SP67_FR_subset_1.tif"
+                    ],
+                    "exp": "im1b1"
+                  }
+                }
+              }
+            }
+          }
+        }
+      ],
+      "exp": "im1+im2"
+    }
+  }
+}
diff --git a/tests/test_core.py b/tests/test_core.py
index abefc80c7c5a03c06360e900d2e7dabee4e116b3..8b3f8fae5fbe66124ceb3c6911e09aca11ba010b 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -1,88 +1,486 @@
-import os
-import pyotb
-from ast import literal_eval
-from pathlib import Path
+import pytest
+import numpy as np
 
+from tests_data import *
 
-FILEPATH = os.environ["TEST_INPUT_IMAGE"]
-INPUT = pyotb.Input(FILEPATH)
 
+def test_app_parameters():
+    # Input / ExtractROI
+    assert INPUT.parameters
+    assert (INPUT.parameters["sizex"], INPUT.parameters["sizey"]) == (251, 304)
+    # OrthoRectification
+    app = pyotb.OrthoRectification(INPUT)
+    assert isinstance(app.parameters["map"], str)
+    assert app.parameters["map"] == "utm"
+    assert "map" in app._auto_parameters
+    app.set_parameters({"map": "epsg", "map.epsg.code": 2154})
+    assert app.parameters["map"] == "epsg"
+    assert "map" in app._settings and "map" not in app._auto_parameters
+    assert app.parameters["map.epsg.code"] == app.app.GetParameters()["map.epsg.code"]
+    # Orthorectification with underscore kwargs
+    app = pyotb.OrthoRectification(io_in=INPUT, map_epsg_code=2154)
+    assert app.parameters["map.epsg.code"] == 2154
+    # ManageNoData
+    app = pyotb.ManageNoData(INPUT)
+    assert "usenan" in app._auto_parameters
+    assert "mode.buildmask.inv" in app._auto_parameters
+    # OpticalCalibration
+    app = pyotb.OpticalCalibration(pyotb.Input(PLEIADES_IMG_URL), level="toa")
+    assert "milli" in app._auto_parameters
+    assert "clamp" in app._auto_parameters
+    assert app._auto_parameters["acqui.year"] == 2012
+    assert app._auto_parameters["acqui.sun.elev"] == 23.836299896240234
 
-# Basic tests
-def test_dtype():
-    assert INPUT.dtype == "uint8"
+
+def test_app_properties():
+    assert INPUT.input_key == INPUT.input_image_key == "in"
+    assert INPUT.output_key == INPUT.output_image_key == "out"
+    with pytest.raises(KeyError):
+        pyotb.BandMath(INPUT, expression="im1b1")
+    # Test user can set custom name
+    app = pyotb.App("BandMath", [INPUT], exp="im1b1", name="TestName")
+    assert app.name == "TestName"
+    # Test data dict is not empty
+    app = pyotb.ReadImageInfo(INPUT)
+    assert app.data
+    # Test elapsed time is not null
+    assert 0 < app.elapsed_time < 1
 
 
-def test_shape():
+def test_app_input_vsi():
+    # Ensure the old way still works: ExtractROI will raise RuntimeError if a path is malformed
+    pyotb.Input("/vsicurl/" + SPOT_IMG_URL)
+    # Simple remote file
+    info = pyotb.ReadImageInfo("https://fake.com/image.tif", frozen=True)
+    assert (
+        info.app.GetParameterValue("in")
+        == info.parameters["in"]
+        == "/vsicurl/https://fake.com/image.tif"
+    )
+    # Compressed single file archive
+    info = pyotb.ReadImageInfo("image.tif.zip", frozen=True)
+    assert (
+        info.app.GetParameterValue("in")
+        == info.parameters["in"]
+        == "/vsizip/image.tif.zip"
+    )
+    # File within compressed remote archive
+    info = pyotb.ReadImageInfo("https://fake.com/archive.tar.gz/image.tif", frozen=True)
+    assert (
+        info.app.GetParameterValue("in")
+        == info.parameters["in"]
+        == "/vsitar//vsicurl/https://fake.com/archive.tar.gz/image.tif"
+    )
+    # Piped curl --> zip --> tiff
+    zipped_tif_urls = (
+        "https://github.com/OSGeo/gdal/raw/master"
+        "/autotest/gcore/data/byte.tif.zip",  # without /vsi
+        "/vsizip/vsicurl/https://github.com/OSGeo/gdal/raw/master"
+        "/autotest/gcore/data/byte.tif.zip",  # with /vsi
+    )
+    for zipped_tif_url in zipped_tif_urls:
+        info = pyotb.ReadImageInfo(zipped_tif_url)
+        assert info["sizex"] == 20
+
+
+def test_img_properties():
+    assert INPUT.dtype == "uint8"
     assert INPUT.shape == (304, 251, 4)
+    assert INPUT.transform == (6.0, 0.0, 760056.0, 0.0, -6.0, 6946092.0)
+    with pytest.raises(TypeError):
+        assert pyotb.ReadImageInfo(INPUT).dtype == "uint8"
+
+
+def test_img_metadata():
+    assert "ProjectionRef" in INPUT.metadata
+    assert "TIFFTAG_SOFTWARE" in INPUT.metadata
+    inp2 = pyotb.Input(
+        "https://sentinel-cogs.s3.us-west-2.amazonaws.com/sentinel-s2-l2a-cogs/"
+        "47/Q/RU/2021/12/S2B_47QRU_20211227_0_L2A/B04.tif"
+    )
+    assert "ProjectionRef" in inp2.metadata
+    assert "OVR_RESAMPLING_ALG" in inp2.metadata
+    # Metadata with numeric values (e.g. TileHintX)
+    fp = (
+        "https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/raw/develop/"
+        "Data/Input/radarsat2/RADARSAT2_ALTONA_300_300_VV.tif?inline=false"
+    )
+    app = pyotb.BandMath({"il": [fp], "exp": "im1b1"})
+    assert "TileHintX" in app.metadata
+
+
+def test_essential_apps():
+    readimageinfo = pyotb.ReadImageInfo(INPUT, quiet=True)
+    assert (readimageinfo["sizex"], readimageinfo["sizey"]) == (251, 304)
+    assert readimageinfo["numberbands"] == 4
+    computeimagestats = pyotb.ComputeImagesStatistics([INPUT], quiet=True)
+    assert computeimagestats["out.min"] == TEST_IMAGE_STATS["out.min"]
+    slicer_computeimagestats = pyotb.ComputeImagesStatistics(
+        il=[INPUT[:10, :10, 0]], quiet=True
+    )
+    assert slicer_computeimagestats["out.min"] == [180]
+
+
+def test_get_statistics():
+    stats_data = pyotb.ComputeImagesStatistics(INPUT).data
+    assert stats_data == TEST_IMAGE_STATS
+    assert INPUT.get_statistics() == TEST_IMAGE_STATS
+
+
+def test_get_info():
+    infos = INPUT.get_info()
+    assert (infos["sizex"], infos["sizey"]) == (251, 304)
+    bm_infos = pyotb.BandMathX([INPUT], exp="im1")["out"].get_info()
+    assert infos == bm_infos
+
+
+def test_read_values_at_coords():
+    assert INPUT[0, 0, 0] == 180
+    assert INPUT[10, 20, :] == [207, 192, 172, 255]
+
+
+def test_xy_to_rowcol():
+    assert INPUT.get_rowcol_from_xy(760101, 6945977) == (19, 7)
+
+
+def test_write():
+    assert INPUT.write("/dev/shm/test_write.tif", ext_fname="nodata=0")
+    INPUT["out"].filepath.unlink()
+    assert INPUT.write(Path("/dev/shm/test_write.tif"), ext_fname="nodata=0")
+    INPUT["out"].filepath.unlink()
+    # Frozen
+    frozen_app = pyotb.BandMath(INPUT, exp="im1b1", frozen=True)
+    assert frozen_app.write("/dev/shm/test_frozen_app_write.tif")
+    frozen_app["out"].filepath.unlink()
+    frozen_app_init_with_outfile = pyotb.BandMath(
+        INPUT, exp="im1b1", out="/dev/shm/test_frozen_app_write.tif", frozen=True
+    )
+    assert frozen_app_init_with_outfile.write(pixel_type="uint16")
+    assert frozen_app_init_with_outfile.dtype == "uint16"
+    frozen_app_init_with_outfile["out"].filepath.unlink()
+
+
+def test_write_multi_output():
+    mss = pyotb.MeanShiftSmoothing(
+        SPOT_IMG_URL,
+        fout="/dev/shm/test_ext_fn_fout.tif",
+        foutpos="/dev/shm/test_ext_fn_foutpos.tif",
+    )
+
+    mss = pyotb.MeanShiftSmoothing(SPOT_IMG_URL)
+    assert mss.write(
+        {
+            "fout": "/dev/shm/test_ext_fn_fout.tif",
+            "foutpos": "/dev/shm/test_ext_fn_foutpos.tif",
+        },
+        ext_fname={"nodata": 0, "gdal:co:COMPRESS": "DEFLATE"},
+    )
+
+    dr = pyotb.DimensionalityReduction(
+        SPOT_IMG_URL, out="/dev/shm/1.tif", outinv="/dev/shm/2.tif"
+    )
+    dr = pyotb.DimensionalityReduction(SPOT_IMG_URL)
+    assert dr.write(
+        {"out": "/dev/shm/1.tif", "outinv": "/dev/shm/2.tif"}
+    )
+
+
+def test_write_ext_fname():
+    def _check(expected: str, key: str = "out", app=INPUT.app):
+        fn = app.GetParameterString(key)
+        assert "?&" in fn
+        assert fn.split("?&", 1)[1] == expected
+
+    assert INPUT.write("/dev/shm/test_write.tif", ext_fname="nodata=0")
+    _check("nodata=0")
+    assert INPUT.write("/dev/shm/test_write.tif", ext_fname={"nodata": "0"})
+    _check("nodata=0")
+    assert INPUT.write("/dev/shm/test_write.tif", ext_fname={"nodata": 0})
+    _check("nodata=0")
+    assert INPUT.write(
+        "/dev/shm/test_write.tif",
+        ext_fname={"nodata": 0, "gdal:co:COMPRESS": "DEFLATE"},
+    )
+    _check("nodata=0&gdal:co:COMPRESS=DEFLATE")
+    assert INPUT.write(
+        "/dev/shm/test_write.tif", ext_fname="nodata=0&gdal:co:COMPRESS=DEFLATE"
+    )
+    _check("nodata=0&gdal:co:COMPRESS=DEFLATE")
+    assert INPUT.write(
+        "/dev/shm/test_write.tif?&box=0:0:10:10",
+        ext_fname={"nodata": "0", "gdal:co:COMPRESS": "DEFLATE", "box": "0:0:20:20"},
+    )
+    # Check that the bbox is the one specified in the filepath, not the one
+    # specified in `ext_filename`
+    _check("nodata=0&gdal:co:COMPRESS=DEFLATE&box=0:0:10:10")
+    assert INPUT.write(
+        "/dev/shm/test_write.tif?&box=0:0:10:10",
+        ext_fname="nodata=0&gdal:co:COMPRESS=DEFLATE&box=0:0:20:20",
+    )
+    _check("nodata=0&gdal:co:COMPRESS=DEFLATE&box=0:0:10:10")
+    INPUT["out"].filepath.unlink()
+
+    mmsd = pyotb.MorphologicalMultiScaleDecomposition(INPUT)
+    mmsd.write(
+        {
+            "outconvex": "/dev/shm/outconvex.tif?&nodata=1",
+            "outconcave": "/dev/shm/outconcave.tif?&nodata=2",
+            "outleveling": "/dev/shm/outleveling.tif?&nodata=3",
+        },
+        ext_fname={"nodata": 0, "gdal:co:COMPRESS": "DEFLATE"},
+    )
+    _check("nodata=1&gdal:co:COMPRESS=DEFLATE", key="outconvex", app=mmsd.app)
+    _check("nodata=2&gdal:co:COMPRESS=DEFLATE", key="outconcave", app=mmsd.app)
+    _check("nodata=3&gdal:co:COMPRESS=DEFLATE", key="outleveling", app=mmsd.app)
+    mmsd["outconvex"].filepath.unlink()
+    mmsd["outconcave"].filepath.unlink()
+    mmsd["outleveling"].filepath.unlink()
 
 
-def test_slicer_shape():
-    extract = INPUT[:50, :60, :3]
-    assert extract.shape == (50, 60, 3)
+def test_output():
+    assert INPUT["out"].write("/dev/shm/test_output_write.tif")
+    INPUT["out"].filepath.unlink()
+    frozen_app = pyotb.BandMath(INPUT, exp="im1b1", frozen=True)
+    assert frozen_app["out"].write("/dev/shm/test_frozen_app_write.tif")
+    frozen_app["out"].filepath.unlink()
+    info_from_output_obj = pyotb.ReadImageInfo(INPUT["out"])
+    assert info_from_output_obj.data
 
 
-def test_slicer_preserve_dtype():
-    extract = INPUT[:50, :60, :3]
-    assert extract.dtype == "uint8"
+# Slicer
+def test_slicer():
+    sliced = INPUT[:50, :60, :3]
+    assert sliced.parameters["cl"] == ["Channel1", "Channel2", "Channel3"]
+    assert sliced.shape == (50, 60, 3)
+    assert sliced.dtype == "uint8"
+    sliced_negative_band_idx = INPUT[:50, :60, :-2]
+    assert sliced_negative_band_idx.shape == (50, 60, 2)
+    sliced_from_output = pyotb.BandMath([INPUT], exp="im1b1")["out"][:50, :60, :-2]
+    assert isinstance(sliced_from_output, pyotb.core.Slicer)
 
 
-# More complex tests
-def test_operation():
+# Operation and LogicalOperation
+def test_operator_expressions():
     op = INPUT / 255 * 128
-    assert op.exp == "((im1b1 / 255) * 128);((im1b2 / 255) * 128);((im1b3 / 255) * 128);((im1b4 / 255) * 128)"
+    assert (
+        op.exp
+        == "((im1b1 / 255) * 128);((im1b2 / 255) * 128);((im1b3 / 255) * 128);((im1b4 / 255) * 128)"
+    )
+    assert op.dtype == "float32"
+    assert abs(INPUT).exp == "(abs(im1b1));(abs(im1b2));(abs(im1b3));(abs(im1b4))"
+    summed_bands = sum(INPUT[:, :, b] for b in range(INPUT.shape[-1]))
+    assert summed_bands.exp == "((((0 + im1b1) + im1b2) + im1b3) + im1b4)"
 
 
-def test_sum_bands():
-    # Sum of bands
-    summed = sum(INPUT[:, :, b] for b in range(INPUT.shape[-1]))
-    assert summed.exp == "((((0 + im1b1) + im1b2) + im1b3) + im1b4)"
+def operation_test(func, exp):
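+    # Compare func(INPUT) band by band against a BandMathX reference built from
+    # `exp`: CompareImages must report zero differing pixels and zero MSE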
+    meas = func(INPUT)
+    ref = pyotb.BandMathX({"il": [SPOT_IMG_URL], "exp": exp})
+    for i in range(1, 5):
+        compared = pyotb.CompareImages(
+            {"ref.in": ref, "meas.in": meas, "ref.channel": i, "meas.channel": i}
+        )
+        assert (compared["count"], compared["mse"]) == (0, 0)
 
 
-def test_binary_mask_where():
-    # Create binary mask based on several possible values
-    values = [1, 2, 3, 4]
-    res = pyotb.where(pyotb.any(INPUT[:, :, 0] == value for value in values), 255, 0)
-    assert res.exp == "(((((im1b1 == 1) || (im1b1 == 2)) || (im1b1 == 3)) || (im1b1 == 4)) ? 255 : 0)"
+def test_operation_add():
+    operation_test(lambda x: x + x, "im1 + im1")
+    operation_test(lambda x: x + INPUT, "im1 + im1")
+    operation_test(lambda x: INPUT + x, "im1 + im1")
+    operation_test(lambda x: x + 2, "im1 + {2;2;2;2}")
+    operation_test(lambda x: x + 2.0, "im1 + {2.0;2.0;2.0;2.0}")
+    operation_test(lambda x: 2 + x, "{2;2;2;2} + im1")
+    operation_test(lambda x: 2.0 + x, "{2.0;2.0;2.0;2.0} + im1")
 
 
-# Apps
-def test_app_readimageinfo():
-    info = pyotb.ReadImageInfo(INPUT, quiet=True)
-    assert info.sizex == 251
-    assert info.sizey == 304
-    assert info["numberbands"] == info.numberbands == 4
+def test_operation_sub():
+    operation_test(lambda x: x - x, "im1 - im1")
+    operation_test(lambda x: x - INPUT, "im1 - im1")
+    operation_test(lambda x: INPUT - x, "im1 - im1")
+    operation_test(lambda x: x - 2, "im1 - {2;2;2;2}")
+    operation_test(lambda x: x - 2.0, "im1 - {2.0;2.0;2.0;2.0}")
+    operation_test(lambda x: 2 - x, "{2;2;2;2} - im1")
+    operation_test(lambda x: 2.0 - x, "{2.0;2.0;2.0;2.0} - im1")
 
 
-def test_app_computeimagestats():
-    stats = pyotb.ComputeImagesStatistics([INPUT], quiet=True)
-    assert stats["out.min"] == "[33, 64, 91, 47]"
+def test_operation_mult():
+    operation_test(lambda x: x * x, "im1 mult im1")
+    operation_test(lambda x: x * INPUT, "im1 mult im1")
+    operation_test(lambda x: INPUT * x, "im1 mult im1")
+    operation_test(lambda x: x * 2, "im1 * 2")
+    operation_test(lambda x: x * 2.0, "im1 * 2.0")
+    operation_test(lambda x: 2 * x, "2 * im1")
+    operation_test(lambda x: 2.0 * x, "2.0 * im1")
 
 
-def test_app_computeimagestats_sliced():
-    slicer_stats = pyotb.ComputeImagesStatistics(il=[INPUT[:10, :10, 0]], quiet=True)
-    assert slicer_stats["out.min"] == "[180]"
+def test_operation_div():
+    operation_test(lambda x: x / x, "im1 div im1")
+    operation_test(lambda x: x / INPUT, "im1 div im1")
+    operation_test(lambda x: INPUT / x, "im1 div im1")
+    operation_test(lambda x: x / 2, "im1 * 0.5")
+    operation_test(lambda x: x / 2.0, "im1 * 0.5")
+    operation_test(lambda x: 2 / x, "{2;2;2;2} div im1")
+    operation_test(lambda x: 2.0 / x, "{2.0;2.0;2.0;2.0} div im1")
 
 
-# NDVI
+# BandMath NDVI == RadiometricIndices NDVI ?
 def test_ndvi_comparison():
-    ndvi_bandmath = (INPUT[:, :, -1] - INPUT[:, :, [0]]) / (INPUT[:, :, -1] + INPUT[:, :, 0])
+    ndvi_bandmath = (INPUT[:, :, -1] - INPUT[:, :, [0]]) / (
+        INPUT[:, :, -1] + INPUT[:, :, 0]
+    )
     ndvi_indices = pyotb.RadiometricIndices(
-        {"in": INPUT, "list": "Vegetation:NDVI", "channels.red": 1, "channels.nir": 4}
+        INPUT, {"list": ["Vegetation:NDVI"], "channels.red": 1, "channels.nir": 4}
     )
     assert ndvi_bandmath.exp == "((im1b4 - im1b1) / (im1b4 + im1b1))"
+    assert ndvi_bandmath.write("/dev/shm/ndvi_bandmath.tif", "float")
+    assert ndvi_indices.write("/dev/shm/ndvi_indices.tif", "float")
 
-    ndvi_bandmath.write("/tmp/ndvi_bandmath.tif", pixel_type="float")
-    assert Path("/tmp/ndvi_bandmath.tif").exists()
-    ndvi_indices.write("/tmp/ndvi_indices.tif", pixel_type="float")
-    assert Path("/tmp/ndvi_indices.tif").exists()
+    compared = pyotb.CompareImages(
+        {"ref.in": ndvi_indices, "meas.in": "/dev/shm/ndvi_bandmath.tif"}
+    )
+    assert (compared["count"], compared["mse"]) == (0, 0)
+    thresholded_indices = pyotb.where(ndvi_indices >= 0.3, 1, 0)
+    assert thresholded_indices["exp"] == "((im1b1 >= 0.3) ? 1 : 0)"
+    thresholded_bandmath = pyotb.where(ndvi_bandmath >= 0.3, 1, 0)
+    assert (
+        thresholded_bandmath["exp"]
+        == "((((im1b4 - im1b1) / (im1b4 + im1b1)) >= 0.3) ? 1 : 0)"
+    )
 
-    compared = pyotb.CompareImages({"ref.in": ndvi_indices, "meas.in": "/tmp/ndvi_bandmath.tif"})
-    assert compared.count == 0
-    assert compared.mse == 0
 
-    thresholded_indices = pyotb.where(ndvi_indices >= 0.3, 1, 0)
-    assert thresholded_indices.exp == "((im1b1 >= 0.3) ? 1 : 0)"
+# Tests for functions.py
+def test_binary_mask_where():
+    # Create binary mask based on several possible values
+    values = [1, 2, 3, 4]
+    res = pyotb.where(pyotb.any(INPUT[:, :, 0] == value for value in values), 255, 0)
+    assert (
+        res.exp
+        == "(((((im1b1 == 1) || (im1b1 == 2)) || (im1b1 == 3)) || (im1b1 == 4)) ? 255 : 0)"
+    )
 
-    thresholded_bandmath = pyotb.where(ndvi_bandmath >= 0.3, 1, 0)
-    assert thresholded_bandmath.exp == "((((im1b4 - im1b1) / (im1b4 + im1b1)) >= 0.3) ? 1 : 0)"
+
+# Tests for summarize()
+def test_summarize_pipeline_simple():
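+    # OrthoRectification -> BandMath -> ManageNoData, checked against the stored summary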
+    app1 = pyotb.OrthoRectification({"io.in": SPOT_IMG_URL})
+    app2 = pyotb.BandMath({"il": [app1], "exp": "im1b1"})
+    app3 = pyotb.ManageNoData({"in": app2})
+    summary = pyotb.summarize(app3)
+    assert SIMPLE_SERIALIZATION == summary
+
+
+def test_summarize_pipeline_diamond():
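+    # Diamond graph: app2 feeds both app3 and app4, and app4 also takes app3 as input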
+    app1 = pyotb.BandMath({"il": [SPOT_IMG_URL], "exp": "im1b1"})
+    app2 = pyotb.OrthoRectification({"io.in": app1})
+    app3 = pyotb.ManageNoData({"in": app2})
+    app4 = pyotb.BandMathX({"il": [app2, app3], "exp": "im1+im2"})
+    summary = pyotb.summarize(app4)
+    assert DIAMOND_SERIALIZATION == summary
+
+
+def test_summarize_output_obj():
+    assert pyotb.summarize(INPUT["out"])
+
+
+def test_summarize_strip_output():
+    in_fn = "/vsicurl/" + SPOT_IMG_URL
+    in_fn_w_ext = "/vsicurl/" + SPOT_IMG_URL + "?&skipcarto=1"
+    out_fn = "/dev/shm/output.tif"
+    out_fn_w_ext = out_fn + "?&box=10:10:10:10"
+
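+    # Each case: (input, output, checked key, summarize() kwargs, expected value)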
+    baseline = [
+        (in_fn, out_fn_w_ext, "out", {}, out_fn_w_ext),
+        (in_fn, out_fn_w_ext, "out", {"strip_outpath": True}, out_fn),
+        (in_fn_w_ext, out_fn, "in", {}, in_fn_w_ext),
+        (in_fn_w_ext, out_fn, "in", {"strip_inpath": True}, in_fn),
+    ]
+
+    for inp, out, key, extra_args, expected in baseline:
+        app = pyotb.ExtractROI({"in": inp, "out": out})
+        summary = pyotb.summarize(app, **extra_args)
+        assert (
+            summary["parameters"][key] == expected
+        ), f"Failed for input {inp}, output {out}, args {extra_args}"
+
+
+def test_summarize_consistency():
+    app_fns = [
+        lambda inp: pyotb.ExtractROI(
+            {"in": inp, "startx": 10, "starty": 10, "sizex": 50, "sizey": 50}
+        ),
+        lambda inp: pyotb.ManageNoData({"in": inp, "mode": "changevalue"}),
+        lambda inp: pyotb.DynamicConvert({"in": inp}),
+        lambda inp: pyotb.Mosaic({"il": [inp]}),
+        lambda inp: pyotb.BandMath({"il": [inp], "exp": "im1b1 + 1"}),
+        lambda inp: pyotb.BandMathX({"il": [inp], "exp": "im1"}),
+        lambda inp: pyotb.OrthoRectification({"io.in": inp}),
+    ]
+
+    def operator_test(app_fn):
+        """
+        Here we create 2 summaries:
+        - summary of the app before write()
+        - summary of the app after write()
+        Then we check that they differ only by the output parameter.
+        """
+        app = app_fn(inp=SPOT_IMG_URL)
+        out_file = "/dev/shm/out.tif"
+        out_key = app.output_image_key
+        summary_wo_wrt = pyotb.summarize(app)
+        app.write(out_file)
+        summary_w_wrt = pyotb.summarize(app)
+        app[out_key].filepath.unlink()
+        summary_wo_wrt["parameters"].update({out_key: out_file})
+        assert summary_wo_wrt == summary_w_wrt
+
+    for app_fn in app_fns:
+        operator_test(app_fn)
+
+
+# Numpy tests
+def test_numpy_exports_dic():
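+    # export() caches arrays in `exports_dic`, keyed by the output parameter name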
+    INPUT.export()
+    exported_array = INPUT.exports_dic[INPUT.output_image_key]["array"]
+    assert isinstance(exported_array, np.ndarray)
+    assert exported_array.dtype == "uint8"
+    del INPUT.exports_dic["out"]
+    INPUT["out"].export()
+    assert INPUT["out"].output_image_key in INPUT["out"].exports_dic
+
+
+def test_numpy_conversions():
+    array = INPUT.to_numpy()
+    assert array.dtype == np.uint8
+    assert array.shape == INPUT.shape
+    assert (array.min(), array.max()) == (33, 255)
+    # Sliced img to array
+    sliced = INPUT[:100, :200, :3]
+    sliced_array = sliced.to_numpy()
+    assert sliced_array.dtype == np.uint8
+    assert sliced_array.shape == (100, 200, 3)
+    # Test auto convert to numpy
+    assert isinstance(np.array(INPUT), np.ndarray)
+    assert INPUT.shape == np.array(INPUT).shape
+    assert INPUT[19, 7] == list(INPUT.to_numpy()[19, 7])
+    # Add noise test from the docs
+    white_noise = np.random.normal(0, 50, size=INPUT.shape)
+    noisy_image = INPUT + white_noise
+    assert isinstance(noisy_image, pyotb.core.App)
+    assert noisy_image.shape == INPUT.shape
+
+
+def test_numpy_to_rasterio():
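+    # to_rasterio() yields a band-first array and a profile (dtype, transform, crs)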
+    array, profile = INPUT.to_rasterio()
+    assert array.dtype == profile["dtype"] == np.uint8
+    assert array.shape == (4, 304, 251)
+    assert profile["transform"] == (6.0, 0.0, 760056.0, 0.0, -6.0, 6946092.0)
+
+    # CRS test requires GDAL python bindings
+    try:
+        from osgeo import osr
+
+        crs = osr.SpatialReference()
+        crs.ImportFromEPSG(2154)
+        dest_crs = osr.SpatialReference()
+        dest_crs.ImportFromWkt(profile["crs"])
+        assert dest_crs.IsSame(crs)
+    except ImportError:
+        pass
diff --git a/tests/test_numpy.py b/tests/test_numpy.py
deleted file mode 100644
index 5b1dd048cb73d2a9c904c3b937c6d60d8b8520c3..0000000000000000000000000000000000000000
--- a/tests/test_numpy.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import os
-import numpy as np
-import pyotb
-
-
-FILEPATH = os.environ["TEST_INPUT_IMAGE"]
-INPUT = pyotb.Input(FILEPATH)
-
-
-def test_to_numpy():
-    array = INPUT.to_numpy()
-    assert array.dtype == np.uint8
-    assert array.shape == INPUT.shape
-    assert array.min() == 33
-    assert array.max() == 255
-
-
-def test_to_numpy_sliced():
-    sliced = INPUT[:100, :200, :3]
-    array = sliced.to_numpy()
-    assert array.dtype == np.uint8
-    assert array.shape == (100, 200, 3)
-
-
-def test_convert_to_array():
-    array = np.array(INPUT)
-    assert isinstance(array, np.ndarray)
-    assert INPUT.shape == array.shape
-
-
-def test_add_noise_array():
-    white_noise = np.random.normal(0, 50, size=INPUT.shape)
-    noisy_image = INPUT + white_noise
-    assert isinstance(noisy_image, pyotb.otbObject)
-    assert noisy_image.shape == INPUT.shape
-
-
-def test_to_rasterio():
-    array, profile = INPUT.to_rasterio()
-    assert array.dtype == profile["dtype"] == np.uint8
-    assert array.shape == (4, 304, 251)
-    assert profile["transform"] == (6.0, 0.0, 760056.0, 0.0, -6.0, 6946092.0)
-
-    # CRS test requires GDAL python bindings
-    try:        
-        from osgeo import osr
-        crs = osr.SpatialReference()
-        crs.ImportFromEPSG(2154)
-        dest_crs = osr.SpatialReference()
-        dest_crs.ImportFromWkt(profile["crs"])
-        assert dest_crs.IsSame(crs)
-    except ImportError:
-        pass
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index c2d8f0e910851f5412f044c5d4ef887613af57a0..5646674e483c04767098619d2e156586d9c1d2d7 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -1,9 +1,8 @@
-import sys
 import os
 import itertools
 import pytest
 import pyotb
-from pyotb.helpers import logger
+from tests_data import INPUT, SPOT_IMG_URL
 
 
 # List of building blocks; we can add other pyotb objects here
@@ -13,18 +12,16 @@ OTBAPPS_BLOCKS = [
     lambda inp: pyotb.DynamicConvert({"in": inp}),
     lambda inp: pyotb.Mosaic({"il": [inp]}),
     lambda inp: pyotb.BandMath({"il": [inp], "exp": "im1b1 + 1"}),
-    lambda inp: pyotb.BandMathX({"il": [inp], "exp": "im1"})
+    lambda inp: pyotb.BandMathX({"il": [inp], "exp": "im1"}),
 ]
 
 PYOTB_BLOCKS = [
-    lambda inp: 1 + abs(inp) * 2,
+    lambda inp: 1 / (1 + abs(inp) * 2),
     lambda inp: inp[:80, 10:60, :],
 ]
 PIPELINES_LENGTH = [1, 2, 3]
 
 ALL_BLOCKS = PYOTB_BLOCKS + OTBAPPS_BLOCKS
-FILEPATH = os.environ["TEST_INPUT_IMAGE"]
-INPUT = pyotb.Input(FILEPATH)
 
 
 def generate_pipeline(inp, building_blocks):
@@ -77,14 +74,18 @@ def pipeline2str(pipeline):
         a string
 
     """
-    return " > ".join([INPUT.__class__.__name__] + [f"{i}.{app.name.split()[0]}"
-                                                       for i, app in enumerate(pipeline)])
+    return " > ".join(
+        [INPUT.__class__.__name__]
+        + [f"{i}.{app.name.split()[0]}" for i, app in enumerate(pipeline)]
+    )
 
 
 def make_pipelines_list():
     """Create a list of pipelines using different lengths and blocks"""
-    blocks = {FILEPATH: OTBAPPS_BLOCKS,  # for filepath, we can't use Slicer or Operation
-              INPUT: ALL_BLOCKS}
+    blocks = {
+        SPOT_IMG_URL: OTBAPPS_BLOCKS,
+        INPUT: ALL_BLOCKS,
+    }  # for a filepath input, we can't use Slicer or Operation
     pipelines = []
     names = []
     for inp, blocks in blocks.items():
@@ -101,53 +102,83 @@ def make_pipelines_list():
     return pipelines, names
 
 
-PIPELINES, NAMES = make_pipelines_list()
-
-
-@pytest.mark.parametrize("pipe", PIPELINES, ids=NAMES)
-def test_pipeline_shape(pipe):
-    for i, app in enumerate(pipe):
-        print(app.shape)
-        assert bool(app.shape)
+def shape(pipe):
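+    # Yield whether each app in the pipeline reports a non-empty shape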
+    for app in pipe:
+        yield bool(app.shape)
 
 
-@pytest.mark.parametrize("pipe", PIPELINES, ids=NAMES)
-def test_pipeline_shape_nointermediate(pipe):
+def shape_nointermediate(pipe):
     app = [pipe[-1]][0]
-    assert bool(app.shape)
+    yield bool(app.shape)
 
 
-@pytest.mark.parametrize("pipe", PIPELINES, ids=NAMES)
-def test_pipeline_shape_backward(pipe):
-    for i, app in enumerate(reversed(pipe)):
-        assert bool(app.shape)
+def shape_backward(pipe):
+    for app in reversed(pipe):
+        yield bool(app.shape)
 
 
-@pytest.mark.parametrize("pipe", PIPELINES, ids=NAMES)
-def test_pipeline_write(pipe):
+def write(pipe):
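+    # Remove any leftover output, then yield the result of app.write() for each app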
     for i, app in enumerate(pipe):
         out = f"/tmp/out_{i}.tif"
         if os.path.isfile(out):
             os.remove(out)
-        app.write(out)
-        assert os.path.isfile(out)
+        yield app.write(out)
 
 
-@pytest.mark.parametrize("pipe", PIPELINES, ids=NAMES)
-def test_pipeline_write_nointermediate(pipe):
+def write_nointermediate(pipe):
     app = [pipe[-1]][0]
-    out = f"/tmp/out_0.tif"
+    out = "/tmp/out_0.tif"
     if os.path.isfile(out):
         os.remove(out)
-    app.write(out)
-    assert os.path.isfile(out)
+    yield app.write(out)
 
 
-@pytest.mark.parametrize("pipe", PIPELINES, ids=NAMES)
-def test_pipeline_write_backward(pipe):
+def write_backward(pipe):
     for i, app in enumerate(reversed(pipe)):
         out = f"/tmp/out_{i}.tif"
         if os.path.isfile(out):
             os.remove(out)
-        app.write(out)
-        assert os.path.isfile(out)
+        yield app.write(out)
+
+
+funcs = [
+    shape,
+    shape_nointermediate,
+    shape_backward,
+    write,
+    write_nointermediate,
+    write_backward,
+]
+
+PIPELINES, NAMES = make_pipelines_list()
+
+
+@pytest.mark.parametrize("test_func", funcs)
+def test(test_func):
+    fname = test_func.__name__
+    successes, failures = 0, 0
+    total_successes = []
+    for pipeline, blocks in zip(PIPELINES, NAMES):
+        err = None
+        try:
+            # Collect the per-app checks (non-empty shapes or successful writes)
+            bool_tests = list(test_func(pipeline))
+            overall_success = all(bool_tests)
+        except Exception as e:
+            # Unexpected exception in the pipeline, e.g. a RuntimeError we missed
+            bool_tests = []
+            overall_success = False
+            err = e
+        if overall_success:
+            print(f"\033[92m{fname}: success with [{blocks}]\033[0m\n")
+            successes += 1
+        else:
+            print(f"\033[91m{fname}: failure with [{blocks}]\033[0m {bool_tests}\n")
+            if err:
+                print(f"exception thrown: {err}")
+            failures += 1
+        total_successes.append(overall_success)
+    print(f"\nEnded test {fname} with {successes} successes, {failures} failures")
+    if err:
+        raise err
+    assert all(total_successes)
diff --git a/tests/test_serialization.py b/tests/test_serialization.py
deleted file mode 100644
index 2e0ce7fabafaeaab49fae5839eeb9415b38f2f6c..0000000000000000000000000000000000000000
--- a/tests/test_serialization.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-import pyotb
-
-filepath = os.environ["TEST_INPUT_IMAGE"]
-
-
-def test_pipeline_simple():
-    # BandMath -> OrthoRectification -> ManageNoData
-    app1 = pyotb.BandMath({'il': [filepath], 'exp': 'im1b1'})
-    app2 = pyotb.OrthoRectification({'io.in': app1})
-    app3 = pyotb.ManageNoData({'in': app2})
-    summary = app3.summarize()
-    reference = {'name': 'ManageNoData', 'parameters': {'in': {
-                    'name': 'OrthoRectification', 'parameters': {'io.in': {
-                        'name': 'BandMath', 'parameters': {'il': ('tests/image.tif',), 'exp': 'im1b1'}},
-                    'map': 'utm',
-                    'outputs.isotropic': True}},
-                'mode': 'buildmask'}}
-    assert summary == reference
-
-
-def test_pipeline_diamond():
-    # Diamond graph
-    app1 = pyotb.BandMath({'il': [filepath], 'exp': 'im1b1'})
-    app2 = pyotb.OrthoRectification({'io.in': app1})
-    app3 = pyotb.ManageNoData({'in': app2})
-    app4 = pyotb.BandMathX({'il': [app2, app3], 'exp': 'im1+im2'})
-    summary = app4.summarize()
-    reference = {'name': 'BandMathX', 'parameters': {'il': [
-                    {'name': 'OrthoRectification', 'parameters': {'io.in': {
-                        'name': 'BandMath', 'parameters': {'il': ('tests/image.tif',), 'exp': 'im1b1'}},
-                    'map': 'utm',
-                    'outputs.isotropic': True}},
-                    {'name': 'ManageNoData', 'parameters': {'in': {
-                        'name': 'OrthoRectification', 'parameters': {
-                            'io.in': {'name': 'BandMath', 'parameters': {'il': ('tests/image.tif',), 'exp': 'im1b1'}},
-                            'map': 'utm',
-                            'outputs.isotropic': True}},
-                    'mode': 'buildmask'}}
-                ],
-                'exp': 'im1+im2'}}
-    assert summary == reference
diff --git a/tests/tests_data.py b/tests/tests_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e40c95e1c1b493b1382bb723b3ed12c28ee35e8
--- /dev/null
+++ b/tests/tests_data.py
@@ -0,0 +1,22 @@
+import json
+from pathlib import Path
+import pyotb
+
+
+SPOT_IMG_URL = "https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/raw/develop/Data/Input/SP67_FR_subset_1.tif"
+PLEIADES_IMG_URL = "https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb/-/raw/develop/Data/Baseline/OTB/Images/prTvOrthoRectification_pleiades-1_noDEM.tif"
+INPUT = pyotb.Input(SPOT_IMG_URL)
+
+
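+# Reference statistics of the SPOT test image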
+TEST_IMAGE_STATS = {
+    "out.mean": [79.5505, 109.225, 115.456, 249.349],
+    "out.min": [33, 64, 91, 47],
+    "out.max": [255, 255, 230, 255],
+    "out.std": [51.0754, 35.3152, 23.4514, 20.3827],
+}
+
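+# Expected pyotb.summarize() results for the simple and diamond test pipelines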
+json_file = Path(__file__).parent / "pipeline_summary.json"
+with json_file.open("r", encoding="utf-8") as js:
+    data = json.load(js)
+SIMPLE_SERIALIZATION = data["SIMPLE"]
+DIAMOND_SERIALIZATION = data["DIAMOND"]