Skip to content

Boundary

boundary

boundary(score: Score) -> Score

Compute the local boundary strength with LBDM (Cambouropoulos, 1997).

Raises:

  • ValueError

    if the score is not monophonic

Returns:

  • Score

    where the notes have the "boundary_strength" property set to a value from 0 to 1.

Source code in amads/melody/boundary.py
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
def boundary(score: Score) -> Score:
    """
    Compute the local boundary strength with LBDM (Cambouropoulos, 1997).

    Raises
    ------
    ValueError
        if the score is not monophonic

    Returns
    -------
    Score
        where the notes have the "boundary_strength" property set to a
        value from 0 to 1.
    """
    if not score.ismonophonic():
        raise ValueError("Score must be monophonic")

    notes = score.get_sorted_notes()

    # inter-note profiles: pitch interval, inter-onset interval, and rest
    # (silence between offset and next onset, clamped at 0 for overlaps)
    pp = [
        abs(pair[1].key_num - pair[0].key_num) for pair in zip(notes, notes[1:])
    ]
    po = [pair[1].onset - pair[0].onset for pair in zip(notes, notes[1:])]
    pr = [
        max(0, pair[1].onset - pair[0].offset) for pair in zip(notes, notes[1:])
    ]

    def list_degrees(profile):
        """Relative degree of change between consecutive profile values."""
        # 1e-6 guards against division by zero when both values are 0
        ret_list = [
            abs(pair[1] - pair[0]) / (1e-6 + pair[1] + pair[0])
            for pair in zip(profile, profile[1:])
        ]
        # pad so the degrees list has the same length as the profile
        ret_list.append(0)
        return ret_list

    # degrees of change
    rp = list_degrees(pp)
    ro = list_degrees(po)
    rr = list_degrees(pr)

    def list_strengths(profile, degrees):
        """Boundary strength per interval: profile value times the sum of
        the degrees of change on either side, normalized to [0, 1]."""
        degrees_sum = [0]
        for degree_pair in zip(degrees, degrees[1:]):
            degrees_sum.append(degree_pair[0] + degree_pair[1])
        strengths = [pair[0] * pair[1] for pair in zip(profile, degrees_sum)]
        if not strengths:
            # fewer than two notes: no intervals, nothing to normalize
            # (previously max([]) raised ValueError here)
            return strengths
        max_strength = max(strengths)
        # only normalize when the maximum is meaningfully above zero
        if max_strength > 0.1:
            strengths = [x / max_strength for x in strengths]
        return strengths

    sp = list_strengths(pp, rp)
    so = list_strengths(po, ro)
    sr = list_strengths(pr, rr)

    # by convention, the first note is always a boundary
    if len(notes) > 0:
        notes[0].set("boundary_strength", 1)
    # weighted combination: pitch 0.25, inter-onset 0.5, rest 0.25
    for sp_elem, so_elem, sr_elem, note in zip(sp, so, sr, notes[1:]):
        note.set(
            "boundary_strength", 0.25 * sp_elem + 0.5 * so_elem + 0.25 * sr_elem
        )

    return score

segment

Classes

Functions

fantastic_segmenter

fantastic_segmenter(
    score: Score, phrase_gap: float, units: str
) -> List[Score]

Segment melody into phrases based on IOI gaps.

Parameters:

  • score (Score) –

    Score object containing melody to segment

  • phrase_gap (float) –

    The minimum IOI gap to consider a new phrase

  • units (str) –

    The units of the phrase gap, either "seconds" or "quarters"

Returns:

  • list[Score]

    List of Score objects representing phrases

Source code in amads/melody/segment.py
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
def fantastic_segmenter(
    score: Score, phrase_gap: float, units: str
) -> List[Score]:
    """Segment melody into phrases based on IOI gaps.

    Parameters
    ----------
    score : Score
        Score object containing melody to segment
    phrase_gap : float
        The minimum IOI gap to consider a new phrase
    units : str
        The units of the phrase gap, either "seconds" or "quarters"

    Returns
    -------
    list[Score]
        List of Score objects representing phrases

    Raises
    ------
    ValueError
        If units is not "seconds" or "quarters"
    NotImplementedError
        If units is "seconds" (not yet supported)
    """
    # explicit validation instead of assert (asserts vanish under -O)
    if units not in ("seconds", "quarters"):
        raise ValueError('units must be "seconds" or "quarters"')
    if units == "seconds":
        raise NotImplementedError(
            "Seconds are not yet implemented, see issue #75: "
            "https://github.com/music-computing/amads/issues/75"
        )

    def _phrase_to_score(phrase_notes) -> Score:
        """Build a standalone Score from a list of notes, re-timed so the
        phrase starts at onset 0."""
        phrase_score = Score(onset=0, duration=None)
        part = Part(
            parent=None, onset=0, duration=None
        )  # parent=None is required
        start_time = phrase_notes[0].onset
        for phrase_note in phrase_notes:
            # make a parentless copy of the note so we can adjust its onset
            # before inserting it into the new part in proper time order
            new_note = phrase_note.insert_copy_into(None)
            new_note.onset -= start_time
            part.insert(new_note)
        phrase_score.insert(part)  # This will set the parent
        return phrase_score

    # Extract notes from score
    notes = score.get_sorted_notes()

    # IOI of each note relative to its predecessor; the first note has
    # no IOI by convention
    ioi_data = {}
    for i, note in enumerate(notes):
        ioi_data[note] = note.onset - notes[i - 1].onset if i > 0 else None

    phrases = []
    current_phrase = []
    for note in notes:
        # a gap larger than phrase_gap before this note starts a new phrase
        need_new_phrase = (
            len(current_phrase) > 0
            and ioi_data[note] is not None
            and ioi_data[note] > phrase_gap
        )
        if need_new_phrase:
            phrases.append(_phrase_to_score(current_phrase))
            current_phrase = []
        current_phrase.append(note)

    # Append final phrase
    if len(current_phrase) > 0:
        phrases.append(_phrase_to_score(current_phrase))

    return phrases

segment_gestalt

This module implements the segment gestalt function by Tenney & Polansky (1980)

We can broadly categorise the algorithm's limitations to 2 categories:

  1. Soft restrictions

  2. Hard restrictions on what scores we can take, either because the algorithm exhibits undefined behavior when these scores are given, or because it isn't designed for said restrictions.

With these categories in mind, we have the following limitations. The algorithm does not consider these things within its scope (given a monophonic input):

  1. the monophonic music may have stream segregation (i.e. 1 stream of notes can be interpreted as 2 or more separate interspersed entities)

  2. does not consider harmony or shape (see the beginning of section 2 of the original paper for more details)

  3. does not give semantic meaning (we're still stuck giving arbitrary ideals to arbitrary things)

The algorithm has the following restriction to the score:

  • the score must be monophonic (perception differences). If we consider polyphonic scores, we will need a definition of what a substructure is for said score (in said algorithm) with respect to how we carve the note structures. Since, in this algorithm, we don't consider stream segregation and other features that require larger context clues, we can simply define a score substructure “temporally” as a contiguous subsequence of notes. Hence, it is safe to assume that the current algorithm is undefined when it comes to polyphonic music.

Some thoughts (and questions): (1) Should our output preserve the internal structure of the score for segments and clangs? Probably not. Keep in mind we're dealing with monophonic score structures. We just need to provide sufficient information that allows a caller to potentially verify the result and use it elsewhere, hence we simply return 2 lists of separate scores.

Legit think having a separate representation that can index into individual notes will be immensely helpful. But, I'm certain there has to be something I'm missing to decide otherwise (if I had to guess, ambiguity of how musical scores themselves are presented to the musician is chief among them, and maintaining that ambiguity in our internal representation is also paramount)

Also legit think we need well-defined rules to split and merge scores...

On a completely separate and unrelated note, there are 2 pitchmeans with the exact same implementation and 2 filenames...

Classes

Functions

_construct_score_list

_construct_score_list(notes, intervals)

given an iterator of intervals and a global list of notes, we construct a list of scores containing the notes specified within the intervals

Source code in amads/melody/segment_gestalt.py
67
68
69
70
71
72
73
74
75
76
77
78
79
def _construct_score_list(notes, intervals):
    """
    Build one Score per (start, end) interval, each holding copies of
    notes[start:end] drawn from the global note list.
    """
    score_list = []
    for start, end in intervals:
        new_score = Score()
        new_part = Part(new_score)
        for note in notes[start:end]:
            note.insert_copy_into(new_part)
        score_list.append(new_score)
    return score_list

_find_peaks

_find_peaks(target_list, comp=lt)

returns a list of indices identifying peaks in the list according to a comparison

Source code in amads/melody/segment_gestalt.py
82
83
84
85
86
87
88
89
90
91
92
93
def _find_peaks(target_list, comp=lt):
    """
    returns a list of indices identifying peaks in the list
    according to a comparison
    """
    peaks = []
    for i, triplet in enumerate(
        zip(target_list, target_list[1:], target_list[2:])
    ):
        if comp(triplet[0], triplet[1]) and comp(triplet[2], triplet[1]):
            peaks.append(i + 1)
    return peaks

segment_gestalt

segment_gestalt(score: Score) -> tuple[list[float], list[float]]

Given a monophonic score, returns clang and segment boundary onsets

Parameters:

  • score (Score) –

    The score to be segmented

Returns:

  • tuple[list[float], list[float]]

    Empty lists if no clangs can be formed, else, 2-tuple of: (sorted list of onsets denoting clang boundaries, sorted list of onsets denoting segment boundaries)

Raises:

  • Exception

    if the score is not monophonic

Source code in amads/melody/segment_gestalt.py
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
def segment_gestalt(score: Score) -> tuple[list[float], list[float]]:
    """
    Given a monophonic score, returns clang and segment boundary onsets

    Parameters
    ----------
    score: Score
        The score to be segmented

    Returns
    -------
    tuple[list[float], list[float]]
        2-tuple of:
        (sorted list of onsets denoting clang boundaries,
        sorted list of onsets denoting segment boundaries).
        Either list may be empty when too few notes exist to form
        clangs or segments.

    Raises
    ------
    ValueError
        if the score is not monophonic
    """
    if not score.ismonophonic():
        # ValueError for invalid input, consistent with boundary()
        raise ValueError("score not monophonic, input is not valid.")

    notes: List[Note] = cast(
        List[Note], score.flatten(collapse=True).list_all(Note)
    )

    if len(notes) <= 0:
        return ([], [])

    # clang distance between successive notes: inter-onset interval
    # (weighted x2) plus the absolute pitch interval
    cl_values = []
    for note_pair in zip(notes[:-1], notes[1:]):
        pitch_diff = note_pair[1].key_num - note_pair[0].key_num
        onset_diff = note_pair[1].onset - note_pair[0].onset
        cl_values.append(2 * onset_diff + abs(pitch_diff))

    # too few distances to contain an interior peak
    if len(cl_values) < 3:
        return ([], [])

    clang_soft_peaks = _find_peaks(cl_values)
    # cl_indices[i] is the index of the first note of clang i; the final
    # entry is one past the end so score-list construction is easier
    cl_indices = [0]
    cl_indices.extend([idx + 1 for idx in clang_soft_peaks])
    cl_indices.append(len(notes))

    clang_onsets = list(map(lambda i: (notes[i].onset), cl_indices[:-1]))

    if len(clang_onsets) <= 2:
        return (clang_onsets, [])

    # split the clangs and organize them into scores
    clang_scores = _construct_score_list(
        notes, zip(cl_indices[:-1], cl_indices[1:])
    )
    # (1) weighted mean pitch of each clang (cf. segment_gestalt.m);
    # renamed loop variable so it no longer shadows the `score` parameter
    mean_pitches = [
        pitch_mean(clang_score, weighted=True) for clang_score in clang_scores
    ]

    # (2) segment distance between consecutive clangs
    seg_dist_values = []
    for i in range(len(clang_scores) - 1):
        local_seg_dist = 0.0
        # mean-pitch interval between the two clangs
        local_seg_dist += abs(mean_pitches[i + 1] - mean_pitches[i])
        # onset distance between the first notes of the two clangs
        local_seg_dist += (
            notes[cl_indices[i + 1]].onset - notes[cl_indices[i]].onset
        )
        # pitch interval across the clang boundary (last note of this
        # clang to first note of the next)
        local_seg_dist += abs(
            notes[cl_indices[i + 1]].key_num
            - notes[cl_indices[i + 1] - 1].key_num
        )
        # onset distance across the clang boundary, weighted x2
        local_seg_dist += 2 * (
            notes[cl_indices[i + 1]].onset - notes[cl_indices[i + 1] - 1].onset
        )
        seg_dist_values.append(local_seg_dist)
    if len(seg_dist_values) < 3:
        return (clang_onsets, [])

    seg_soft_peaks = _find_peaks(seg_dist_values)
    # _find_peaks may legitimately find no peaks; only check the range
    # invariant when at least one exists (indexing [] would IndexError)
    assert not seg_soft_peaks or seg_soft_peaks[-1] < len(cl_indices) - 1
    seg_indices = [0]
    seg_indices.extend([cl_indices[idx + 1] for idx in seg_soft_peaks])
    seg_indices.append(len(notes))

    segment_onsets = list(map(lambda i: (notes[i].onset), seg_indices[:-1]))
    return (clang_onsets, segment_onsets)

melsim

This is a Python wrapper for the R package 'melsim'. This wrapper allows the user to easily interface with the melsim package using the AMADS Score object. Melsim is a package for computing similarity between melodies, and is being developed by Sebastian Silas and Klaus Frieler (https://www.aesthetics.mpg.de/en/the-institute/people/klaus-frieler.html).

Melsim is based on SIMILE, which was written by Daniel Müllensiefen and Klaus Frieler in 2003/2004. This package is used to compare two or more melodies pairwise across a range of similarity measures. Not all similarity measures are implemented in melsim, but the ones that are can be used here. All of the following similarity measures are implemented and functional in melsim: Please be aware that the names of the similarity measures are case-sensitive.

Num Name
1 Jaccard
2 Kulczynski2
3 Russel
4 Faith
5 Tanimoto
6 Dice
7 Mozley
8 Ochiai
9 Simpson
10 cosine
11 angular
12 correlation
13 Tschuprow
14 Cramer
15 Gower
16 Euclidean
17 Manhattan
18 supremum
19 Canberra
20 Chord
21 Geodesic
22 Bray
23 Soergel
24 Podani
25 Whittaker
26 eJaccard
27 eDice
28 Bhjattacharyya
29 divergence
30 Hellinger
31 edit_sim_utf8
32 edit_sim
33 Levenshtein
34 sim_NCD
35 const
36 sim_dtw

The following similarity measures are not currently functional in melsim:

Num Name Type
1 count_distinct set-based
2 tversky set-based
3 simple matching
4 braun_blanquet set-based
5 minkowski vector-based
6 ukkon distribution-based
7 sum_common distribution-based
8 distr_sim distribution-based
9 stringdot_utf8 sequence-based
10 pmi special
11 sim_emd special

Further to the similarity measures, melsim allows the user to specify which domain the similarity should be calculated for. This is referred to as a “transformation” in melsim, and all of the following transformations are implemented and functional:

Num Name
1 pitch
2 int
3 fuzzy_int
4 parsons
5 pc
6 ioi_class
7 duration_class
8 int_X_ioi_class
9 implicit_harmonies

The following transformations are not currently functional in melsim:

Num Name
1 ioi
2 phrase_segmentation

Functions

run_script_in_r

run_script_in_r(script: str, text: bool = True) -> str

Run an R script and return its output.

Parameters:

  • script (str) –

    R script to run

Returns:

  • str

    Standard output from the R script

Raises:

  • RuntimeError

    If there is an error running the R script

Source code in amads/melody/similarity/melsim.py
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
def run_script_in_r(script: str, text: bool = True) -> str:
    """Run an R script with Rscript and return its standard output.

    Parameters
    ----------
    script : str
        R script to run
    text : bool, default=True
        Passed through to ``subprocess.run``. When True the captured
        output is decoded to ``str``; when False, ``bytes`` is returned
        despite the ``-> str`` annotation.

    Returns
    -------
    str
        Standard output from the R script, with surrounding whitespace
        stripped

    Raises
    ------
    subprocess.CalledProcessError
        If Rscript exits with a non-zero status (``check=True``); callers
        typically translate this into RuntimeError
    """
    # the Rscript path is resolved once and cached at module level
    global _rscript_path
    if not _rscript_path:
        _rscript_path = _find_rscript()
    # -e executes the script string directly; capture_output collects
    # both stdout and stderr so failures carry their message
    result = subprocess.run(
        [_rscript_path, "-e", script],
        capture_output=True,
        text=text,
        check=True,
    )
    return result.stdout.strip()

_find_rscript

_find_rscript() -> str

Find the Rscript executable.

Returns:

  • str

    Path to Rscript executable

Raises:

  • RuntimeError

    If Rscript is not found

Source code in amads/melody/similarity/melsim.py
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
def _find_rscript() -> str:
    """Find the Rscript executable.

    Returns
    -------
    str
        Path to Rscript executable

    Raises
    ------
    RuntimeError
        If Rscript is not found
    """
    # Try common locations for Rscript
    rscript_path = shutil.which("Rscript")

    if rscript_path:
        return rscript_path

    # Try common macOS homebrew locations
    common_paths = [
        "/opt/homebrew/bin/Rscript",  # Apple Silicon homebrew
        "/usr/local/bin/Rscript",  # Intel homebrew
        "/Library/Frameworks/R.framework/Resources/bin/Rscript",  # R.app
    ]

    for path in common_paths:
        if os.path.exists(path) and os.access(path, os.X_OK):
            return path

    raise RuntimeError(
        "Rscript not found. Please install R and ensure Rscript is in your PATH. "
        "You can install R from https://cran.r-project.org/ or using Homebrew: brew install r"
    )

check_r_packages_installed

check_r_packages_installed(
    install_missing: bool = False, n_retries: int = 3
)

Check if required R packages are installed.

Parameters:

  • install_missing (bool, default: False ) –

    If True, attempt to install missing packages automatically.

  • n_retries (int, default: 3 ) –

    Number of retries for installing each missing package.

Raises:

  • ImportError

    If required packages are missing and install_missing is False.

  • RuntimeError

    If there is an error checking or installing packages.

Source code in amads/melody/similarity/melsim.py
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
def check_r_packages_installed(
    install_missing: bool = False, n_retries: int = 3
):
    """Check if required R packages are installed.

    Parameters
    ----------
    install_missing : bool, default=False
        If True, attempt to install missing packages automatically.
    n_retries : int, default=3
        Number of retries for installing each missing package.

    Raises
    ------
    ImportError
        If required packages are missing and install_missing is False.
    RuntimeError
        If there is an error checking or installing packages.
    """
    # R script to check package installation using base R only; it prints
    # missing package names as comma-separated quoted strings (or nothing)
    check_script = """
    packages <- c({packages})
    missing <- packages[!sapply(packages, requireNamespace, quietly = TRUE)]
    if (length(missing) > 0) {{
        cat(paste0('"', missing, '"', collapse = ","))
    }} else {{
        cat("")
    }}
    """

    # Format package list (CRAN and GitHub packages checked together)
    packages_str = ", ".join(
        [f'"{p}"' for p in r_cran_packages + r_github_packages]
    )
    check_script = check_script.format(packages=packages_str)

    # Run R script
    try:
        output = run_script_in_r(check_script)

        # Parse the output - if empty, no missing packages
        if not output:
            missing_packages = []
        else:
            # Parse comma-separated quoted strings
            missing_packages = [pkg.strip('"') for pkg in output.split(",")]

        if missing_packages:
            if install_missing:
                for package in missing_packages:
                    try:
                        # retry each install with exponential backoff (1-10s)
                        for attempt in Retrying(
                            stop=stop_after_attempt(n_retries),
                            wait=wait_exponential(multiplier=1, min=1, max=10),
                        ):
                            with attempt:
                                install_r_package(package)
                    except RetryError as e:
                        raise RuntimeError(
                            f"Failed to install R package '{package}' after {n_retries} attempts. "
                            "See above for the traceback."
                        ) from e
            else:
                raise ImportError(
                    f"Packages {missing_packages} are required but not installed. "
                    "You can install them by running: install_dependencies()"
                )
    except subprocess.CalledProcessError as e:
        # chain the original error so stderr and the R exit status stay
        # visible in the traceback (previously the cause was dropped)
        raise RuntimeError(f"Error checking R packages: {e.stderr}") from e

install_r_package

install_r_package(package: str)

Install an R package.

Parameters:

  • package (str) –

    Name of the R package to install.

Raises:

  • ValueError

    If the package type is unknown.

Source code in amads/melody/similarity/melsim.py
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
def install_r_package(package: str):
    """Install a single R package from CRAN or GitHub.

    Parameters
    ----------
    package : str
        Name of the R package to install.

    Raises
    ------
    ValueError
        If the package type is unknown.
    """
    if package in r_cran_packages:
        print(f"Installing CRAN package '{package}'...")
        script = f"""
        utils::chooseCRANmirror(ind=1)
        utils::install.packages("{package}", dependencies=TRUE)
        """
    elif package in r_github_packages:
        print(f"Installing GitHub package '{package}'...")
        repo = github_repos[package]
        script = f"""
        if (!requireNamespace("remotes", quietly = TRUE)) {{
            utils::install.packages("remotes")
        }}
        remotes::install_github("{repo}", upgrade="always", dependencies=TRUE)
        """
    else:
        raise ValueError(f"Unknown package type for '{package}'")
    _ = run_script_in_r(script)

install_dependencies

install_dependencies()

Install all required R packages.

Raises:

  • ImportError

    If required packages are missing and install_missing is False.

  • RuntimeError

    If there is an error checking or installing packages.

Source code in amads/melody/similarity/melsim.py
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
def install_dependencies():
    """Install all required R packages (CRAN and GitHub).

    Raises
    ------
    RuntimeError
        If there is an error checking or installing packages.
    """
    # R script (base R only) that prints the missing packages as a
    # comma-separated list of quoted names, or nothing if none are missing
    check_script = """
    packages <- c({packages})
    missing <- packages[!sapply(packages, requireNamespace, quietly = TRUE)]
    if (length(missing) > 0) {{
        cat(paste0('"', missing, '"', collapse = ","))
    }} else {{
        cat("")
    }}
    """

    def _missing_packages(packages, label):
        """Return the subset of `packages` absent from the local R library."""
        packages_str = ", ".join([f'"{p}"' for p in packages])
        try:
            output = run_script_in_r(check_script.format(packages=packages_str))
        except subprocess.CalledProcessError as e:
            raise RuntimeError(
                f"Error checking {label} packages: {e.stderr}"
            ) from e
        if not output:
            return []
        # Parse comma-separated quoted strings
        return [pkg.strip('"') for pkg in output.split(",")]

    # CRAN packages: one batched install.packages() call
    missing_cran = _missing_packages(r_cran_packages, "CRAN")
    if missing_cran:
        print("Installing missing CRAN packages...")
        cran_script = f"""
        utils::chooseCRANmirror(ind=1)
        utils::install.packages(c({", ".join([f'"{p}"' for p in missing_cran])}), dependencies=TRUE)
        """
        try:
            _ = run_script_in_r(cran_script)
        except subprocess.CalledProcessError as e:
            raise RuntimeError(
                f"Error installing CRAN packages: {e.stderr}"
            ) from e
    else:
        print("Skipping install: All CRAN packages are already installed.")

    # GitHub packages: installed one at a time via remotes
    missing_github = _missing_packages(r_github_packages, "GitHub")
    if missing_github:
        print("Installing missing GitHub packages...")
        for package in missing_github:
            repo = github_repos[package]
            print(f"Installing {package} from {repo}...")
            install_script = f"""
            if (!requireNamespace("remotes", quietly = TRUE)) {{
                utils::install.packages("remotes")
            }}
            remotes::install_github("{repo}", upgrade="always", dependencies=TRUE)
            """
            try:
                _ = run_script_in_r(install_script)
            except subprocess.CalledProcessError as e:
                raise RuntimeError(
                    f"Error installing GitHub package '{package}': {e.stderr}"
                ) from e
    else:
        print("Skipping install: All GitHub packages are already installed.")

    print("All dependencies are installed and up to date.")

check_python_package_installed

check_python_package_installed(package: str)

Check if a Python package is installed.

Parameters:

  • package (str) –

    Name of the Python package to check.

Raises:

  • ImportError

    If the package is not installed.

Source code in amads/melody/similarity/melsim.py
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
def check_python_package_installed(package: str):
    """Check if a Python package is installed.

    Parameters
    ----------
    package : str
        Name of the Python package to check.

    Raises
    ------
    ImportError
        If the package is not installed.
    """
    try:
        __import__(package)
    except ImportError as err:
        # chain the original error so the real cause (e.g. a missing
        # transitive dependency inside the package) stays visible
        raise ImportError(
            f"Package '{package}' is required but not installed. "
            f"Please install it using pip: pip install {package}"
        ) from err

validate_method

validate_method(method: str)

Validate that the similarity method is supported.

Parameters:

  • method (str) –

    Name of the similarity method to validate.

Raises:

  • ValueError

    If the method is not supported.

Source code in amads/melody/similarity/melsim.py
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
def validate_method(method: str):
    """Raise ValueError unless *method* is a supported similarity method.

    Parameters
    ----------
    method : str
        Name of the similarity method to validate (case-sensitive).

    Raises
    ------
    ValueError
        If the method is not supported.
    """
    if method in VALID_METHODS:
        return
    raise ValueError(
        f"Invalid method '{method}'. Valid methods are: {', '.join(VALID_METHODS)}"
    )

validate_transformation

validate_transformation(transformation: str)

Validate that the transformation is supported.

Parameters:

  • transformation (str) –

    Name of the transformation to validate.

Raises:

  • ValueError

    If the transformation is not supported.

Source code in amads/melody/similarity/melsim.py
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
def validate_transformation(transformation: str):
    """Raise ValueError unless *transformation* is a supported transformation.

    Parameters
    ----------
    transformation : str
        Name of the transformation to validate (case-sensitive).

    Raises
    ------
    ValueError
        If the transformation is not supported.
    """
    if transformation in VALID_TRANSFORMATIONS:
        return
    raise ValueError(
        f"Invalid transformation '{transformation}'. Valid transformations are: {', '.join(VALID_TRANSFORMATIONS)}"
    )

get_similarity

get_similarity(
    melody_1, melody_2, method: str, transformation: str
) -> float

Calculate similarity between two Score objects using the specified method.

Parameters:

  • melody_1 (Score) –

    First Score object containing a monophonic melody

  • melody_2 (Score) –

    Second Score object containing a monophonic melody

  • method (str) –

    Name of the similarity method to use from the list in the module docstring.

  • transformation (str) –

    Name of the transformation to use from the list in the module docstring.

Returns:

  • float

    Similarity value between the two melodies

Examples:

>>> from amads.core.basics import Score
>>> # Create two simple melodies using from_melody
>>> melody_1 = Score.from_melody(pitches=[60, 62, 64, 65], durations=1.0)
>>> melody_2 = Score.from_melody(pitches=[60, 62, 64, 67], durations=1.0)
>>> # Calculate similarity using Jaccard method
>>> similarity = get_similarity(melody_1, melody_2, 'Jaccard', 'pitch')
Source code in amads/melody/similarity/melsim.py
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
def get_similarity(
    melody_1, melody_2, method: str, transformation: str
) -> float:
    """Calculate similarity between two Score objects using the specified method.

    Parameters
    ----------
    melody_1 : Score
        First Score object containing a monophonic melody
    melody_2 : Score
        Second Score object containing a monophonic melody
    method : str
        Name of the similarity method to use from the list in the module docstring.
    transformation : str
        Name of the transformation to use from the list in the module docstring.

    Returns
    -------
    float
        Similarity value between the two melodies

    Examples
    --------
    >>> from amads.core.basics import Score
    >>> # Create two simple melodies using from_melody
    >>> melody_1 = Score.from_melody(pitches=[60, 62, 64, 65], durations=1.0)
    >>> melody_2 = Score.from_melody(pitches=[60, 62, 64, 67], durations=1.0)
    >>> # Calculate similarity using Jaccard method
    >>> similarity = get_similarity(melody_1, melody_2, 'Jaccard', 'pitch')
    """
    # Reject unsupported method/transformation names up front
    validate_method(method)
    validate_transformation(transformation)

    # Flatten both scores into (pitches, starts, ends) triples
    arrays_1 = score_to_arrays(melody_1)
    arrays_2 = score_to_arrays(melody_2)

    # Delegate the actual R-backed computation to the private worker
    return _get_similarity(*arrays_1, *arrays_2, method, transformation)

_get_similarity

_get_similarity(
    melody1_pitches: List[float],
    melody1_starts: List[float],
    melody1_ends: List[float],
    melody2_pitches: List[float],
    melody2_starts: List[float],
    melody2_ends: List[float],
    method: str,
    transformation: str,
) -> float

Calculate similarity between two melodies using the specified method.

Parameters:

  • melody1_pitches (List[float]) –

    Pitch values for the first melody

  • melody1_starts (List[float]) –

    Start times for the first melody

  • melody1_ends (List[float]) –

    End times for the first melody

  • melody2_pitches (List[float]) –

    Pitch values for the second melody

  • melody2_starts (List[float]) –

    Start times for the second melody

  • melody2_ends (List[float]) –

    End times for the second melody

  • method (str) –

    Name of the similarity method to use

  • transformation (str) –

    Name of the transformation to use

Returns:

  • float

    Similarity value between the two melodies

Source code in amads/melody/similarity/melsim.py
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
def _get_similarity(
    melody1_pitches: List[float],
    melody1_starts: List[float],
    melody1_ends: List[float],
    melody2_pitches: List[float],
    melody2_starts: List[float],
    melody2_ends: List[float],
    method: str,
    transformation: str,
) -> float:
    """Calculate similarity between two melodies using the specified method.

    Parameters
    ----------
    melody1_pitches : List[float]
        Pitch values for the first melody
    melody1_starts : List[float]
        Start times for the first melody
    melody1_ends : List[float]
        End times for the first melody
    melody2_pitches : List[float]
        Pitch values for the second melody
    melody2_starts : List[float]
        Start times for the second melody
    melody2_ends : List[float]
        End times for the second melody
    method : str
        Name of the similarity method to use
    transformation : str
        Name of the transformation to use

    Returns
    -------
    float
        Similarity value between the two melodies (``nan`` if R returned NA)

    Raises
    ------
    RuntimeError
        If the R subprocess fails.
    """
    # Validate inputs
    validate_method(method)
    validate_transformation(transformation)

    # Serialize the melodies as comma-separated strings so they can be
    # embedded directly in the R source (works with lists and numpy arrays)
    pitches1_str = ",".join(map(str, melody1_pitches))
    starts1_str = ",".join(map(str, melody1_starts))
    ends1_str = ",".join(map(str, melody1_ends))
    pitches2_str = ",".join(map(str, melody2_pitches))
    starts2_str = ",".join(map(str, melody2_starts))
    ends2_str = ",".join(map(str, melody2_ends))

    # Create R script for similarity calculation
    r_script = f"""
    suppressMessages(suppressWarnings({{
        library(melsim)
        # Create melody objects
        melody1 <- melody_factory$new(mel_data = tibble::tibble(
            onset = c({starts1_str}),
            pitch = c({pitches1_str}),
            duration = c({ends1_str}) - c({starts1_str})
        ))
        melody2 <- melody_factory$new(mel_data = tibble::tibble(
            onset = c({starts2_str}),
            pitch = c({pitches2_str}),
            duration = c({ends2_str}) - c({starts2_str})
        ))
        # Create similarity measure
        sim_measure <- sim_measure_factory$new(
            name = "{method}",
            full_name = "{method}",
            transformation = "{transformation}",
            parameters = list(),
            sim_measure = "{method}"
        )
        # Calculate similarity
        result <- melody1$similarity(melody2, sim_measure)
        cat(jsonlite::toJSON(result$sim))
    }}))
    """

    # Run R script
    try:
        output_str = run_script_in_r(r_script)
        # R may print warnings before the JSON payload; the JSON is expected
        # on the last line of output.
        lines = output_str.split("\n")
        json_str = lines[-1]

        # Fall back to scanning from the end for a line that looks like the
        # start of a JSON array/object/string.
        if not (
            json_str.startswith("[")
            or json_str.startswith("{")
            or json_str.startswith('"')
        ):
            for line in reversed(lines):
                if (
                    line.startswith("[")
                    or line.startswith("{")
                    or line.startswith('"')
                ):
                    json_str = line
                    break

        output = json.loads(json_str)

        # jsonlite serializes scalars as one-element arrays; unwrap them
        value = output[0] if isinstance(output, list) else output

        # R's NA may arrive as the string "NA" or as null; map both to NaN
        if value == "NA" or value is None:
            return float("nan")  # Return NaN for invalid combinations

        return float(value)
    except subprocess.CalledProcessError as e:
        # Chain the cause so the R stderr stays visible in tracebacks
        raise RuntimeError(f"Error calculating similarity: {e.stderr}") from e

_convert_strings_to_tuples

_convert_strings_to_tuples(d: Dict) -> Dict

Convert string keys back to tuples where needed.

Source code in amads/melody/similarity/melsim.py
669
670
671
672
673
674
675
676
677
def _convert_strings_to_tuples(d: Dict) -> Dict:
    """Convert string keys back to tuples where needed.

    Recursively rebuilds nested dictionaries. Note that keys are currently
    passed through unchanged — no string-to-tuple conversion actually
    happens here; the function amounts to a recursive shallow copy.
    """
    return {
        key: _convert_strings_to_tuples(value) if isinstance(value, dict) else value
        for key, value in d.items()
    }

score_to_arrays

score_to_arrays(score) -> Tuple[List[float], List[float], List[float]]

Extract melody attributes from a Score object.

Parameters:

  • score (Score) –

    Score object containing a monophonic melody

Returns:

  • Tuple[List[float], List[float], List[float]]

    Tuple of (pitches, start_times, end_times)

Source code in amads/melody/similarity/melsim.py
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
def score_to_arrays(score) -> Tuple[List[float], List[float], List[float]]:
    """Extract melody attributes from a Score object.

    Parameters
    ----------
    score : Score
        Score object containing a monophonic melody

    Returns
    -------
    Tuple[List[float], List[float], List[float]]
        Tuple of (pitches, start_times, end_times), where pitches are
        MIDI key numbers.

    Raises
    ------
    ValueError
        If the score is not monophonic.
    """
    # Raise instead of assert: asserts are stripped under ``python -O``,
    # and ValueError matches what boundary() raises for the same condition.
    if not score.ismonophonic():
        raise ValueError("Score must be monophonic")

    notes = score.get_sorted_notes()

    # Extract pitch (MIDI key number), onset, and offset for each note
    pitches = [note.pitch.key_num for note in notes]
    starts = [note.onset for note in notes]
    ends = [note.onset + note.duration for note in notes]

    return pitches, starts, ends

_batch_compute_similarities

_batch_compute_similarities(args_list: List[Tuple]) -> List[float]

Compute similarities for a batch of melody pairs.

Parameters:

  • args_list (List[Tuple]) –

    List of argument tuples for _compute_similarity

Returns:

  • List[float]

    List of similarity values

Source code in amads/melody/similarity/melsim.py
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
def _batch_compute_similarities(args_list: List[Tuple]) -> List[float]:
    """Compute similarities for a batch of melody pairs.

    Each element of ``args_list`` is unpacked as
    ``(melody1_data, melody2_data, method, transformation)``, where each
    ``melody*_data`` is a ``(pitches, starts, ends)`` triple of lists
    (the shape produced by ``score_to_arrays``).

    Parameters
    ----------
    args_list : List[Tuple]
        List of argument tuples for _compute_similarity

    Returns
    -------
    List[float]
        List of similarity values (``nan`` where R returned NA)
    """
    # Create R script for batch similarity calculation with improved efficiency
    # The script expects its inputs via commandArgs: 8 positional strings per
    # comparison (pitches1, starts1, ends1, pitches2, starts2, ends2, method,
    # transformation), and caches melody objects keyed on their serialized form.
    r_script = """
    suppressMessages(suppressWarnings({
        library(melsim)
        library(jsonlite)
        library(purrr)
    # Function to create melody object
    create_melody <- function(pitches, starts, ends) {
        melody_factory$new(mel_data = tibble::tibble(
            onset = as.numeric(strsplit(starts, ",")[[1]]),
            pitch = as.numeric(strsplit(pitches, ",")[[1]]),
            duration = as.numeric(strsplit(ends, ",")[[1]]) - as.numeric(strsplit(starts, ",")[[1]])
        ))
    }
    # Function to calculate similarity
    calc_similarity <- function(melody1, melody2, method, transformation) {
        sim_measure <- sim_measure_factory$new(
            name = method,
            full_name = method,
            transformation = transformation,
            parameters = list(),
            sim_measure = method
        )
        result <- melody1$similarity(melody2, sim_measure)
        result$sim
    }
    # Process command line arguments
    args <- commandArgs(trailingOnly = TRUE)
    n_args <- length(args)
    n_comparisons <- n_args / 8  # Each comparison has 8 arguments
    # Pre-allocate results vector
    results <- numeric(n_comparisons)
    # Create a cache for melody objects
    melody_cache <- new.env()
    # Process in chunks for better memory management
    chunk_size <- 1000
    n_chunks <- ceiling(n_comparisons / chunk_size)
    for (chunk in seq_len(n_chunks)) {
        start_idx <- (chunk - 1) * chunk_size + 1
        end_idx <- min(chunk * chunk_size, n_comparisons)
        # Process chunk
        for (i in start_idx:end_idx) {
            idx <- (i-1) * 8 + 1
            # Get or create melody1
            melody1_key <- paste(args[idx], args[idx+1], args[idx+2], sep="|")
            if (!exists(melody1_key, envir=melody_cache)) {
                melody_cache[[melody1_key]] <- create_melody(args[idx], args[idx+1], args[idx+2])
            }
            melody1 <- melody_cache[[melody1_key]]
            # Get or create melody2
            melody2_key <- paste(args[idx+3], args[idx+4], args[idx+5], sep="|")
            if (!exists(melody2_key, envir=melody_cache)) {
                melody_cache[[melody2_key]] <- create_melody(args[idx+3], args[idx+4], args[idx+5])
            }
            melody2 <- melody_cache[[melody2_key]]
            method <- args[idx+6]
            transformation <- args[idx+7]
            results[i] <- calc_similarity(melody1, melody2, method, transformation)
        }
        # Force garbage collection after each chunk
        gc()
    }
    cat(toJSON(results))
    }))
    """

    # Prepare all arguments
    all_args = []
    for melody1_data, melody2_data, method, transformation in args_list:
        # Convert lists to comma-separated strings
        pitches1_str = ",".join(map(str, melody1_data[0]))
        starts1_str = ",".join(map(str, melody1_data[1]))
        ends1_str = ",".join(map(str, melody1_data[2]))
        pitches2_str = ",".join(map(str, melody2_data[0]))
        starts2_str = ",".join(map(str, melody2_data[1]))
        ends2_str = ",".join(map(str, melody2_data[2]))

        all_args.extend(
            [
                pitches1_str,
                starts1_str,
                ends1_str,
                pitches2_str,
                starts2_str,
                ends2_str,
                method,
                transformation,
            ]
        )

    # NOTE(review): ``all_args`` is assembled above but never passed to
    # ``run_script_in_r`` below, even though the R script reads its inputs
    # from ``commandArgs(trailingOnly = TRUE)``. Verify that
    # ``run_script_in_r`` forwards these arguments some other way; as written
    # here the R side would see zero comparisons. TODO confirm.
    # Run R script with all arguments
    try:
        output_str = run_script_in_r(r_script)
        # Extract JSON from output (may contain warnings before the JSON)
        # Find the JSON part - look for the last line or the part after newline
        lines = output_str.split("\n")
        json_str = lines[-1]  # JSON should be on the last line

        # If that doesn't work, try to find JSON array/object markers
        if not (
            json_str.startswith("[")
            or json_str.startswith("{")
            or json_str.startswith('"')
        ):
            for line in reversed(lines):
                if (
                    line.startswith("[")
                    or line.startswith("{")
                    or line.startswith('"')
                ):
                    json_str = line
                    break

        parsed_result = json.loads(json_str)
        # Handle NA values from R
        return [
            float("nan") if x == "NA" or x is None else float(x)
            for x in parsed_result
        ]
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"Error calculating similarities: {e.stderr}")

get_similarities

get_similarities(
    scores: Dict[str, object],
    method: Union[str, List[str]] = "Jaccard",
    transformation: Union[str, List[str]] = "pitch",
    output_file: Union[str, Path, None] = None,
    n_cores: Optional[int] = None,
    batch_size: int = 1000,
) -> Union[
    Dict[str, Dict[str, float]],
    Dict[Tuple[str, str], Dict[str, Dict[str, float]]],
]

Calculate pairwise similarities between multiple Score objects.

You can provide a single method and transformation, or a list of methods and transformations. The function will return similarity matrices as nested dictionaries.

Parameters:

  • scores (Dict[str, Score]) –

    Dictionary mapping score names to Score objects

  • method (Union[str, List[str]], default: "Jaccard" ) –

    Name of the similarity method(s) to use. Can be a single method or a list of methods.

  • transformation (Union[str, List[str]], default: "pitch" ) –

    Name of the transformation(s) to use. Can be a single transformation or a list of transformations.

  • output_file (Union[str, Path], default: None ) –

    If provided, save results to this file. If no extension is provided, .json will be added.

  • n_cores (int, default: None ) –

    Number of CPU cores to use for parallel processing. Defaults to all available cores.

  • batch_size (int, default: 1000 ) –

    Number of comparisons to process in each batch

Returns:

  • Union[Dict[str, Dict[str, float]], Dict[Tuple[str, str], Dict[str, Dict[str, float]]]]

    If single method and transformation: nested dictionary similarity matrix {row_name: {col_name: similarity}} where row_name and col_name are score names. If multiple methods/transformations: dictionary mapping (method, transformation) tuples to similarity matrices

Source code in amads/melody/similarity/melsim.py
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
def get_similarities(
    scores: Dict[str, object],
    method: Union[str, List[str]] = "Jaccard",
    transformation: Union[str, List[str]] = "pitch",
    output_file: Union[str, Path, None] = None,
    n_cores: Optional[int] = None,
    batch_size: int = 1000,
) -> Union[
    Dict[str, Dict[str, float]],
    Dict[Tuple[str, str], Dict[str, Dict[str, float]]],
]:
    """Calculate pairwise similarities between multiple Score objects.

    You can provide a single method and transformation, or a list of methods and transformations.
    The function will return similarity matrices as nested dictionaries.

    Parameters
    ----------
    scores : Dict[str, Score]
        Dictionary mapping score names to Score objects
    method : Union[str, List[str]], default="Jaccard"
        Name of the similarity method(s) to use. Can be a single method or a list of methods.
    transformation : Union[str, List[str]], default="pitch"
        Name of the transformation(s) to use. Can be a single transformation or a list of transformations.
    output_file : Union[str, Path], optional
        If provided, save results to this file. If no extension is provided, .json will be added.
    n_cores : int, optional
        Number of CPU cores to use for parallel processing. Defaults to all available cores.
        NOTE(review): this parameter is not referenced anywhere in the body
        below — processing is sequential; confirm whether it should be wired up.
    batch_size : int, default=1000
        Number of comparisons to process in each batch

    Returns
    -------
    Union[Dict[str, Dict[str, float]], Dict[Tuple[str, str], Dict[str, Dict[str, float]]]]
        If single method and transformation: nested dictionary similarity
        matrix {row_name: {col_name: similarity}} where row_name and col_name
        are score names. If multiple methods/transformations: dictionary mapping
        (method, transformation) tuples to similarity matrices
    """
    # Convert single method/transformation to lists
    methods = [method] if isinstance(method, str) else method
    transformations = (
        [transformation] if isinstance(transformation, str) else transformation
    )

    # Validate all methods and transformations
    for m in methods:
        validate_method(m)
    for t in transformations:
        validate_transformation(t)

    if len(scores) < 2:
        raise ValueError("Need at least 2 Score objects for comparison")

    # Extract melody data from all scores (avoid multiprocessing due to Score object pickling issues)
    print("Extracting melody data...")
    melody_data = {}
    for name, score in tqdm(scores.items(), desc="Processing Score objects"):
        try:
            # Scores that fail extraction are skipped with a warning rather
            # than aborting the whole batch.
            melody_data[name] = score_to_arrays(score)
        except Exception as e:
            print(
                f"Warning: Could not extract melody data for {name}: {str(e)}"
            )

    if len(melody_data) < 2:
        raise ValueError("Need at least 2 valid Score objects for comparison")

    # Prepare arguments for parallel processing
    print("Computing similarities...")
    args = []
    score_pairs = []

    # Pre-compute all combinations for better performance
    # One (data, data, method, transformation) task per unordered score pair
    # and per method/transformation combination; score_pairs keeps the
    # parallel bookkeeping used to rebuild the matrices afterwards.
    combinations_list = list(combinations(melody_data.items(), 2))
    for (name1, data1), (name2, data2) in combinations_list:
        for m in methods:
            for t in transformations:
                args.append((data1, data2, m, t))
                score_pairs.append((name1, name2, m, t))

    # Process in larger batches for better performance
    similarities_list = []
    for i in tqdm(range(0, len(args), batch_size), desc="Processing batches"):
        batch = args[i : i + batch_size]
        similarities_list.extend(_batch_compute_similarities(batch))

    # Create dictionary of results
    similarities = dict(zip(score_pairs, similarities_list))

    # Convert to matrix format using native Python types
    # NOTE(review): the matrices are keyed by every name in ``scores``, not
    # just those in ``melody_data`` — scores whose extraction failed above
    # therefore appear in the output with the initial 0.0 fill values.
    score_names = list(scores.keys())

    # Create similarity matrices as nested dictionaries
    matrices = {}

    for m in methods:
        for t in transformations:
            # Initialize matrix as nested dictionary with 1s on diagonal
            sim_matrix = {}
            for name1 in score_names:
                sim_matrix[name1] = {}
                for name2 in score_names:
                    if name1 == name2:
                        sim_matrix[name1][name2] = 1.0
                    else:
                        sim_matrix[name1][name2] = 0.0

            # Fill matrix with pairwise similarities
            # Since combinations() only gives us each pair once, set both directions
            for (
                name1,
                name2,
                method_key,
                transformation_key,
            ), similarity in similarities.items():
                if method_key == m and transformation_key == t:
                    # Handle NaN values consistently
                    # (the "NA"/None cases are defensive: the batch helper
                    # already maps those to float('nan'))
                    if (
                        similarity == "NA"
                        or similarity is None
                        or (
                            isinstance(similarity, float)
                            and math.isnan(similarity)
                        )
                    ):
                        sim_value = float("nan")
                    else:
                        sim_value = float(similarity)

                    # Set both directions to ensure perfect symmetry
                    sim_matrix[name1][name2] = sim_value
                    sim_matrix[name2][name1] = sim_value

            matrices[(m, t)] = sim_matrix

    # Save to file if output file specified
    if output_file:
        print("Saving results...")

        # Ensure output file has .json extension
        output_file = Path(output_file)
        if not output_file.suffix:
            output_file = output_file.with_suffix(".json")

        # Save matrices to JSON
        # JSON keys must be strings, so the (method, transformation) tuple
        # keys are flattened to "method_transformation".
        output_data = {}
        for (m, t), matrix in matrices.items():
            output_data[f"{m}_{t}"] = matrix

        # Local import; presumably duplicates a module-level json import —
        # harmless but redundant. TODO confirm.
        import json

        with open(output_file, "w") as f:
            json.dump(output_data, f, indent=2)
        print(f"Results saved to {output_file}")

    # Return format depends on number of method/transformation combinations
    if len(methods) == 1 and len(transformations) == 1:
        # Single method and transformation: return just the matrix
        return matrices[(methods[0], transformations[0])]
    else:
        # Multiple methods/transformations: return dictionary of matrices
        return matrices