diff --git a/examples/nmf/JIT-NMF-Header.maxpat b/examples/nmf/JIT-NMF-Header.maxpat index d69ea0fc..8b5d0eb1 100644 --- a/examples/nmf/JIT-NMF-Header.maxpat +++ b/examples/nmf/JIT-NMF-Header.maxpat @@ -377,9 +377,8 @@ "patching_rect" : [ 5.0, 195.0, 360.0, 74.0 ], "presentation" : 1, "presentation_linecount" : 4, - "presentation_rect" : [ 5.0, 5.0, 360.0, 74.0 ], - "text" : "When using a small number of iterations, fluid.bufnmf~ can process in close-to-realtime. In this example, fluid.bufnmf~ is used to dissect components of a sound and pan them seperately.", - "textcolor" : [ 0.0, 0.0, 0.0, 1.0 ] + "presentation_rect" : [ 5.0, 5.0, 362.0, 74.0 ], + "text" : "When using a small number of iterations, fluid.bufnmf~ can process in close-to-realtime. In this example, fluid.bufnmf~ is used to dissect components of a sound and pan them separately." } } @@ -603,7 +602,8 @@ "presentation" : 1, "presentation_linecount" : 3, "presentation_rect" : [ 18.97886073589325, 160.070424437522888, 639.0, 60.0 ], - "text" : "These example patches display creative uses of nmf processing, each with increasing complexity.\n\nIf you aren't familiar with the nmf objects, check out their help files before diving deeper!" + "text" : "These example patches display creative uses of nmf processing, each with increasing complexity.\n\nIf you aren't familiar with the nmf objects, check out their help files before diving deeper!", + "textcolor" : [ 0.0, 0.0, 0.0, 1.0 ] } } diff --git a/extras/Fluid Corpus Manipulation Toolkit.maxpat b/extras/Fluid Corpus Manipulation Toolkit.maxpat index 930afa35..c0615e96 100644 --- a/extras/Fluid Corpus Manipulation Toolkit.maxpat +++ b/extras/Fluid Corpus Manipulation Toolkit.maxpat @@ -768,10 +768,9 @@ "numoutlets" : 0, "patching_rect" : [ 5.0, 195.0, 360.0, 74.0 ], "presentation" : 1, - "presentation_linecount" : 4, - "presentation_rect" : [ 5.0, 5.0, 360.0, 74.0 ], - "text" : "fluid.bufnmf~ is a relatively heavy object in terms of processing. 
However, you can tune it to work in a more lightweight manner, facilitating all sorts of 'just in time' approaches. This tab will take you to a handful of examples.", - "textcolor" : [ 0.0, 0.0, 0.0, 1.0 ] + "presentation_linecount" : 3, + "presentation_rect" : [ 5.0, 5.0, 360.0, 57.0 ], + "text" : "Analysing pitch is a complex problem to solve. This example shows how by using some statistical inferences, the true value of pitch can be more accurately measured." } } @@ -782,13 +781,6 @@ "source" : [ "obj-1", 0 ] } - } -, { - "patchline" : { - "destination" : [ "obj-20", 0 ], - "source" : [ "obj-15", 1 ] - } - } , { "patchline" : { @@ -1215,8 +1207,7 @@ "presentation" : 1, "presentation_linecount" : 14, "presentation_rect" : [ 5.0, 5.0, 353.0, 242.0 ], - "text" : "Lauren Sarah Hayes is a Scottish improviser and sound artist. Her music is a mix of experimental pop/live electronics/techno/noise/free improvisation and has been described as 'voracious' and 'exhilarating'. She is a sculptress of sound, manipulating, remixing, and bending voice, drum machines, analogue synths and self-built software live and physically. She is excited by what can happen in the vulnerable relationships between sound, space, and audience. Her shows are highly physical, making the performance of live electronic music more engaging for audiences. Over the last decade she has developed and honed a deliberately challenging and unpredictable performance system that explores the relationships between bodies, sound, environments, and technology.", - "textcolor" : [ 0.0, 0.0, 0.0, 1.0 ] + "text" : "Lauren Sarah Hayes is a Scottish improviser and sound artist. Her music is a mix of experimental pop/live electronics/techno/noise/free improvisation and has been described as 'voracious' and 'exhilarating'. She is a sculptress of sound, manipulating, remixing, and bending voice, drum machines, analogue synths and self-built software live and physically. 
She is excited by what can happen in the vulnerable relationships between sound, space, and audience. Her shows are highly physical, making the performance of live electronic music more engaging for audiences. Over the last decade she has developed and honed a deliberately challenging and unpredictable performance system that explores the relationships between bodies, sound, environments, and technology." } } @@ -1227,13 +1218,6 @@ "source" : [ "obj-1", 0 ] } - } -, { - "patchline" : { - "destination" : [ "obj-20", 0 ], - "source" : [ "obj-15", 1 ] - } - } , { "patchline" : { @@ -2432,8 +2416,7 @@ "presentation" : 1, "presentation_linecount" : 11, "presentation_rect" : [ 5.0, 5.0, 363.0, 191.0 ], - "text" : "Imagine you have a large corpus of sounds that you've collected from a studio session, some outside sound walks or experimenting with a synthesiser on a rainy afternoon. This tutorial teaches you how to build a '2D Corpus Explorer', a patch that will enable you to interrogate and listen to those sounds in a structured manner. The end result is similar to CataRT and AudioStellar in that small segments of your corpus sounds are analysed and mapped to a two-dimensional space that can be explored using your mouse. It will cover topics such as segmentation, analysis, data processing and visualisation.", - "textcolor" : [ 0.0, 0.0, 0.0, 1.0 ] + "text" : "Imagine you have a large corpus of sounds that you've collected from a studio session, some outside sound walks or experimenting with a synthesiser on a rainy afternoon. This tutorial teaches you how to build a '2D Corpus Explorer', a patch that will enable you to interrogate and listen to those sounds in a structured manner. The end result is similar to CataRT and AudioStellar in that small segments of your corpus sounds are analysed and mapped to a two-dimensional space that can be explored using your mouse. It will cover topics such as segmentation, analysis, data processing and visualisation." 
} } @@ -2444,13 +2427,6 @@ "source" : [ "obj-1", 0 ] } - } -, { - "patchline" : { - "destination" : [ "obj-20", 0 ], - "source" : [ "obj-15", 1 ] - } - } , { "patchline" : { @@ -3046,8 +3022,7 @@ "presentation" : 1, "presentation_linecount" : 7, "presentation_rect" : [ 5.0, 5.0, 352.0, 124.0 ], - "text" : "Slicing, segmenting, chopping, or dividing. These words all refer to the same type of process in which a sound is separated into 'chunks' or 'slices' across time. Each of these objects embodies a different model of listening for identifying 'slice points'. In other words, different types of change can be measured and used to determine how to segment a sound.", - "textcolor" : [ 0.0, 0.0, 0.0, 1.0 ] + "text" : "Slicing, segmenting, chopping, or dividing. These words all refer to the same type of process in which a sound is separated into 'chunks' or 'slices' across time. Each of these objects embodies a different model of listening for identifying 'slice points'. In other words, different types of change can be measured and used to determine how to segment a sound." } } @@ -3058,13 +3033,6 @@ "source" : [ "obj-1", 0 ] } - } -, { - "patchline" : { - "destination" : [ "obj-20", 0 ], - "source" : [ "obj-15", 1 ] - } - } , { "patchline" : {