diff --git a/STYLEGUIDE.adoc b/STYLEGUIDE.adoc index 5096150da9ce..5759baea18a1 100644 --- a/STYLEGUIDE.adoc +++ b/STYLEGUIDE.adoc @@ -10,9 +10,25 @@ CAUTION: This document is still a work in progress. * Master * Node * Pipeline +* Executor Mixing of these terms is incorrect +NOTE: When referring to a Jenkins Pipeline via short-hand ("Pipeline"), it +should always be title-cased. When referring to a conceptual pipeline +("continuous delivery pipeline"), it should always be lower-cased. + + +When needing to refer to the collection of executors and/or nodes it is best to +refer to them collectively as "the Jenkins environment", for example: + +____ +[..] they will now execute in parallel assuming the requisite capacity exists +in the Jenkins environment. +____ + +As opposed to "the Jenkins cluster", "agent pool" or any other phrase to +describe the collective set of resources provided by Jenkins. == Syntax/Formatting @@ -110,6 +126,15 @@ By default, all of these use the class name as label, but that can be customized javadoc:hudson.scm.SCM#all()[a list of all known SCM implementations] ---- +== Handbook Style Guide + +* For consecutive sections that are related to or build on each other, there + should be a reasonable "intro" or preamble at the beginning of one section + and a reasonable "outro" at the end, to provide continuity between the + documents + + + == Assorted comments * Prefer "for example" over "e.g." 
which can be more clear to non-native english diff --git a/content/_layouts/chapter.html.haml b/content/_layouts/chapter.html.haml index 95b69ea0a12d..d379d6ba8bdd 100644 --- a/content/_layouts/chapter.html.haml +++ b/content/_layouts/chapter.html.haml @@ -34,20 +34,26 @@ layout: default %a.next.page-link{:href => File.join('..', next_chapter.key)} #{next_chapter.title} ⇒ + + .section %h1 = page.title - = content - if chapter.sections && chapter.sections.size > 0 - %h2 - Sub-sections - - %ul - - chapter.sections.each do |section| - %li - %a{:href => section.key} - = section.title + #subsections + .toc + #toc-title + Chapter Sub-Sections + %ul + - chapter.sections.each do |section| + %li + %a{:href => section.key} + = section.title + + + = content + .section_nav.pagination-links - if previous_chapter diff --git a/content/css/jenkins.css b/content/css/jenkins.css index 531e043e704a..af8b072b23f2 100644 --- a/content/css/jenkins.css +++ b/content/css/jenkins.css @@ -1345,8 +1345,10 @@ blockquote { background-color: #f9f9f9; border-left: 1px solid #c9c9c9; float: right; + width: 10rem; margin-left: 15px; padding: 10px; + clear: right; } .toc li li { diff --git a/content/doc/book/book.yml b/content/doc/book/book.yml index 652f6fbcd147..1dda74109e03 100644 --- a/content/doc/book/book.yml +++ b/content/doc/book/book.yml @@ -9,3 +9,4 @@ chapters: - operating - scaling - appendix + - glossary diff --git a/content/doc/book/glossary/chapter.yml b/content/doc/book/glossary/chapter.yml new file mode 100644 index 000000000000..ffedc6ea38d3 --- /dev/null +++ b/content/doc/book/glossary/chapter.yml @@ -0,0 +1,2 @@ +--- +sections: diff --git a/content/doc/book/glossary/index.adoc b/content/doc/book/glossary/index.adoc new file mode 100644 index 000000000000..859878011cb1 --- /dev/null +++ b/content/doc/book/glossary/index.adoc @@ -0,0 +1,19 @@ +--- +layout: chapter +--- +:description: +:author: +:email: jenkinsci-users@googlegroups.com +:sectanchors: +:toc: left +:notitle: + 
+[glossary] += Glossary + + +[glossary] + +Freestyle Job:: + A Freestyle Job + diff --git a/content/doc/book/pipeline/chapter.yml b/content/doc/book/pipeline/chapter.yml index a9c17b078264..c0934aee8d77 100644 --- a/content/doc/book/pipeline/chapter.yml +++ b/content/doc/book/pipeline/chapter.yml @@ -1,4 +1,6 @@ --- sections: - - overview + - getting-started - jenkinsfile + - multibranch + - shared-libraries diff --git a/content/doc/book/pipeline/getting-started.adoc b/content/doc/book/pipeline/getting-started.adoc new file mode 100644 index 000000000000..a76a9de64d23 --- /dev/null +++ b/content/doc/book/pipeline/getting-started.adoc @@ -0,0 +1,231 @@ +--- +layout: section +--- +:notitle: +:description: +:author: +:email: jenkinsci-docs@googlegroups.com +:sectanchors: +:toc: +:imagesdir: /doc/book/resources/pipeline +:hide-uri-scheme: + += Getting Started + +Jenkins Pipeline is a suite of plugins which supports implementing and +integrating continuous delivery pipelines into Jenkins. Pipeline provides an +extensible set of tools for modeling simple-to-complex delivery pipelines "as +code" via the Pipeline DSL. +footnoteref:[dsl,link:https://en.wikipedia.org/wiki/Domain-specific_language[Domain-Specific Language]] + +This section will introduce some of the key concepts to Jenkins Pipeline and +help introduce the basics of defining and working with Pipelines inside of a +running Jenkins instance. + +== Prerequisites + +To use Jenkins Pipeline, you will need: + +* Jenkins 2.x or later (older versions back to 1.642.3 may work but are not + recommended) +* Pipeline plugin +footnoteref:[pipeline, link:https://plugins.jenkins.io/workflow-aggregator[Pipeline plugin]] + +To learn how to install and manage plugins, consult <<../managing/plugins#, Managing Plugins>>. + +== Defining a Pipeline + +Pipeline Script is written in +link:http://groovy-lang.org/[Groovy]. 
+The relevant bits of +link:http://groovy-lang.org/semantics.html[Groovy syntax] +will be introduced as necessary in this document, so while an understanding of +Groovy is helpful, it is not required to use Pipeline Script. + +A basic Pipeline can be created in either of the following ways: + +* By entering a script directly in the Jenkins web UI. +* By creating a `Jenkinsfile` which can be checked into a project's source + control repository. + +The syntax for defining a Pipeline with either approach is the same, but while +Jenkins supports entering Pipeline Script directly into the web UI, it's +generally considered best practice to define the Pipeline in a `Jenkinsfile` +which Jenkins will then load directly from source control. +footnoteref:[scm, https://en.wikipedia.org/wiki/Source_control_management] + + +=== Defining a Pipeline in the Web UI + +To create a basic Pipeline in the Jenkins web UI, follow these steps: + +* Click *New Item* on Jenkins home page. + +image::new-item-selection.png["Click *New Item* on the Jenkins home page", role=center] + +* Enter a name for your Pipeline, select *Pipeline* and click *OK*. + +[CAUTION] +==== +Jenkins uses the name of the Pipeline to create directories on disk. Pipeline +names which include spaces may uncover bugs in scripts which do not expect +paths to contain spaces. +==== + +image::new-item-creation.png["Enter a name, select *Pipeline*, and click *OK*", role=center] + + + +* In the *Script* text-area, enter a Pipeline script and click *Save*. + +image::hello-world-script.png["In the *Script* text-area, enter a Pipeline script and click Save", role=center] + +* Click *Build Now* to run the Pipeline. + +image::build-now.png["Click *Build Now* to run the Pipeline", role=center] + + +* Click *#1* under "Build History" and then click *Console Output* to see the + full output from the Pipeline. 
+ +image::hello-world-console-output.png["Click *Console Output* for the Pipeline", role=center] + +The example above shows a successful run of a basic Pipeline created in the Jenkins +web UI, using two valuable steps. + +[pipeline] +---- +// Script // +node { // <1> + echo 'Hello World' // <2> +} +// Declarative not yet implemented // +---- +<1> `node` allocates an executor and workspace in the Jenkins environment. +<2> `echo` writes simple string in the Console Output. + + +// Despite :sectanchors:, explicitly defining an anchor because it will be +// referenced from other documents +[[defining-a-pipeline-in-scm]] +=== Defining a Pipeline in SCM + +Complex pipelines would be cumbersome to write and maintain if you could only do +that in the text area provided by the Jenkins job configuration page. + +Accordingly, you also have the option of writing Pipeline scripts +(Jenkinsfiles) with a text editor and then loading those scripts into Jenkins +using the *Pipeline Script from SCM* option. + +Loading pipeline scripts using the `checkout scm` step leverages the +idea of "Pipeline as code" and allows easy maintenance of Pipelines with source +control and text-editors. + +To do this, select *Pipeline script from SCM* when defining the pipeline. + +With the *Pipeline script from SCM* option selected, you do not enter any Groovy +code in the Jenkins UI; you just indicate by specifying a path where in source +code you want to retrieve the pipeline from. When you update the designated +repository, a new build is triggered, as long as your job is configured with an +SCM polling trigger. + +[TIP] +==== +The first line of a `Jenkinsfile` should be `#!groovy` +footnoteref:[shebang, https://en.wikipedia.org/wiki/Shebang_(Unix)] +which text editors, IDEs, GitHub, etc will use to syntax highlight the +`Jenkinsfile` properly as Groovy code. 
+==== + + +== Built-in Documentation + +Pipeline ships with built-in documentation features to make it +easier to create Pipelines of varying complexities. This built-in documentation +is automatically generated and updated based on the plugins installed in the +Jenkins instance. + +The built-in documentation can be found globally at: +link:http://localhost:8080[localhost:8080/pipeline-syntax/], +assuming you have a Jenkins instance running on localhost port 8080. The same +documentation is also linked as *Pipeline Syntax* in the side-bar for any +configured Pipeline project. + +image::pipeline-syntax-sidebar.png[Pipeline Syntax in the side-bar, role=center] + +[[snippet-generator]] +=== Snippet Generator + +The built-in "Snippet Generator" utility is helpful for creating bits of +code for individual steps, discovering new steps provided by plugins, or +experimenting with different parameters for a particular step. + +The Snippet Generator is dynamically populated with a list of the steps +available to the Jenkins instance. The number of steps available is dependent +on the plugins installed which explicitly expose steps for use in Pipeline. + +To generate a step snippet with the Snippet Generator: + +. Navigate to the Snippet Generator +. Select the desired step in the *Sample Step* dropdown menu +. Use the dynamically populated area below the *Sample Step* dropdown to configure the selected step. +. Click *Generate Pipeline Script* to create a snippet of Pipeline which can be +copied and pasted into a Pipeline. + +image::snippet-generator.png[Snippet Generator, role=center] + +To access additional information and/or documentation about the step selected, +click on the help icon (blue question mark). + +=== Global Variable Reference + +In addition to the Snippet Generator, which only surfaces steps, Pipeline also +provides a built-in "*Global Variable Reference*." Like the Snippet Generator, +it is also dynamically populated by plugins. 
Unlike the Snippet Generator +however, the Global Variable Reference only contains documentation for +*variables*, provided by Pipeline or plugins, which are available for +Pipelines. + +The variables provided by default in Pipeline are: + +env:: + +Environment variables accessible from Pipeline Script, for example: +`env.PATH` or `env.BUILD_ID`. Consult the built-in +link:http://localhost:8080/pipeline-syntax/globals#env[Global Variable Reference] +for a complete, and up to date, list of environment variables +available in Pipeline. + +params:: + +Exposes all parameters defined for the Pipeline as a read-only +link:http://groovy-lang.org/syntax.html#_maps[Map], +for example: `params.MY_PARAM_NAME`. + +currentBuild:: + +May be used to discover information about the currently executing Pipeline, +with properties such as `currentBuild.result`, `currentBuild.displayName`, +etc. Consult the built-in +link:http://localhost:8080/pipeline-syntax/globals#currentBuild[Global Variable Reference] +for a complete, and up to date, list of properties available on `currentBuild`. + + +== Further Reading + +This section merely scratches the surface of what can be done with Jenkins +Pipeline, but should provide enough of a foundation for you to start +experimenting with a test Jenkins instance. + +In the next section, <>, more Pipeline steps +will be discussed along with patterns for implementing successful, real-world, +Jenkins Pipelines. + + +=== Additional Resources + +* link:https://jenkins.io/doc/pipeline/steps[Pipeline Steps Reference], + encompassing all steps provided by plugins distributed in the Jenkins Update + Center. +* link:https://jenkins.io/doc/pipeline/examples[Pipeline Examples], a + community-curated collection of copyable Pipeline examples. 
diff --git a/content/doc/book/pipeline/index.adoc b/content/doc/book/pipeline/index.adoc index ce1c3c4b3670..fb62387a0f84 100644 --- a/content/doc/book/pipeline/index.adoc +++ b/content/doc/book/pipeline/index.adoc @@ -6,43 +6,152 @@ layout: chapter :author: :email: jenkinsci-users@googlegroups.com :sectanchors: -:toc: left +:toc: + +//// +NOTE: The sections are ordered from simpler to progressively more complex +subjects. The earlier sections are intended for those new to pipeline or +unfamiliar with its latest features. The later sections with discuss +expert-level considerations and corner-cases. + +This chapter functions as a continuation of "Getting Started with Jenkins" and +"Using Jenkins" , but the format will be slightly different - see the +description above. The first sections should lead users through the basics of +pipeline, and later sections can switch to feature reference for experienced +users. All sections should still be written and ordered to only assume +knowledge from "Getting Started", "Using Jenkins", or from previous sections in +this chapter. +//// + = Pipeline -This chapter will cover all aspects of the Jenkins Pipeline, from running pipeline jobs -to writing your own pipeline code. +This chapter will cover all aspects of Jenkins Pipeline, from running pipeline jobs +to writing your own pipeline code, and even extending Pipeline. This chapter is intended to be used by Jenkins users of all skill levels, but beginners may need to refer to some sections of "<>" to understand some topics covered in this chapter. -The sections are ordered from simpler to progressively more complex subjects. -The earlier sections are intended for those new to pipeline or unfamiliar with its -latest features. The later sections with discuss expert-level considerations -and corner-cases. - If you are not yet familiar with basic Jenkins terminology and features, start with <>. 
-If you are already familiar with Jenkins basics and would like to delve deeper -into generally how to use various features, see -<>. +[[overview]] +== What is Pipeline? + +Jenkins Pipeline is a suite of plugins which supports implementing and +integrating continuous delivery pipelines into Jenkins. Pipeline provides an +extensible set of tools for modeling simple-to-complex delivery pipelines "as +code" via the Pipeline DSL. +footnoteref:[dsl,link:https://en.wikipedia.org/wiki/Domain-specific_language[Domain-Specific Language]] + +Typically, this "Pipeline as Code" would be written to a `Jenkinsfile` and +checked into a project's source control repository, for example: + +[pipeline] +---- +// Script // +node { // <1> + stage('Build') { // <2> + sh 'make' // <3> + } + + stage('Test') { + sh 'make check' + junit 'reports/**/*.xml' // <4> + } + + stage('Deploy') { + sh 'make publish' + } +} + +// Declarative not yet implemented // +---- +<1> <> indicates that Jenkins should allocate an executor and workspace for +this part of the Pipeline. +<2> <> describes a stage of this Pipeline. +<3> `sh` executes the given shell command +<4> `junit` is a Pipeline <> provided by the +link:https://plugins.jenkins.io/junit[JUnit plugin] +for aggregating test reports. + +[[why]] +== Why Pipeline? + +Jenkins is, fundamentally, an automation engine which supports a number of +automation patterns. Pipeline adds a powerful set of automation tools onto +Jenkins, supporting use-cases that span from simple continuous integration to +comprehensive continuous delivery pipelines. By modeling a series of related +tasks, users can take advantage of the many features of Pipeline: + +* *Code*: Pipelines are implemented "as code," and typically checked into + source control, giving teams the ability to edit, review, and iterate upon + their delivery pipeline. +* *Durable*: Pipelines can survive both planned, and unplanned, restarts of the + Jenkins master. 
+* *Pausable*: Pipelines can optionally stop and wait for human input or approval + before continuing the Pipeline run. +* *Versatile*: Pipelines support complex real-world continuous delivery + requirements, including the ability to fork/join, loop, and perform work in + parallel. +* *Extensible*: The Pipeline plugin supports custom extensions to its DSL + footnoteref:[dsl] + and multiple options for integration with other plugins. + + +While Jenkins has always allowed rudimentary forms of chaining Freestyle Jobs +together to perform sequential tasks, +footnote:[Additional plugins have been used to implement complex behaviors +utilizing Freestyle Jobs such as the Copy Artifact, Parameterized Trigger, +and Promoted Builds plugins] +Pipeline makes this concept a first-class citizen in Jenkins. + +Building on the core Jenkins value of extensibility, Pipeline is also +extensible both by users with <> +and by plugin developers. +footnoteref:[ghof,link:https://plugins.jenkins.io/github-organization-folder[GitHub +Organization Folder plugin]] + + +The flowchart below is an example of one continuous delivery scenario easily +modeled in Jenkins Pipeline: + +image::/images/pipeline/realworld-pipeline-flow.png[title="Pipeline Flow", 800] + + +[[terms]] +== Pipeline Terms -If you are a Jenkins administrator and want to know more about managing Jenkins nodes and instances, see -<>. +[[step]] +Step:: + A single task; fundamentally steps tell Jenkins _what_ to do. In + order to execute the shell command `make`, the `sh` + step would be used. For example `sh 'make'`. + When a plugin extends the Pipeline DSL, that typically means the plugin has + implemented a new _step_. -If you are a system administrator and want learn how to back-up, restore, maintain as Jenkins servers and nodes, see -<>. +[[node]] +Node:: + Generally speaking, all _work_ which is done by a Pipeline, will be done in + the context of a single, or multiple, `node` step declarations, which does two things: + . 
Schedules the steps contained within the block to run by adding an item + to the Jenkins queue. As soon as an executor is free on a node, the + steps will run. + . Creates a workspace, a directory specific to that particular + Pipeline, where files can be checked out from source control and work can + be done. +CAUTION: Depending on your Jenkins configuration, some workspaces may not get +automatically cleaned up after a period of inactivity. See tickets and +discussion linked from +https://issues.jenkins-ci.org/browse/JENKINS-2111[JENKINS-2111] +for more information. -[WARNING] -==== -*To Contributors*: -This chapter functions as a continuation of "<>" -and "<>", but the format will be slightly different - see the description above. -The first sections should lead users through the basics of pipeline, and later sections can switch to -feature reference for experienced users. All sections should still -be written and ordered to only assume knowledge from "Getting Started", "Using Jenkins", or -from previous sections in this chapter. -==== +[[stage]] +Stage:: + `stage` is a step for defining a conceptually distinct subset of the + entire Pipeline, for example: "Build", "Test", and "Deploy". While stages + have no bearing on the execution of the Pipeline, they are used by many + plugins to visualize or present Jenkins Pipeline status/progress. 
+ footnoteref:[blueocean,link:/projects/blueocean[Blue Ocean], link:https://wiki.jenkins-ci.org/display/JENKINS/Pipeline+Stage+View+Plugin[Pipeline Stage View plugin]] diff --git a/content/doc/book/pipeline/jenkinsfile.adoc b/content/doc/book/pipeline/jenkinsfile.adoc index fd29a01b012b..605c094f5162 100644 --- a/content/doc/book/pipeline/jenkinsfile.adoc +++ b/content/doc/book/pipeline/jenkinsfile.adoc @@ -4,145 +4,480 @@ layout: section :notitle: :description: :author: -:email: jenkinsci-users@googlegroups.com +:email: jenkinsci-docs@googlegroups.com :sectanchors: -:toc: left +:toc: +:hide-uri-scheme: = The Jenkinsfile -== Audience and Purpose +This section builds on the information covered in <>, +and introduces more useful steps, common patterns, and demonstrates some +non-trivial `Jenkinsfile` examples. + +Creating a `Jenkinsfile`, which is checked into source control +footnoteref:[scm, https://en.wikipedia.org/wiki/Source_control_management], +provides a number of immediate benefits: + +* Code review/iteration on the Pipeline +* Audit trail for the Pipeline +* Single source of truth + footnote:[https://en.wikipedia.org/wiki/Single_Source_of_Truth] + for the Pipeline, which can be viewed and edited by multiple members of the project. + + +While the syntax for defining a Pipeline, either in the web UI or with a +`Jenkinsfile`, is the same, it's generally considered best practice to define +the Pipeline in a `Jenkinsfile` and check that in to source control. -This document is intended for Jenkins users who want to leverage the power of -pipeline functionality. Extending the reach of what was learned from a "Hello -World" example in link:/doc/pipeline/[Getting Started with Pipeline], this -document explains how to use a `Jenkinsfile` to perform a simple checkout and -build for the contents of a repository. 
== Creating a Jenkinsfile -A `Jenkinsfile` is a container for your pipeline (or other) script, which details -what specific steps are needed to perform a job for which you want to use -Jenkins. You create a `Jenkinsfile` with your preferred Groovy editor, or through -the configuration page on the web interface of your Jenkins instance. +As discussed in the <> +section, a `Jenkinsfile` is a text file that contains the definition of a +Jenkins Pipeline and is checked into source control. Consider the following +Pipeline which implements a basic three-stage continuous delivery pipeline. -Using a Groovy editor to code a `Jenkinsfile` gives you more flexibility for -building complex single or multibranch pipelines, but whether you use an editor -or the Jenkins interface does not matter if what you want to do is get familiar -with basic `Jenkinsfile` content. +[pipeline] +---- +// Script // +node { // <1> + stage('Build') { // <2> + /* .. snip .. */ + } + stage('Test') { + /* .. snip .. */ + } + stage('Deploy') { + /* .. snip .. */ + } +} +// Declarative not yet implemented // +---- +<1> `node` allocates an executor and workspace in the Jenkins environment. +<2> `stage` describes distinct parts of the Pipeline for better visualization of progress/status. + +Not all Pipelines will have these same three stages, but this is a good +continuous delivery starting point to define them for most projects. The +sections below will demonstrate the creation and execution of a simple Pipeline +in a test installation of Jenkins. + +[NOTE] +==== +It is assumed that there is already a source control repository set up for the +project and a Pipeline has been defined in Jenkins following +<>. +==== + +Using a text editor, ideally one which supports +link:http://groovy-lang.org[Groovy] +syntax highlighting, create a new `Jenkinsfile` in the root directory of the +project. + + +In the example above, `node` is a crucial first step as it allocates an +executor and workspace for the Pipeline. 
In essence, without `node`, a Pipeline +cannot do any work! From within `node`, the first order of business will be to +checkout the source code for this project. Since the `Jenkinsfile` is being +pulled directly from source control, Pipeline provides a quick and easy way to +access the right revision of the source code + +[pipeline] +---- +// Script // +node { + checkout scm // <1> + /* .. snip .. */ +} +// Declarative not yet implemented // +---- +<1> The `checkout` step will checkout code from source control; `scm` is a +special variable which instructs the `checkout` step to clone the specific +revision which triggered this Pipeline run. +=== Build -. Open your Jenkins instance or Groovy editor. -. Navigate to the directory you want (it should be the root directory for your project). -. Use standard Jenkins syntax. -. Save your file. +For many projects the beginning of "work" in the Pipeline would be the "build" +stage. Typically this stage of the Pipeline will be where source code is +assembled, compiled, or packaged. The `Jenkinsfile` is *not* a replacement for an +existing build tool such as GNU/Make, Maven, Gradle, etc, but rather can be +viewed as a glue layer to bind the multiple phases of a project's development +lifecycle (build, test, deploy, etc) together. -The following example shows a basic `Jenkinsfile` made to build and test code for -a Maven project. `node` is the step that schedules tasks in the following block -to run on the machine (usually an agent) that matches the label specified in the -step argument (in this case, a node called "linux"). Code between the braces ( -`{` and `}` ) is the body of the `node` step. The `checkout scm` command -indicates that this `Jenkinsfile` was created with an eye toward multibranch -support: +Jenkins has a number of plugins for invoking practically any build tool in +general use, but this example will simply invoke `make` from a shell step +(`sh`). 
The `sh` step assumes the system is Unix/Linux-based, for +Windows-based systems the `bat` could be used instead. +[pipeline] +---- +// Script // +node { + /* .. snip .. */ + stage('Build') { + sh 'make' // <1> + archiveArtifacts artifacts: '**/target/*.jar', fingerprint: true // <2> + } + /* .. snip .. */ +} +// Declarative not yet implemented // +---- +<1> The `sh` step invokes the `make` command and will only continue if a +zero exit code is returned by the command. Any non-zero exit code will fail the +Pipeline. +<2> `archiveArtifacts` captures the files built matching the include pattern +(`**/target/*.jar`) and saves them to the Jenkins master for later retrieval. + + +[CAUTION] +==== +Archiving artifacts is not a substitute for using external artifact +repositories such as Artifactory or Nexus and should be considered only for +basic reporting and file archival. +==== + +=== Test + +Running automated tests is a crucial component of any successful continuous +delivery process. As such, Jenkins has a number of test recording, reporting, +and visualization facilities provided by a +link:https://plugins.jenkins.io/?labels=report[number of plugins]. +At a fundamental level, when there are test failures, it is useful to have +Jenkins record the failures for reporting and visualization in the web UI. The +example below uses the `junit` step, provided by the +link:https://plugins.jenkins.io/junit[JUnit plugin]. + +In the example below, if tests fail, the Pipeline is marked "unstable", as +denoted by a yellow ball in the web UI. Based on the recorded test reports, +Jenkins can also provide historical trend analysis and visualization. + +[pipeline] +---- +// Script // +node { + /* .. snip .. */ + stage('Test') { + /* `make check` returns non-zero on test failures, + * using `true` to allow the Pipeline to continue nonetheless + */ + sh 'make check || true' // <1> + junit '**/target/*.xml' // <2> + } + /* .. snip .. 
*/ +} +// Declarative not yet implemented // +---- +<1> Using an inline shell conditional (`sh 'make || true'`) ensures that the +`sh` step always sees a zero exit code, giving the `junit` step the opportunity +to capture and process the test reports. Alternative approaches to this are +covered in more detail in the <> section below. +<2> `junit` captures and associates the JUnit XML files matching the inclusion +pattern (`**/target/*.xml`). -[source,groovy] +=== Deploy + +Deployment can imply a variety of steps, depending on the project or +organization requirements, and may be anything from publishing built artifacts +to an Artifactory server, to pushing code to a production system. + +At this stage of the example Pipeline, both the "Build" and "Test" stages have +successfully executed. In essense, the "Deploy" stage will only execute +assuming previous stages completed successfully, otherwise the Pipeline would +have exited early. + +[pipeline] ---- - node ('linux'){ - stage 'Build and Test' - env.PATH = "${tool 'Maven 3'}/bin:${env.PATH}" - checkout scm - sh 'mvn clean package' - } +// Script // +node { + /* .. snip .. */ + stage('Deploy') { + if (currentBuild.result == 'SUCCESS') { // <1> + sh 'make publish' + } + } + /* .. snip .. */ +} +// Declarative not yet implemented // ---- +<1> Accessing the `currentBuild.result` variable allows the Pipeline Script to +determine if there were any test failures. In which case, the value would be +`UNSTABLE`. + +Assuming everything has executed successfully in the example Jenkins Pipeline, +each successful Pipeline run will have associated build artifacts archived, +test results reported upon and the full console output all in Jenkins. -In single-branch contexts, you could replace .checkout scm. with a source code -checkout step that calls a particular repository, such as: +A Pipeline Script can include conditional tests (shown above), loops, +try/catch/finally blocks and even functions. 
The next section will cover this +more advanced Pipeline Script syntax in more detail. +== Advanced Syntax for Pipeline Scripts + +Pipeline Script is a domain-specific language +footnoteref:[dsl, https://en.wikipedia.org/wiki/Domain-specific_language] +based on Groovy, most +link:http://groovy-lang.org/semantics.html[Groovy syntax] +can be in Pipeline Script without modification. + +=== String Interpolation + +Groovy's "String" interpolation support can be confusing to many newcomers to +the language. While Groovy supports declaring a string with either single quotes, or +double quotes, for example: + [source,groovy] ---- +def singlyQuoted = 'Hello' +def doublyQuoted = "World" +---- + +Only the latter string will support the dollar-sign (`$`) based string +interpolation, for example: -git url: "https://github.com/my-organization/simple-maven-project-with-tests.git" +[source,groovy] +---- +def username = 'Jenkins' +echo 'Hello Mr. ${username}' +echo "I said, Hello Mr. ${username}" ---- +Would result in: -== Making Pull Requests +[source] +---- +Hello Mr. ${username} +I said, Hello Mr. Jenkins +---- -A pull request notifies the person responsible for maintaining a Jenkins -repository that you have a change or change set that you want to see merged into -the main branch associated with that repository. Each individual change is -called a "commit." +Understanding how to use Groovy's string interpolation is vital for using some +of Pipeline Script's more advanced features. -You make pull requests from a command line, or by selecting the appropriately -labeled button (typically "Pull" or "Create Pull Request") in the interface for -your source code management system. +=== Working with the Environment -A pull request to a repository included in or monitored by an Organization -Folder can be used to automatically execute a multibranch pipeline build. 
+Jenkins Pipeline exposes environment variables via the global variable `env`, +which is available from anywhere within a `Jenkinsfile`. The full list of +environment variables accessible from within Jenkins Pipeline is documented at +link:http://localhost:8080/pipeline-syntax/globals#env[localhost:8080/pipeline-syntax/globals#env], +assuming a Jenkins master is running on `localhost:8080`, and includes: +BUILD_ID:: The current build ID, identical to BUILD_NUMBER for builds created in Jenkins versions 1.597+ +JOB_NAME:: Name of the project of this build, such as "foo" or "foo/bar". +JENKINS_URL:: Full URL of Jenkins, such as http://example.com:port/jenkins/ (NOTE: only available if Jenkins URL set in "System Configuration") -== Using Organization Folders -Organization folders enable Jenkins to automatically detect and include any new -repositories within them as resources. +Referencing or using these environment variables can be accomplished like +accessing any key in a Groovy +link:http://groovy-lang.org/syntax.html#_maps[Map], +for example: -When you create a new repository (as might be the case for a new project), that -repository has a `Jenkinsfile`. If you also configure one or more organization -folders, Jenkins automatically detects any repository in an organization folder, -scans the contents of that repository at either default or configurable -intervals, and creates a Multibranch Pipeline project for what it finds in the -scan. An organization folder functions as a "parent," and any item within it is -treated as a "child" of that parent. +[pipeline] +---- +// Script // +node { + echo "Running ${env.BUILD_ID} on ${env.JENKINS_URL}" +} +// Declarative not yet implemented // +---- -Organization folders alleviate the need to manually create projects for new -repositories. 
When you use organization folders, Jenkins views your repositories -as a hierarchy, and each repository (organization folder) may optionally have -child elements such as branches or pull requests. +==== Setting environment variables -To create Organization folders: +Setting an environment variable within a Jenkins Pipeline can be done with the +`withEnv` step, which allows overriding specified environment variables for a +given block of Pipeline Script, for example: -. Open Jenkins in your web browser. -. Go to: New Item → GitHub Organization or New Item → Bitbucket Team. -. Follow the configuration steps, making sure to specify appropriate scan - credentials and a specific owner for the GitHub Organization or Bitbucket Team - name. -. Set build triggers by selecting the checkbox associated with the trigger type - you want. Folder scans and the pipeline builds associated with those scans can - be initiated by command scripts or performed at defined intervals. They can also - triggered by project promotion or changes to the images in a monitored Docker - hub. -. Decide whether to automatically remove or retain unused items. "Orphaned Item - Strategy" fields in the configuration interface let you specify how many days to - keep old items, and how many old items to keep. If you enter no values in these - fields, unused items are removed by default. +[pipeline] +---- +// Script // +node { + /* .. snip .. 
*/ + withEnv(["PATH+MAVEN=${tool 'M3'}/bin"]) { + sh 'mvn -B verify' + } +} +// Declarative not yet implemented // +---- -While configuring organization folders, you can set the following options: -* Repository name pattern - a regular expression to specify which repositories are included in scans -* API endpoint - an alternate API endpoint to use a self-hosted GitHub Enterprise -* Checkout credentials - alternate credentials to use when checking out (cloning) code +=== Build Parameters -Multibranch Pipeline projects and Organization Folders are examples of -"computed folder" functionality. In Multibranch Pipeline projects, computation -creates child items for eligible branches. In Organization folders, computation -populates child items as individual Multibranch Pipelines for scanned -repositories. +If you configured your pipeline to accept parameters using the *Build with +Parameters* option, those parameters are accessible as Groovy variables of the +same name. -Select the "Folder Computation" section of your Jenkins interface to see the -duration (in seconds) and result (success or failure) of computation operations, -or to access a Folder Computation Log that provides more detail about this -activity. -== Basic Checkout and Build +Assuming that a String parameter named "Greeting" has been configured for the +Pipeline project in the web UI, a `Jenkinsfile` can access that parameter via +`$Greeting`: -Checkout and build command examples are shown in the code example used by the -introduction above. Examples shown assume that Jenkins is running on Linux or -another Unix-like operating system. +[pipeline] +---- +// Script // +node { + echo "${Greeting} World!" +} +// Declarative not yet implemented // +---- -If your Jenkins server or agent is running on Windows, you are less likely to be -using the Bourne shell (`sh`) or -link:http://www.computerhope.com/unix/ubash.htm[Bourne-Again shell] (`bash`) as -a command language interpreter for starting software builds. 
In Windows -environments, use `bat` in place of `sh`, and backslashes (`\`) rather than -slashes as file separators in pathnames. +///// +TODO: Expand this section with more examples +///// + +=== Handling Failures + +Pipeline Script relies on Groovy's built-in `try`/`catch`/`finally` semantics +for handling failures during execution of the Pipeline. + +In the <> example above, the `sh` step was modified to never return a +non-zero exit code (`sh 'make check || true'`). This approach, while valid, +means the following stages need to check `currentBuild.result` to know if +there has been a test failure or not. + +An alternative way of handling this, which preserves the early-exit behavior of +failures in Pipeline, while still giving `junit` the chance to capture test +reports, is to use a series of `try`/`finally` blocks: + +[pipeline] +---- +// Script // +node { + /* .. snip .. */ + stage('Test') { + try { + sh 'make check' + } + finally { + junit '**/target/*.xml' + } + } + /* .. snip .. */ +} +// Declarative not yet implemented // +---- + +=== Using multiple nodes + +In all previous uses of the `node` step, it has been used without any +arguments. This means Jenkins will allocate an executor wherever one is +available. The `node` step can take an optional "label" parameter, which is +helpful for more advanced use-cases such as executing builds/tests across +multiple platforms. + +In the example below, the "Build" stage will be performed on one node and +the built results will be reused on two different nodes, labelled "linux" and +"windows" respectively, during the "Test" stage. 
+ +[pipeline] +---- +// Script // +stage('Build') { + node { + checkout scm + sh 'make' + stash includes: '**/target/*.jar', name: 'app' // <1> + } +} + +stage('Test') { + node('linux') { // <2> + checkout scm + try { + unstash 'app' // <3> + sh 'make check' + } + finally { + junit '**/target/*.xml' + } + } + node('windows') { + checkout scm + try { + unstash 'app' + bat 'make check' // <4> + } + finally { + junit '**/target/*.xml' + } + } +} +// Declarative not yet implemented // +---- +<1> The `stash` step allows capturing files matching an inclusion pattern +(`**/target/*.jar`) for reuse within the _same_ Pipeline. Once the Pipeline has +completed its execution, stashed files are deleted from the Jenkins master. +<2> The optional parameter to `node` allows for any valid Jenkins label +expression. Consult the inline help for `node` in the <> for more details. +<3> `unstash` will retrieve the named "stash" from the Jenkins master into the +Pipeline's current workspace. +<4> The `bat` script allows for executing batch scripts on Windows-based +platforms. + +=== Executing in parallel + +The example in the <> runs tests across two +different platforms in a linear series. In practice, if the `make check` +execution takes 30 minutes to complete, the "Test" stage would now take 60 +minutes to complete! + +Fortunately, Pipeline has built-in functionality for executing portions of +Pipeline Script in parallel, implemented in the aptly named `parallel` step. + +Refactoring the example above to use the `parallel` step: + +[pipeline] +---- +// Script // +stage('Build') { + /* .. snip .. */ +} + +stage('Test') { + parallel linux: { + node('linux') { + checkout scm + try { + unstash 'app' + sh 'make check' + } + finally { + junit '**/target/*.xml' + } + } + }, + windows: { + node('windows') { + /* .. snip .. 
*/ + } + } +} +// Declarative not yet implemented // +---- + +Instead of executing the tests on the "linux" and "windows" labelled nodes in +series, they will now execute in parallel assuming the requisite capacity +exists in the Jenkins environment. + + +=== Optional step arguments + +Groovy allows parentheses around function arguments to be omitted. + +Many Pipeline steps also use the named-parameter syntax as a shorthand for +creating a Map in Groovy, which uses the syntax `[key1: value1, key2: value2]`. +Making statements like the following functionally equivalent: + +[source, groovy] +---- +git url: 'git://example.com/amazing-project.git', branch: 'master' +git([url: 'git://example.com/amazing-project.git', branch: 'master']) +---- + +For convenience, when calling steps taking only one parameter (or only one +mandatory parameter), the parameter name may be omitted, for example: + +[source, groovy] +---- +sh 'echo hello' /* short form */ +sh([script: 'echo hello']) /* long form */ +---- diff --git a/content/doc/book/pipeline/multibranch.adoc b/content/doc/book/pipeline/multibranch.adoc new file mode 100644 index 000000000000..87094a865a39 --- /dev/null +++ b/content/doc/book/pipeline/multibranch.adoc @@ -0,0 +1,107 @@ +--- +layout: section +--- +:notitle: +:description: +:author: +:email: jenkinsci-docs@googlegroups.com +:sectanchors: +:imagesdir: /doc/book/resources/pipeline +:hide-uri-scheme: +:toc: + += Multibranch Pipelines + +In the <> a `Jenkinsfile` which could be +checked into source control was implemented. This section covers the concept of +*Multibranch* Pipelines which build on the `Jenkinsfile` foundation to provide +more dynamic and automatic functionality in Jenkins. + +== Creating a Multibranch Pipeline + +The *Multibranch Pipeline* project type enables you to implement different +Jenkinsfiles for different branches of the same project. 
+In a Multibranch Pipeline project, Jenkins automatically discovers, manages and +executes Pipelines for branches which contain a `Jenkinsfile` in source control. + +This eliminates the need for manual Pipeline creation and management, as would +be necessary when, for example, a developer adds a new feature branch. + +To create a Multibranch Pipeline: + +* Click *New Item* on the Jenkins home page. + +image::new-item-selection.png["Click *New Item* on the Jenkins home page", role=center] + +* Enter a name for your Pipeline, select *Multibranch Pipeline* and click *OK*. + +[CAUTION] +==== +Jenkins uses the name of the Pipeline to create directories on disk. Pipeline +names which include spaces may uncover bugs in scripts which do not expect +paths to contain spaces. +==== + +image::new-item-multibranch-creation.png["Enter a name, select *Multibranch Pipeline*, and click *OK*", role=center] + +* Add a *Branch Source* (for example, Git) and enter the location of the + repository. + +image::multibranch-branch-source.png["Add a Branch Source", role=center] +image::multibranch-branch-source-configuration.png["Add the URL to the project repository", role=center] + + +* *Save* the Multibranch Pipeline project. + +Upon *Save*, Jenkins automatically scans the designated repository and creates +appropriate items for each branch in the repository which contains a +`Jenkinsfile`. + +By default, Jenkins will not automatically reindex the repository for branch +additions or deletions (unless using an <<organization-folders,Organization Folder>>), +so it is often useful to configure a Multibranch Pipeline to periodically +reindex in the configuration: + +image::multibranch-branch-indexing.png["Setting up branch reindexing", role=center] + + +=== Additional Environment Variables + +Multibranch Pipelines expose additional information about the branch being +built through the `env` global variable, such as: + +BRANCH_NAME:: Name of the branch for which this Pipeline is executing, for +example `master`. 
+ +CHANGE_ID:: An identifier corresponding to some kind of change request, such as a pull request number + +Additional environment variables are listed in the +<>. + + +=== Supporting Pull Requests + +With the "GitHub" or "Bitbucket" Branch Sources, Multibranch Pipelines can be +used for validating pull/change requests. This functionality is provided, +respectively, by the +link:https://plugins.jenkins.io/github-branch-source[GitHub Branch Source] +and +link:https://plugins.jenkins.io/cloudbees-bitbucket-branch-source[Bitbucket Branch Source] +plugins. Please consult their documentation for further information on how to +use those plugins. + + +[[organization-folders]] +== Using Organization Folders + +Organization Folders enable Jenkins to monitor an entire GitHub +Organization, or Bitbucket Team/Project and automatically create new +Multibranch Pipelines for repositories which contain branches and pull requests +containing a `Jenkinsfile`. + +Currently, this functionality exists only for GitHub and Bitbucket, with +functionality provided by the +link:https://plugins.jenkins.io/github-organization-folder[GitHub Organization Folder] +and +link:https://plugins.jenkins.io/cloudbees-bitbucket-branch-source[Bitbucket Branch Source] +plugins. diff --git a/content/doc/book/pipeline/overview.adoc b/content/doc/book/pipeline/overview.adoc index 7e5c7b9ba2a8..f0b73c51fbd0 100644 --- a/content/doc/book/pipeline/overview.adoc +++ b/content/doc/book/pipeline/overview.adoc @@ -1,625 +1,4 @@ --- -layout: section +layout: refresh +refresh_to_post_id: '/doc/book/pipeline/getting-started' --- -:notitle: -:description: -:author: -:email: jenkinsci-users@googlegroups.com -:sectanchors: -:toc: left - -= Overview - -== Audience and Purpose - -This document is intended for novice users of the Jenkins pipeline feature. The -document explains what a pipeline is, why that matters, and how to create the -different kinds of pipelines. - -== Why Pipeline? 
- -While standard Jenkins "freestyle" jobs support simple continuous integration by -allowing you to define sequential tasks in an application lifecycle, they do not -create a record of execution that persists through any planned or unplanned -restarts, enable one script to address all the steps in a complex workflow, or -confer the other advantages of pipelines. - -In contrast to freestyle jobs, pipelines enable you to define the whole -application lifecycle. Pipeline functionality helps Jenkins to support -continuous delivery (CD). The Pipeline plugin was built with requirements for a -flexible, extensible, and script-based CD workflow capability in mind. - -Accordingly, pipeline functionality is: - -* Durable: Pipelines can survive both planned and unplanned restarts of your Jenkins master. -* Pausable: Pipelines can optionally stop and wait for human input or approval before completing the jobs for which they were built. -* Versatile: Pipelines support complex real-world CD requirements, including the ability to fork or join, loop, and work in parallel with each other. -* Extensible: The Pipeline plugin supports custom extensions to its DSL (domain scripting language) and multiple options for integration with other plugins. - - -The flowchart below is an example of one continuous delivery scenario enabled by the Pipeline plugin: - -image::/images/pipeline/realworld-pipeline-flow.png[title="Pipeline Flow", 800] - -== Pipeline Defined - -Pipelines are Jenkins jobs enabled by the Pipeline (formerly called "workflow") -plugin and built with simple text scripts that use a Pipeline DSL -(domain-specific language) based on the Groovy programming language. - -Pipelines leverage the power of multiple steps to execute both simple and -complex tasks according to parameters that you establish. Once created, -pipelines can build code and orchestrate the work required to drive applications -from commit to delivery. 
- -== Pipeline Vocabulary - -Pipeline terms such as "step," "node," and "stage" are a subset of the vocabulary used for Jenkins in general. - -Step:: - A "step" (often called a "build step") is a single task that is part of sequence. Steps tell Jenkins what to do. - -Node:: - In pipeline coding contexts, a "node" is a step that does two things, typically by enlisting help from available executors on agents: - * Schedules the steps contained within it to run by adding them to the Jenkins build queue (so that as soon as an executor slot is free on a node, the appropriate steps run). - * Creates a workspace, meaning a file directory specific to a particular job, where resource-intensive processing can occur without negatively impacting your pipeline performance. Workspaces created by node are automatically removed after all the steps contained inside the node declaration finish executing. - It is a best practice to do all material work, such as building or running shell scripts, within nodes, because node blocks in a stage tell Jenkins that the steps within them are resource-intensive enough to be scheduled, request help from the agent pool, and lock a workspace only as long as they need it. - -In Jenkins generally, "node" also means any computer that is part of your Jenkins installation, whether that computer is used as a master or as an agent. - -Stage:: - A "stage" is a logically distinct part of the execution of any task, with parameters for locking, ordering, and labeling its part of a process relative to other parts of the same process. Pipeline syntax is often comprised of stages. Each stage step can have one or more build steps within it. - It is a best practice to work within stages because they help with organization by lending logical divisions to a pipelines, and because the - Jenkins Pipeline visualization feature displays stages as unique segments of the pipeline. 
- -Familiarity with Jenkins terms such as "master," "agent," and "executor" also helps with understanding how pipelines work. These terms are not specific to pipelines: - -* master - A "master" is the computer where the Jenkins server is installed and - running; it handles tasks for your build system. Pipeline scripts are parsed - on masters, where Groovy code runs and node blocks allocate executors and - workspaces for use by any nested steps (such as `sh`) that might request one or both. -* agent - An "agent" (formerly "slave") is a computer set up to offload - available projects from the master. Your configuration determines the number - and scope of operations that an agent can perform. Operations are performed by - executors. -* executor - An "executor" is a computational resource for running builds or - Pipeline steps. It can run on master or agent machines, either by itself or in - parallel with other executors. - -== Preparing Jenkins to Run Pipelines - -To run pipelines, you need to have a Jenkins instance that is set up with the -appropriate plugins. This requires: - -* Jenkins 1.642.3 or later (Jenkins 2 is recommended) -* The Pipeline plugin - -=== Installing the Pipeline Plugin - -The Pipeline plugin is installed in the same way as other Jenkins plugins. -Installing the Pipeline plugin also installs the suite of related plugins on -which it depends: - -. Open Jenkins in your web browser. -. On the Manage Jenkins page for your installation, navigate to *Manage Plugins*. -. Find https://wiki.jenkins-ci.org/display/JENKINS/Pipeline+Plugin[Pipeline] from among the plugins listed on the Available tab (You can do this by scrolling through the plugin list or by using "Pipeline" as a term to filter results). -. Select the checkbox for Pipeline plugin. -. Select either *Install without restart* or *Download now and install after restart*. -. Restart Jenkins. 
- -=== Pipeline Plugin Context - -The Pipeline plugin works with a suite of related plugins that enhance the -pipeline functionality of your Jenkins setup. The related plugins typically -introduce additional pipeline syntax or visualizations. - -For example, the table below, while not comprehensive, describes a few -pipeline-related plugins in terms of their importance to pipeline functionality -(required, recommended, or optional). - -To get the basic pipeline functionality, you only need to install the main -Pipeline plugin, but recommended plugins add additional capabilities that you -will probably want. For example, it is a best practice to develop pipelines as code by storing a `Jenkinsfile` with pipeline script in your SCM, -so that you can apply the same version control and testing to pipelines as you do to your other software, and that is why the -Multibranch Pipeline plugin is recommended. - -Optional plugins are mainly useful if you are creating pipelines that are -related to the technologies that they support. 
- - -[options="header"] -|======================= -|Plugin Name |Description |Status -|Pipeline (workflow-aggregator) | Installs the core pipeline engine and its dependent plugins: -Pipeline: API, -Pipeline: Basic Steps, -Pipeline: Durable Task Step, -Pipeline: Execution Support, -Pipeline: Global Shared Library for CPS pipeline, -Pipeline: Groovy CPS Execution, -Pipeline: Job, -Pipeline: SCM Step, -Pipeline: Step API -| required - -| Pipeline: Stage View -| Provides a graphical swimlane view of pipeline stage execution, as well as a build history of the stages -| recommended - -| Multibranch Pipeline -| Adds "Multibranch Pipeline" item type which enables Jenkins to automatically -build branches that contain a `Jenkinsfile` -| recommended - -| GitHub Branch Source -| Adds GitHub Organization Folder item type and adds "GitHub" as a branch source on Multibranch pipelines -| recommended for teams hosting repositories in GitHub - -| Bitbucket Branch Source -| Adds Bitbucket Team item type and adds "Bitbucket" as a branch source on Multibranch pipelines -| recommended for teams hosting repositories in Bitbucket; best with Bitbucket Server 4.0 or later. - -| Docker Pipeline -| Enables pipeline to build and use Docker containers inside pipeline scripts. -| optional - -|======================= - - -=== More Information - -As with any Jenkins plugin, you can install the Pipeline plugin using the Plugin -Manager in a running Jenkins instance. - -To explore Pipeline without installing -Jenkins separately or accessing your production system, you can run a -link:https://github.com/jenkinsci/workflow-aggregator-plugin/blob/master/demo/README.md[Docker -demo] of Pipeline functionality. - -Pipeline-related plugins are regularly "whitelisted" as compatible with or -designed for Pipeline usage. For more information, see the -link:https://github.com/jenkinsci/pipeline-plugin/blob/master/COMPATIBILITY.md[Plugin -Compatibility With Pipeline] web page. 
- -When you get flows from source control through `Jenkinsfile` or a link:https://github.com/jenkinsci/workflow-cps-global-lib-plugin/blob/master/README.md[Pipeline Global Library], -you may also have to whitelist method calls in the link:https://wiki.jenkins-ci.org/display/JENKINS/Script+Security+Plugin[Script Security Plugin]. - -[NOTE] -==== -Several plugins available in the Jenkins ecosystem but not actually -related to the Pipeline feature set described in this guide also use the terms -"pipeline," "DSL," and "Job DSL" in their names. For example: - -* Build Pipeline plugin - provides a way to execute Jenkins jobs sequentially -* Build Flow Plugin - introduces a job type that lets you define an orchestration process as a script. - -This guide describes the link:https://wiki.jenkins-ci.org/display/JENKINS/Pipeline+Plugin[Pipeline Plugin] that supports the current Pipeline feature set. -==== - -== Approaches to Defining Pipeline Script - -You can create pipelines in either of the following ways: - -* Through script entered in the configuration page of the web interface for your Jenkins instance. -* Through a `Jenkinsfile` that you create with a text editor and then check into your project's source control repository, where it can be accessed when you select the *Pipeline Script from SCM* option while configuring the Pipeline in Jenkins. - -[NOTE] -==== -When you use a Jenkinsfile, it is a best practice to put #!groovy at the top of the file so that IDEs and -GitHub diffs detect the Groovy language properly. - -==== - -== Creating a Simple Pipeline - -Initial pipeline usage typically involves the following tasks: - -. Downloading and installing the Pipeline plugin (Unless it is already part of your Jenkins installation) -. Creating a Pipeline of a specific type -. Configuring your Pipeline -. Controlling flow (workflow) through your Pipeline -. 
Scaling your Pipeline - -To create a simple pipeline from the Jenkins interface, perform the following steps: - -. Click *New Item* on your Jenkins home page, enter a name for your (pipeline) job, select *Pipeline*, and click *OK*. -. In the Script text area of the configuration screen, enter your pipeline script. If you are new to pipeline creation, you might want to start by opening Snippet Generator and selecting the "Hello Word" snippet. -. Check the Use Groovy Sandbox option below the Script text area. -. Click *Save*. -. Click *Build Now* to create the pipeline. -. Click ▾ and select *Console Output* to see the output. - - -Pipelines are written as Groovy scripts that tell Jenkins what to do when they -are run. Relevant bits of syntax are introduced as needed, so while an -understanding of Groovy is helpful, it is not required to use Pipeline. - -If you are a Jenkins administrator (in other words, authorized to approve your -own scripts), sandboxing is optional but efficient, because it lets scripts run -without approval as long as they limit themselves to operations that Jenkins -considers inherently safe. - -[NOTE] -==== -To use pathnames that include spaces, bracket those pathnames between escaped double quotes using \". -The extra quotation marks ensure that any spaces in pathnames are parsed properly. 
- -==== - -The following example shows a successful build of a pipeline created with a -one-line script that uses the `echo` step to output the phrase, "Hello from -Pipeline": - -[source,groovy] ----- -node { - echo 'Hello from Pipeline' -} ----- - ----- -Started by user anonymous -[Pipeline] echo -Hello from Pipeline -[Pipeline] End of Pipeline -Finished: SUCCESS ----- - -[NOTE] -==== -You can also create complex and multibranch pipelines in the script entry -area of the Jenkins configuration page, but because they contain multiple stages -and the configuration page UI provides limited scripting space, pipeline -creation is more commonly done using an editor of your choice from which scripts -can be loaded into Jenkins using the *Pipeline script from SCM* option. - -==== - -It is a best practice to use parallel steps whenever you can, as long as you remember not to attempt so much parallel processing -that it swamps the number of available executors. For example, you can acquire a node within the parallel branches of your pipeline: - -[source,groovy] ----- -parallel 'integration-tests':{ - node('mvn-3.3'){} -}, 'functional-tests':{ - node('selenium'){} -} ----- - -== Creating Multibranch Pipelines - -The *Multibranch Pipeline* project type enables you to configure different jobs -for different branches of the same project. In a multibranch pipeline -configuration, Jenkins automatically discovers, manages, and executes jobs -for multiple source repositories and branches. This eliminates the need for -manual job creation and management, as would otherwise be necessary -when, for example, a developer adds a new feature to an existing -product. - -A multibranch pipeline project always includes a 'Jenkinsfile' in its -repository root. Jenkins automatically creates a sub-project for each branch -that it finds in a repository with a `Jenkinsfile`. - -Multibranch pipelines use the same version control as the rest of your software -development process. 
This "pipeline as code" approach has the following -advantages: - -* You can modify pipeline code without special editing permissions. -* Finding out who changed what and why no longer depends on whether developers remember to comment their code changes in configuration files. -* Version control makes the history of changes to code readily apparent. - -To create a Multibranch Pipeline: - -. Click New Item on your Jenkins home page, enter a name for your job, select Multibranch Pipeline, and click OK. -. Configure your SCM source (options include Git, GitHub, Mercurial, Subversion, and Bitbucket), supplying information about the owner, scan credentials, and repository in appropriate fields. - For example, if you select Git as the branch source, you are prompted for the usual connection information, but then rather than enter a fixed refspec (Git's name for a source/destination pair), you would enter a branch name pattern (Use default settings to look for any branch). -. Configure the other multibranch pipeline options: - * API endpoint - an alternate API endpoint to use a self-hosted GitHub Enterprise - * Checkout credentials - alternate credentials to use when checking out the code (cloning) - * Include branches - a regular expression to specify branches to include - * Exclude branches - a regular expression to specify branches to exclude; note that this will takes precedence over the contents of include expressions -. Save your configuration. - -Jenkins automatically scans the designated repository and creates appropriate branches. - -For example (again in Git), if you started with a master branch, and then wanted -to experiment with some changes, and so did `git checkout -b newfeature` and -pushed some commits, Jenkins would automatically detect the new branch in your -repository and create a new sub-project for it. That sub-project would have its -own build history unrelated to the trunk (main line). 
- -If you choose, you can ask for the sub-project to be automatically removed after -its branch is merged with the main line and deleted. To change your Pipeline -script—for example, to add a new Jenkins publisher step corresponding to new -reports that your `Makefile`/`pom.xml`/etc. is creating—you edit the appropriate -`Jenkinsfile`. Your Pipeline script is always synchronized with -the rest of the source code you are working on. - -*Multibranch Pipeline* projects expose the name of the branch being built with -the `BRANCH_NAME` environment variable. In multibranch pipelines, the `checkout -scm` step checks out the specific commit that the `Jenkinsfile` originated, so -as to maintain branch integrity. - -== Loading Pipeline Scripts from SCM - -Complex pipelines would be cumbersome to write and maintain if you could only do -that in the text area provided by the Jenkins job configuration page. - -Accordingly, you also have the option of writing pipeline scripts (Jenkinsfiles) -with the editor that you use in your IDE (integrated development environment) or -SCM system, and then loading those scripts into Jenkins using the *Pipeline -Script from SCM* option enabled by the workflow-scm-step plugin, which is one of -the plugins that the Pipeline plugin depends on and automatically installs. - -Loading pipeline scripts using the `checkout scm` step leverages the -idea of "pipeline as code," and lets you maintain pipelines using version -control and standalone Groovy editors. - -To do this, select *Pipeline script from SCM* when defining the pipeline. - -With the *Pipeline script from SCM* option selected, you do not enter any Groovy -code in the Jenkins UI; you just indicate by specifying a path where in source -code you want to retrieve the pipeline from. When you update the designated -repository, a new build is triggered, as long as your job is configured with an -SCM polling trigger. 
- -== Writing Pipeline Scripts in the Jenkins UI - -Because Pipelines are comprised of text scripts, they can be written (edited) in -the same script creation area of the Jenkins user interface where you create -them: - -image::/images/pipeline/pipeline-editor.png[title="Pipeline Editor", 800] - -NOTE: You determine which kind of pipeline you want to set up before writing it. - -=== Using Snippet Generator - -You can use the Snippet Generator tool to create syntax examples for individual -steps with which you might not be familiar, or to add relevant syntax to a step -with a long and complex configuration. - -Snippet Generator is dynamically populated with a list of the steps available -for pipeline configuration. Depending on the plugins installed to your Jenkins -environment, you may see more or fewer items in the list exposed by Snippet -Generator. - -To add one or more steps from Snippet Generator to your pipeline code: - -. Open Snippet Generator -. Scroll to the step you want -. Click that step -. Configure the selected step, if presented with configuration options -. Click *Generate Groovy* to see a Groovy snippet that runs the step as configured -. Optionally select and configure additional steps - -image::/images/pipeline/snippet-generator.png[title="Snippet Generator", 800] - -When you click *Generate Groovy* after selecting a step, you see the function -name used for that step, the names of any parameters it takes (if they are not -default parameters), and the syntax used by Snippet Generator to create that -step. - -You can copy and paste the generated code right into your Pipeline, or use it as -a starting point, perhaps deleting any optional parameters that you do not need. - -To access information about steps marked with the help icon (question mark), -click on that icon. 
- -== Basic Groovy Syntax for Pipeline Configuration - -You typically add functionality to a new pipeline by performing the following tasks: - -* Adding nodes -* Adding more complex logic (usually expressed as stages and steps) - -To configure a pipeline you have created through the Jenkins UI, select the -pipeline and click *Configure*. - -If you run Jenkins on Linux or another Unix-like operating system with a Git -repository that you want to test, for example, you can do that with syntax like -the following, substituting your own name for "joe-user": - - -[source, groovy] ----- - node { - git url: 'https://github.com/joe_user/simple-maven-project-with-tests.git' - def mvnHome = tool 'M3' - sh "${mvnHome}/bin/mvn -B verify" - } ----- - -In Windows environments, you would use `bat` in place of `sh` and you might -use backslashes as the file separator where needed (backslashes need to be -escaped inside strings). - -For example, rather than: - -[source, groovy] ----- -sh "${mvnHome}/bin/mvn -B verify" ----- - -you would use: - -[source, groovy] ----- -bat "${mvnHome}\\bin\\mvn -B verify" ----- - -However, it's really only DOS that requires backslashes as path separators. Windows -can work with backslashes, but it does not require them. Therefore, the same paths -using forward slashes should work fine on Windows using the `bat` function. - -Your Groovy pipeline script can include functions, conditional tests, loops, -try/catch/finally blocks, and so on. 
- -Sample syntax for one node in a Java environment that is using the open source -Maven build automation tool (note the definition for `mvnHome`) is shown below: - -image::/images/pipeline/pipeline-sample.png[title="Pipeline Sample", 800] - -Pipeline Sample (graphic) key: - -* `def` is a keyword to define a function (you can also give a Java type in - place of `def` to make it look more like a Java method) -* `=~` is Groovy syntax to match text against a regular expression -* [0] looks up the first match -* [1] looks up the first (…) group within that match -* `readFile` step loads a text file from the workspace and returns its content - (Note: Do not use `java.io.File` methods, these refer to files on the master - where Jenkins is running, not files in the current workspace). -* The `writeFile` step saves content to a text file in the workspace -* The `fileExists` step checks whether a file exists without loading it. - -The tool step makes sure a tool with the given name is installed on the current -node. The script needs to know where it was installed, so the tool can be run -later. For this, you need a variable. - -The `def` keyword in Groovy is the quickest way to define a new variable (with no specific type). - -In the sample syntax discussed above, a variable is defined by the following expression: - - -[source, groovy] ----- -def mvnHome = tool 'M3' ----- - -This ensures that 'M3' is installed somewhere accessible to Jenkins and assigns -the return value of the step (an installation path) to the `mvnHome` variable. - -== Advanced Groovy Syntax for Pipeline Configuration - -Groovy lets you omit parentheses around function arguments. 
The named-parameter -syntax is also a shorthand for creating a map, which in Groovy uses the syntax -`[key1: value1, key2: value2]`, so you could write: - - -[source, groovy] ----- -git([url: 'https://github.com/joe_user/simple-maven-project-with-tests.git', branch: 'master']) ----- - -For convenience, when calling steps taking only one parameter (or only one -mandatory parameter), you can omit the parameter name. For example: - - -[source, groovy] ----- -sh 'echo hello' ----- - -is really shorthand for: - -[source, groovy] ----- -sh([script: 'echo hello']) ----- - -=== Managing the Environment - -One way to use tools by default is to add them to your executable path using the -special variable `env` that is defined for all pipelines: - - -[source, groovy] ----- -node { - git url: 'https://github.com/joe_user/simple-maven-project-with-tests.git' - def mvnHome = tool 'M3' - env.PATH = "${mvnHome}/bin:${env.PATH}" - sh 'mvn -B verify' -} ----- - -* Properties of this variable are environment variables on the current node. -* You can override certain environment variables, and the overrides are seen by - subsequent `sh` steps (or anything else that pays attention to environment variables). -* You can run `mvn` without a fully-qualified path. - -Setting a variable such as `PATH` in this way is only safe if you are using a -single agent for this build. Alternatively, you can use the `withEnv` step to -set a variable within a scope: - - -[source, groovy] ----- -node { - git url: 'https://github.com/jglick/simple-maven-project-with-tests.git' - withEnv(["PATH+MAVEN=${tool 'M3'}/bin"]) { - sh 'mvn -B verify' - } -} ----- - -Jenkins defines some environment variables by default: - -*Example:* `env.BUILD_TAG` can be used to get a tag like 'jenkins-projname-1' from -Groovy code, or `$BUILD_TAG` can be used from a `sh` script. The Snippet Generator -help for the `withEnv` step has more detail on this topic. 
- -=== Build Parameters - -If you configured your pipeline to accept parameters using the *Build with -Parameters* option, those parameters are accessible as Groovy variables of the -same name. - -=== Types of Executors - -Every Pipeline build runs on a Jenkins master using a *flyweight executor*, -which is an uncounted (because it's a temporary rather than configured) slot. Flyweight executors -require very little computing power. A flyweight executor (sometimes also called -a flyweight task) represents Groovy script, which is idle as it waits for a step to complete. - -To highlight the contrast between executor types, some Jenkins documentation calls any regular executor a *heavyweight executor*. - -When you run a `node` step, an executor is allocated on a node, which is usually an agent, as soon as -an appropriate node is available. - -It is a best practice to avoid placing `input` within a node. The input element pauses pipeline execution to wait for either automatic or manual approval. -By design and by nature, approval can take some time, so placing `input` within a node wastes resources by tying up both the flyweight executor -used for input and the regular executor used by the node block, which will not be free for other tasks until input is complete. - -Although any flyweight executor running a pipeline is hidden when the pipeline script is idle (between tasks), the *Build Executor Status* widget on the Jenkins page displays status for both types of executors. If the -one available executor on an agent has been pressed into service by a pipeline build that is paused and -you start a second build of the same pipeline, both builds are shown running on the master, but the -second build displays in the Build Queue until the initial build completes and executors are free to help with further processing. - -When you use inputs, it is a best practice to wrap them in timeouts. 
Wrapping inputs in timeouts allows them to be cleaned up if -approvals do not occur within a given window. For example: - -[source, groovy] ----- -timeout(time:5, unit:'DAYS') { - input message:'Approve deployment?', submitter: 'it-ops' -} ----- - -=== Recording Test Results and Artifacts - -If there are any test failures in a given build, you want Jenkins to record -them and then proceed, rather than stopping. If you want it saved, you must -capture the `.jar` that you built. The following sample code for a node shows how -(As previously seen in examples from this guide, Maven is being used as -a build tool): - -[source, groovy] ----- -node { - git 'https://github.com/joe_user/simple-maven-project-with-tests.git' - def mvnHome = tool 'M3' - sh "${mvnHome}/bin/mvn -B -Dmaven.test.failure.ignore verify" - archiveArtifacts artifacts: '**/target/*.jar', fingerprint: true - junit '**/target/surefire-reports/TEST-*.xml' -} ----- - -(Older versions of Pipeline require a slightly more verbose syntax. -The “snippet generator” can be used to see the exact format.) - -* If tests fail, the Pipeline is marked unstable (as denoted by a yellow ball in - the Jenkins UI), and you can browse "Test Result Trend" to see the relevant history. -* You should see Last Successful Artifacts on the Pipeline index page. diff --git a/content/doc/book/pipeline/shared-libraries.adoc b/content/doc/book/pipeline/shared-libraries.adoc new file mode 100644 index 000000000000..c35c431b576d --- /dev/null +++ b/content/doc/book/pipeline/shared-libraries.adoc @@ -0,0 +1,462 @@ +--- +layout: section +--- +:notitle: +:description: +:author: +:email: jenkinsci-docs@googlegroups.com +:sectanchors: +:imagesdir: /doc/book/resources/pipeline +:hide-uri-scheme: +:toc: + += Shared Libraries + +As Pipeline is adopted for more and more projects in an organization, common +patterns are likely to emerge. 
Oftentimes it is useful to share parts of +Pipeline scripts between various projects to reduce redundancies and keep code +"DRY" +footnoteref:[dry, http://en.wikipedia.org/wiki/Don\'t_repeat_yourself]. + +Pipeline has support for creating "Shared Libraries" which can be defined in +external source control repositories and loaded into existing Pipelines. + +== Defining Shared Libraries + +An Shared Library is defined with a name, a source code retrieval method such +as by SCM, and optionally a default version. The name should be a short +identifier as it will be used in scripts. + +The version could be anything understood by that SCM; for example, branches, +tags, and commit hashes all work for Git. You may also declare whether scripts +need to explicitly request that library (detailed below), or if it is present +by default. Furthermore, if you specify a version in Jenkins configuration, +you can block scripts from selecting a _different_ version. + +The best way to specify the SCM is using an SCM plugin which has been +specifically updated to support a new API for checking out an arbitrary named +version (_Modern SCM_ option). As of this writing, the latest versions of the +Git and Subversion plugins support this mode; others should follow. + +If your SCM plugin has not been integrated, you may select _Legacy SCM_ and +pick anything offered. In this case, you need to include +`${library.yourLibName.version}` somewhere in the configuration of the SCM, so +that during checkout the plugin will expand this variable to select the desired +version. For example, for Subversion, you can set the _Repository URL_ to +`https://svnserver/project/${library.yourLibName.version}` and then use +versions such as `trunk` or `branches/dev` or `tags/1.0`. 
+ +=== Directory structure + +The directory structure of a Shared Library repository is as follows: + +[source] +---- +(root) ++- src # Groovy source files +| +- org +| +- foo +| +- Bar.groovy # for org.foo.Bar class ++- vars +| +- foo.groovy # for global 'foo' variable/function +| +- foo.txt # help for 'foo' variable/function ++- resources # resource files (external libraries only) +| +- org +| +- foo +| +- bar.json # static helper data for org.foo.Bar +---- + +The `src` directory should look like standard Java source directory structure. +This directory is added to the classpath when executing Pipelines. + +The `vars` directory hosts scripts that define global variables accessible from +Pipeline scripts. +The basename of each `*.groovy` file should be a Groovy (~ Java) identifier, conventionally `camelCased`. +The matching `*.txt`, if present, can contain documentation, processed through the system’s configured markup formatter +(so may really be HTML, Markdown, etc., though the `txt` extension is required). + +The Groovy source files in these directories get the same “CPS transformation” as your Pipeline scripts. + +A `resources` directory allows the `libraryResource` step to be used from an external library to load associated non-Groovy files. +Currently this feature is not supported for internal libraries. + +Other directories under the root are reserved for future enhancements. + +=== Global Shared Libraries + +There are several places where Shared Libraries can be defined, depending on +the use-case. _Manage Jenkins » Configure System » Global Pipeline Libraries_ +as many libraries as necessary can be configured. + +Since these libraries will be globally usable, any Pipeline in the system can +utilize functionality implemented in these libraries. + +These libraries are considered "trusted:" they can run any methods in Java, +Groovy, Jenkins internal APIs, Jenkins plugins, or third-party libraries. 
This +allows you to define libraries which encapsulate individually unsafe APIs in a +higher-level wrapper safe for use from any job. Beware that **anyone able to +push commits to this SCM repository could obtain unlimited access to Jenkins**. +You need the _Overall/RunScripts_ permission to configure these libraries +(normally this will be granted to Jenkins administrators). + +=== Folder-level Shared Libraries + +Any Folder created can have Shared Libraries associated with it. This mechanism +allows scoping of specific libraries to all the Pipelines inside of the folder +or subfolder. + +Folder-based libraries are not considered "trusted:" they run in the Groovy +sandbox just like typical Pipeline scripts. + +=== Automatic Shared Libraries + +Other plugins may add ways of defining libraries on the fly. +For example, the +link:https://plugins.jenkins.io/github-organization-folder[GitHub Organization Folder] +plugin allows a scripty to use an untrusted library such as +`github.com/someorg/somerepo` without any additional configuration. In this +case, the specified GitHub repository would be loaded, from the `master` +branch, using an anonymous checkout. + +== Using libraries + +Pipeline scripts can access shared libraries marked _Load implicitly_, They may +immediately use classes or global variables defined by any such libraries +(details below). + +To access other shared libraries, a script needs to use the `@Library` +annotation, specifying the library's name: + +[source,groovy] +---- +@Library('somelib') +/* Using a version specifier, such as branch, tag, etc */ +@Library('somelib@1.0') +/* Accessing multiple libraries with one statement */ +@Library(['somelib', 'otherlib@abc1234']) +---- + +The annotation can be anywhere in the script where an annotation is permitted +by Groovy. 
When referring to class libraries (with `src/` directories), +conventionally the annotation goes on an `import` statement: + +[source,groovy] +---- +@Library('somelib') +import com.mycorp.pipeline.somelib.UsefulClass +---- + +[NOTE] +==== +It is legal, though unnecessary, to `import` a global variable (or function) +defined in the `vars/` directory: +==== + +Note that libraries are resolved and loaded during _compilation_ of the script, +before it starts executing. This allows the Groovy compiler to understand the +meaning of symbols used in static type checking, and permits them to be used +in type declarations in the script, for example: + +[source,groovy] +---- +@Library('somelib') +import com.mycorp.pipeline.somelib.Helper + +int useSomeLib(Helper helper) { + helper.prepare() + return helper.count() +} + +echo useSomeLib(new Helper('some text')) +---- + +Global Variables/functions however, are resolved at runtime. + +=== Overriding versions + +A `@Library` annotation may override a default version given in the library’s +definition, if the definition permits this. In particular, a shared library +marked for implicit use can still be loaded in a different version using the +annotation (unless the definition specifically forbids this). + +== Writing libraries + +At the base level, any valid +link:http://groovy-lang.org/syntax.html[Groovy code] +is okay for use. Different data structures, utility functions, etc, such as: + +[source,groovy] +---- +// src/org/foo/Point.groovy +package org.foo; + +// point in 3D space +class Point { + float x,y,z; +} +---- + +=== Accessing steps + +Library classes cannot directly call step functions such as `sh` or `git`. 
+They can however implement functions, outside of the scope of an enclosing +class, which in turn invoke Pipeline steps, for example: + +[source,groovy] +---- +// src/org/foo/Zot.groovy +package org.foo; + +def checkOutFrom(repo) { + git url: "git@github.com:jenkinsci/${repo}" +} +---- + +Which can then be called from a Pipeline Script: + +[source,groovy] +---- +def z = new org.foo.Zot() +z.checkOutFrom(repo) +---- + +This approach has limitations; for example, it prevents the declaration of a +superclass. + +Alternately, a set of `steps` can be passed explicitly to a library class, in a +constructor, or just one method: + +[source,groovy] +---- +package org.foo +class Utilities { + def steps + Utilities(steps) {this.steps = steps} + def mvn(args) { + steps.sh "${steps.tool 'Maven'}/bin/mvn -o ${args}" + } +} +---- + +Which would be access from Pipeline with: + +[source,groovy] +---- +@Library('utils') import org.foo.Utilities +def utils = new Utilities(steps) +node { + utils.mvn 'clean package' +} +---- + +If the library needs to access global variables, such as `env`, those should be +explicitly passed into the library classes, or functions, in a similar manner. + +Instead of passing numerous variables from the Pipeline Script into a library, + +[source,groovy] +---- +package org.foo +class Utilities { + static def mvn(script, args) { + script.sh "${script.tool 'Maven'}/bin/mvn -s ${script.env.HOME}/jenkins.xml -o ${args}" + } +} +---- + +The above example shows the script being passed in to one `static` method, +invoked from a Pipeline Script as follows: + +[source,groovy] +---- +@Library('utils') import static org.foo.Utilities.* +node { + mvn this, 'clean package' +} +---- + +=== Defining steps + + +Shared Libraries can also define functions which look and feel like built-in +steps, such as `sh` or `git`. 
For example, to define a `helloWorld` step, the file `vars/helloWorld.groovy`
should be created and have a `call` method defined:
+==== + + +=== Defining a more structured DSL + +If you have a lot of Pipeline jobs that are mostly similar, the global +function/variable mechanism gives you a handy tool to build a higher-level DSL +that captures the similarity. For example, all Jenkins plugins are built and +tested in the same way, so we might write a step named +`jenkinsPlugin`: + +[source,groovy] +---- +// vars/jenkinsPlugin.groovy +def call(body) { + // evaluate the body block, and collect configuration into the object + def config = [:] + body.resolveStrategy = Closure.DELEGATE_FIRST + body.delegate = config + body() + + // now build, based on the configuration provided + node { + git url: "https://github.com/jenkinsci/${config.name}-plugin.git" + sh "mvn install" + mail to: "...", subject: "${config.name} plugin build", body: "..." + } +} +---- + +Assuming the script has either been loaded as a +<> or as a +<> +the resulting `Jenkinsfile` will be dramatically simpler: + +[pipeline] +---- +// Script // +jenkinsPlugin { + name = 'git' +} +// Declarative not yet implemented // +---- + +=== Using third-party libraries + +It is possible to use third-party Java libraries, typically found in +link:http://search.maven.org/[Maven Central], +from *trusted* library code using the `@Grab` annotation. Refer to the +link:http://docs.groovy-lang.org/latest/html/documentation/grape.html#_quick_start[Grape documentation] +for details, but simply put: + +[source,groovy] +---- +@Grab('org.apache.commons:commons-math3:3.4.1') +import org.apache.commons.math3.primes.Primes +void parallelize(int count) { + if (!Primes.isPrime(count)) { + error "${count} was not prime" + } + // … +} +---- + +Third-party libraries are cached by default in `~/.groovy/grapes/` on the +Jenkins master. + +=== Loading resources + +External libraries may load adjunct files from a `resources/` directory using +the `libraryResource` step. 
The argument is a relative pathname, akin to Java +resource loading: + +[source,groovy] +---- +def request = libraryResource 'com/mycorp/pipeline/somelib/request.json' +---- + +The file is loaded as a string, suitable for passing to certain APIs or saving +to a workspace using `writeFile`. + +It is advisable to use an unique package structure so you do not accidentally +conflict with another library. + +=== Pretesting library changes + +If you notice a mistake in a build using an untrusted library, +simply click the _Replay_ link to try editing one or more of its source files, +and see if the resulting build behaves as expected. +Once you are satisfied with the result, follow the diff link from the build’s status page, +and apply the diff to the library repository and commit. + +(Even if the version requested for the library was a branch, rather than a fixed version like a tag, +replayed builds will use the exact same revision as the original build: +library sources will not be checked out again.) + +_Replay_ is not currently supported for trusted libraries. +Modifying resource files is also not currently supported during _Replay_. 
diff --git a/content/doc/book/resources/pipeline/build-now.png b/content/doc/book/resources/pipeline/build-now.png new file mode 100644 index 000000000000..c488c264d609 Binary files /dev/null and b/content/doc/book/resources/pipeline/build-now.png differ diff --git a/content/doc/book/resources/pipeline/hello-world-console-output.png b/content/doc/book/resources/pipeline/hello-world-console-output.png new file mode 100644 index 000000000000..56acb57d21bd Binary files /dev/null and b/content/doc/book/resources/pipeline/hello-world-console-output.png differ diff --git a/content/doc/book/resources/pipeline/hello-world-script.png b/content/doc/book/resources/pipeline/hello-world-script.png new file mode 100644 index 000000000000..57acd21bc1f5 Binary files /dev/null and b/content/doc/book/resources/pipeline/hello-world-script.png differ diff --git a/content/doc/book/resources/pipeline/multibranch-branch-indexing.png b/content/doc/book/resources/pipeline/multibranch-branch-indexing.png new file mode 100644 index 000000000000..bb972047892a Binary files /dev/null and b/content/doc/book/resources/pipeline/multibranch-branch-indexing.png differ diff --git a/content/doc/book/resources/pipeline/multibranch-branch-source-configuration.png b/content/doc/book/resources/pipeline/multibranch-branch-source-configuration.png new file mode 100644 index 000000000000..e277d62f71ca Binary files /dev/null and b/content/doc/book/resources/pipeline/multibranch-branch-source-configuration.png differ diff --git a/content/doc/book/resources/pipeline/multibranch-branch-source.png b/content/doc/book/resources/pipeline/multibranch-branch-source.png new file mode 100644 index 000000000000..3d91cf9f4b2b Binary files /dev/null and b/content/doc/book/resources/pipeline/multibranch-branch-source.png differ diff --git a/content/doc/book/resources/pipeline/new-item-creation.png b/content/doc/book/resources/pipeline/new-item-creation.png new file mode 100644 index 000000000000..50864b708062 Binary files 
/dev/null and b/content/doc/book/resources/pipeline/new-item-creation.png differ diff --git a/content/doc/book/resources/pipeline/new-item-multibranch-creation.png b/content/doc/book/resources/pipeline/new-item-multibranch-creation.png new file mode 100644 index 000000000000..9ea03f7e3555 Binary files /dev/null and b/content/doc/book/resources/pipeline/new-item-multibranch-creation.png differ diff --git a/content/doc/book/resources/pipeline/new-item-selection.png b/content/doc/book/resources/pipeline/new-item-selection.png new file mode 100644 index 000000000000..8c2947ba339b Binary files /dev/null and b/content/doc/book/resources/pipeline/new-item-selection.png differ diff --git a/content/doc/book/resources/pipeline/pipeline-syntax-sidebar.png b/content/doc/book/resources/pipeline/pipeline-syntax-sidebar.png new file mode 100644 index 000000000000..13ee894d2c3f Binary files /dev/null and b/content/doc/book/resources/pipeline/pipeline-syntax-sidebar.png differ diff --git a/content/doc/book/resources/pipeline/snippet-generator.png b/content/doc/book/resources/pipeline/snippet-generator.png new file mode 100644 index 000000000000..cbcc6a4f6066 Binary files /dev/null and b/content/doc/book/resources/pipeline/snippet-generator.png differ