diff --git a/.circleci/config.yml b/.circleci/config.yml index aeeba9765..383ab7e23 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,7 +1,7 @@ version: 2.1 orbs: - docker: circleci/docker@2.6.0 - ruby: circleci/ruby@2.3.1 + docker: circleci/docker@2.2.0 + ruby: circleci/ruby@2.3.0 node: circleci/node@6.3.0 browser-tools: circleci/browser-tools@1.4.8 @@ -45,6 +45,7 @@ jobs: steps: - setup_remote_docker: docker_layer_caching: false + version: default - checkout - docker/check: registry: harbor.k8s.libraries.psu.edu @@ -61,7 +62,7 @@ jobs: build: docker: - - image: cimg/ruby:3.1.6-node + - image: cimg/ruby:3.1.2-node executor: ruby/default steps: - checkout @@ -72,7 +73,7 @@ jobs: test: docker: - - image: cimg/ruby:3.1.6-browsers + - image: cimg/ruby:3.1.2-browsers - image: cimg/postgres:12.7 environment: POSTGRES_USER: psu_research_metadata_test diff --git a/.rubocop_todo.yml b/.rubocop_todo.yml index a3532d9ba..aea38be22 100644 --- a/.rubocop_todo.yml +++ b/.rubocop_todo.yml @@ -1,32 +1,13 @@ # This configuration was generated by # `rubocop --auto-gen-config` -# on 2024-12-20 15:40:23 UTC using RuboCop version 1.69.2. +# on 2022-08-24 16:57:17 UTC using RuboCop version 1.33.0. # The point is for the user to remove these configuration records # one by one as the offenses are removed from the code base. # Note that changes in the inspected code, or installation of new # versions of RuboCop, may require this file to be generated again. -# Offense count: 73 -# Configuration parameters: EnforcedStyle. -# SupportedStyles: link_or_button, strict -Capybara/ClickLinkOrButtonStyle: - Enabled: false - # Offense count: 1 -# Configuration parameters: Include, MaxAmount. -# Include: **/*_spec.rb, **/spec/**/*, **/test/**/*, **/features/support/factories/**/*.rb -FactoryBot/ExcessiveCreateList: - Exclude: - - 'spec/integration/organization_profiles/show_spec.rb' - -# Offense count: 35 -# Configuration parameters: Include. 
-# Include: **/*_spec.rb, **/spec/**/*, **/test/**/*, **/features/support/factories/**/*.rb -FactoryBot/FactoryAssociationWithStrategy: - Enabled: false - -# Offense count: 1 -# Configuration parameters: IgnoreLiteralBranches, IgnoreConstantBranches, IgnoreDuplicateElseBranch. +# Configuration parameters: IgnoreLiteralBranches, IgnoreConstantBranches. Lint/DuplicateBranch: Exclude: - 'app/mappers/status_mapper.rb' @@ -65,32 +46,29 @@ Lint/RescueException: # Offense count: 1 # This cop supports safe autocorrection (--autocorrect). -# Configuration parameters: AutoCorrect, AllowUnusedKeywordArguments, IgnoreEmptyMethods, IgnoreNotImplementedMethods, NotImplementedExceptions. -# NotImplementedExceptions: NotImplementedError +# Configuration parameters: AllowUnusedKeywordArguments, IgnoreEmptyMethods, IgnoreNotImplementedMethods. Lint/UnusedMethodArgument: Exclude: - 'app/importers/pure_publication_tag_importer.rb' -# Offense count: 62 -# Configuration parameters: CountComments, CountAsOne, AllowedMethods, AllowedPatterns, inherit_mode. +# Offense count: 136 +# Configuration parameters: CountComments, CountAsOne, ExcludedMethods, AllowedMethods, AllowedPatterns, IgnoredMethods, inherit_mode. # AllowedMethods: refine Metrics/BlockLength: Max: 298 -# Offense count: 15 +# Offense count: 13 # Configuration parameters: CountComments, CountAsOne. Metrics/ClassLength: - Max: 755 - Exclude: - - 'app/models/publication.rb' + Max: 1213 -# Offense count: 39 -# Configuration parameters: AllowedMethods, AllowedPatterns. +# Offense count: 32 +# Configuration parameters: AllowedMethods, AllowedPatterns, IgnoredMethods. Metrics/CyclomaticComplexity: Max: 33 -# Offense count: 34 -# Configuration parameters: AllowedMethods, AllowedPatterns. +# Offense count: 30 +# Configuration parameters: AllowedMethods, AllowedPatterns, IgnoredMethods. 
Metrics/PerceivedComplexity: Max: 35 @@ -100,7 +78,6 @@ Naming/AccessorMethodName: - 'lib/utilities/pure_downloader.rb' # Offense count: 2 -# This cop supports unsafe autocorrection (--autocorrect-all). # Configuration parameters: EnforcedStyleForLeadingUnderscores. # SupportedStylesForLeadingUnderscores: disallowed, required, optional Naming/MemoizedInstanceVariableName: @@ -108,7 +85,7 @@ Naming/MemoizedInstanceVariableName: - 'app/importers/activity_insight_importer.rb' - 'app/importers/psu_law_school_oai_creator.rb' -# Offense count: 6 +# Offense count: 3 # Configuration parameters: NamePrefix, ForbiddenPrefixes, AllowedMethods, MethodDefinitionMacros. # NamePrefix: is_, has_, have_ # ForbiddenPrefixes: is_, has_, have_ @@ -117,36 +94,31 @@ Naming/MemoizedInstanceVariableName: Naming/PredicateName: Exclude: - 'app/importers/activity_insight_importer.rb' - - 'app/importers/unpaywall_response.rb' - 'app/models/external_publication_waiver.rb' - 'app/models/publication.rb' + - 'app/importers/unpaywall_response.rb' -# Offense count: 373 +# Offense count: 334 # Configuration parameters: EnforcedStyle, CheckMethodNames, CheckSymbols, AllowedIdentifiers, AllowedPatterns. # SupportedStyles: snake_case, normalcase, non_integer -# AllowedIdentifiers: capture3, iso8601, rfc1123_date, rfc822, rfc2822, rfc3339, x86_64 +# AllowedIdentifiers: capture3, iso8601, rfc1123_date, rfc822, rfc2822, rfc3339 Naming/VariableNumber: Enabled: false -# Offense count: 1 -Performance/MapMethodChain: - Exclude: - - 'spec/component/models/preferred_open_access_policy_spec.rb' - -# Offense count: 8 +# Offense count: 2 # This cop supports unsafe autocorrection (--autocorrect-all). Performance/StringInclude: Exclude: - 'app/importers/web_of_science_publication.rb' - 'app/models/wos_grant.rb' -# Offense count: 217 -# Configuration parameters: Prefixes, AllowedPatterns. +# Offense count: 160 +# Configuration parameters: Prefixes. 
# Prefixes: when, with, without RSpec/ContextWording: Enabled: false -# Offense count: 8 +# Offense count: 9 # Configuration parameters: IgnoredMetadata. RSpec/DescribeClass: Exclude: @@ -156,66 +128,64 @@ RSpec/DescribeClass: - 'spec/integration/profiles/external_publication_waivers/create_spec.rb' - 'spec/integration/profiles/external_publication_waivers/new_spec.rb' - 'spec/integration/profiles/internal_publication_waivers/new_spec.rb' + - 'spec/integration/profiles/open_access_publications/edit_spec.rb' - 'spec/integration/user/sessions/delete_spec.rb' - 'spec/integration/user/sessions/new_spec.rb' - - 'spec/requests/api/v1/api_docs/*' + - 'spec/requests/api/v1/api_docs/organizations_spec.rb' + - 'spec/requests/api/v1/api_docs/users_spec.rb' + - 'spec/requests/api/v1/api_docs/publications_spec.rb' -# Offense count: 1634 -# Configuration parameters: Max, AllowedIdentifiers, AllowedPatterns. -RSpec/IndexedLet: - Enabled: false - -# Offense count: 518 +# Offense count: 304 RSpec/LetSetup: Enabled: false -# Offense count: 1 +# Offense count: 3 RSpec/MessageChain: Exclude: - 'spec/component/controllers/orcid_works_controller_spec.rb' + - 'spec/component/importers/scholarsphere_importer_spec.rb' + - 'spec/component/jobs/doi_verification_job_spec.rb' -# Offense count: 107 +# Offense count: 87 # Configuration parameters: EnforcedStyle. 
# SupportedStyles: have_received, receive RSpec/MessageSpies: Exclude: - - 'spec/component/importers/activity_insight_importer_spec.rb' - 'spec/component/importers/csv_importer_spec.rb' - - 'spec/component/jobs/ai_oa_status_export_job_spec.rb' - - 'spec/component/jobs/doi_verification_job_spec.rb' - - 'spec/component/jobs/publication_download_job_spec.rb' - 'spec/component/jobs/scholarsphere_upload_job_spec.rb' - 'spec/component/jobs/scholarsphere_version_check_job_spec.rb' - 'spec/component/models/activity_insight_publication_exporter_spec.rb' - - 'spec/component/models/duplicate_publication_group_spec.rb' - 'spec/component/models/open_access_notifier_spec.rb' - 'spec/component/models/orcid_api_client_spec.rb' - 'spec/component/models/orcid_oauth_client_spec.rb' - 'spec/component/models/orcid_resource_spec.rb' - 'spec/component/services/scholarsphere_deposit_service_spec.rb' + - 'spec/component/models/duplicate_publication_group_spec.rb' + - 'spec/component/jobs/doi_verification_job_spec.rb' + - 'spec/component/importers/activity_insight_importer_spec.rb' + - 'spec/component/jobs/publication_download_job_spec.rb' + - 'spec/component/jobs/ai_oa_status_export_job_spec.rb' # Offense count: 40 RSpec/MultipleDescribes: Enabled: false -# Offense count: 1098 +# Offense count: 923 # Configuration parameters: AllowSubject. RSpec/MultipleMemoizedHelpers: - Max: 56 + Max: 81 -# Offense count: 13 -# Configuration parameters: EnforcedStyle, IgnoreSharedExamples. -# SupportedStyles: always, named_only +# Offense count: 9 +# Configuration parameters: IgnoreSharedExamples. 
RSpec/NamedSubject: Exclude: - - 'spec/component/models/activity_insight_oa_file_spec.rb' - 'spec/component/models/scholarsphere_work_deposit_spec.rb' - 'spec/component/serializers/api/v1/performance_serializer_spec.rb' - 'spec/component/serializers/api/v1/presentation_serializer_spec.rb' - 'spec/component/serializers/api/v1/publication_serializer_spec.rb' + - 'spec/component/models/activity_insight_oa_file_spec.rb' -# Offense count: 2289 -# Configuration parameters: AllowedGroups. +# Offense count: 1180 RSpec/NestedGroups: Max: 8 @@ -224,11 +194,6 @@ RSpec/OverwritingSetup: Exclude: - 'spec/component/importers/activity_insight_importer_spec.rb' -# Offense count: 1 -RSpec/PendingWithoutReason: - Exclude: - - 'spec/component/lib/omniauth/strategies/azure_oauth_spec.rb' - # Offense count: 2 RSpec/RepeatedExample: Exclude: @@ -239,16 +204,15 @@ RSpec/RepeatedExampleGroupBody: Exclude: - 'spec/unit/models/activity_insight_education_history_item_spec.rb' -# Offense count: 6 +# Offense count: 4 RSpec/RepeatedExampleGroupDescription: Exclude: - - 'spec/component/importers/unpaywall_client_spec.rb' - 'spec/unit/models/activity_insight_education_history_item_spec.rb' - 'spec/unit/models/wos_grant_spec.rb' + - 'spec/component/importers/unpaywall_client_spec.rb' + - 'spec/component/importers/oab_client_spec.rb' -# Offense count: 22 -# This cop supports safe autocorrection (--autocorrect). -# Configuration parameters: AutoCorrect. +# Offense count: 24 RSpec/ScatteredSetup: Exclude: - 'spec/component/controllers/orcid_access_tokens_controller_spec.rb' @@ -256,44 +220,36 @@ RSpec/ScatteredSetup: - 'spec/unit/models/activity_insight_publication_spec.rb' - 'spec/unit/models/authorship_merge_policy_spec.rb' -# Offense count: 10 -# Configuration parameters: Include, CustomTransform, IgnoreMethods, IgnoreMetadata. 
-# Include: **/*_spec.rb -RSpec/SpecFilePathFormat: - Exclude: - - 'spec/component/importers/etd_csv_importer_spec.rb' - - 'spec/component/importers/psu_hr_user_importer_spec.rb' - - 'spec/component/lib/healthchecks/delayed_job_errors_spec.rb' - - 'spec/component/lib/omniauth/strategies/azure_oauth_spec.rb' - - 'spec/component/models/orcid_integration_spec.rb' - - 'spec/requests/activity_insight_oa_workflow/wrong_file_version_curation/email_author_spec.rb' - - 'spec/requests/api/v1/organizations_spec.rb' - - 'spec/requests/api/v1/publications_spec.rb' - - 'spec/requests/api/v1/users_spec.rb' - - 'spec/unit/models/null_comparable_time_spec.rb' - # Offense count: 2 RSpec/SubjectStub: Exclude: - 'spec/component/importers/csv_importer_spec.rb' -# Offense count: 706 +# Offense count: 624 # Configuration parameters: IgnoreNameless, IgnoreSymbolicNames. RSpec/VerifiedDoubles: Enabled: false -# Offense count: 1 +# Offense count: 6 # This cop supports unsafe autocorrection (--autocorrect-all). Rails/CompactBlank: Exclude: - 'app/models/publication_match_on_doi_policy.rb' + - 'app/models/publication_merge_on_doi_policy.rb' + +# Offense count: 2 +# This cop supports unsafe autocorrection (--autocorrect-all). +Rails/DeprecatedActiveModelErrorsMethods: + Exclude: + - 'app/models/contributor_name.rb' + - 'app/models/scholarsphere_work_deposit.rb' # Offense count: 21 # This cop supports unsafe autocorrection (--autocorrect-all). # Configuration parameters: Whitelist, AllowedMethods, AllowedReceivers. 
-# Whitelist: find_by_sql, find_by_token_for -# AllowedMethods: find_by_sql, find_by_token_for -# AllowedReceivers: Gem::Specification, page +# Whitelist: find_by_sql +# AllowedMethods: find_by_sql +# AllowedReceivers: Gem::Specification Rails/DynamicFindBy: Exclude: - 'app/importers/nsf_grant_importer.rb' @@ -301,20 +257,19 @@ Rails/DynamicFindBy: - 'spec/component/models/publication_spec.rb' - 'spec/component/models/user_spec.rb' -# Offense count: 95 -# This cop supports safe autocorrection (--autocorrect). +# Offense count: 53 # Configuration parameters: EnforcedStyle. # SupportedStyles: slashes, arguments Rails/FilePath: Enabled: false -# Offense count: 36 +# Offense count: 34 # Configuration parameters: Include. # Include: app/models/**/*.rb Rails/HasManyOrHasOneDependent: Enabled: false -# Offense count: 14 +# Offense count: 10 Rails/I18nLocaleTexts: Exclude: - 'app/mailers/admin_notifications_mailer.rb' @@ -348,6 +303,7 @@ Rails/OutputSafety: - 'app/models/open_access_location.rb' - 'app/models/organization.rb' - 'app/models/publication.rb' + - 'app/components/scholarsphere_deposit_form_component.rb' # Offense count: 1 # This cop supports safe autocorrection (--autocorrect). @@ -361,11 +317,35 @@ Rails/Present: Rails/RedundantPresenceValidationOnBelongsTo: Enabled: false -# Offense count: 33 +# Offense count: 15 # Configuration parameters: ForbiddenMethods, AllowedMethods. 
# ForbiddenMethods: decrement!, decrement_counter, increment!, increment_counter, insert, insert!, insert_all, insert_all!, toggle!, touch, touch_all, update_all, update_attribute, update_column, update_columns, update_counters, upsert, upsert_all Rails/SkipsModelValidations: - Enabled: false + Exclude: + - 'app/controllers/authorships_controller.rb' + - 'app/controllers/presentation_contributions_controller.rb' + - 'app/controllers/user_performances_controller.rb' + - 'app/controllers/activity_insight_oa_workflow/wrong_version_base_controller.rb' + - 'app/importers/pure_publication_importer.rb' + - 'app/models/api_token.rb' + - 'app/models/authorship.rb' + - 'app/models/deputy_assignment.rb' + - 'app/models/scholarsphere_work_deposit.rb' + - 'app/models/user.rb' + - 'app/services/oa_workflow_service.rb' + - 'spec/component/controllers/deputy_assignments_controller_spec.rb' + - 'spec/component/models/user_profile_spec.rb' + - 'app/services/oa_workflow_service.rb' + - 'app/models/activity_insight_oa_file.rb' + - 'app/controllers/activity_insight_oa_workflow/preferred_file_version_none_curation_controller.rb' + - 'spec/component/models/publication_spec.rb' + - 'app/jobs/ai_oa_wf_version_check_job.rb' + + +# Offense count: 1 +Rails/TransactionExitStatement: + Exclude: + - 'app/services/scholarsphere_location_operator.rb' # Offense count: 7 # Configuration parameters: Include. @@ -391,13 +371,21 @@ Style/IfInsideElse: Exclude: - 'app/importers/activity_insight_importer.rb' +# Offense count: 1 +# This cop supports unsafe autocorrection (--autocorrect-all). +# Configuration parameters: AllowedMethods. +# AllowedMethods: nonzero? +Style/IfWithBooleanLiteralBranches: + Exclude: + - 'app/models/publication_merge_on_doi_policy.rb' + # Offense count: 1 # This cop supports unsafe autocorrection (--autocorrect-all). 
Style/MapToHash: Exclude: - 'app/models/user_profile.rb' -# Offense count: 26 +# Offense count: 15 Style/OpenStructUse: Exclude: - 'app/services/scholarsphere_location_operator.rb' @@ -414,13 +402,6 @@ Style/RedundantSort: - 'app/models/authorship_merge_policy.rb' # Offense count: 2 -# Configuration parameters: Max. -Style/SafeNavigationChainLength: - Exclude: - - 'app/importers/activity_insight_importer.rb' - - 'app/importers/scholarsphere_response.rb' - -# Offense count: 3 # This cop supports safe autocorrection (--autocorrect). # Configuration parameters: AllowModifier. Style/SoleNestedConditional: @@ -428,9 +409,9 @@ Style/SoleNestedConditional: - 'app/models/publication.rb' - 'app/models/scholarsphere_work_deposit.rb' -# Offense count: 693 +# Offense count: 435 # This cop supports safe autocorrection (--autocorrect). -# Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, AllowedPatterns, SplitStrings. +# Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, AllowedPatterns, IgnoredPatterns. 
# URISchemes: http, https Layout/LineLength: - Max: 294 + Max: 302 diff --git a/.ruby-version b/.ruby-version index 9cec7165a..ef538c281 100644 --- a/.ruby-version +++ b/.ruby-version @@ -1 +1 @@ -3.1.6 +3.1.2 diff --git a/Dockerfile b/Dockerfile index 0bdc22c25..06c7176cb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM harbor.k8s.libraries.psu.edu/library/ruby-3.1.6-node-18:20241220 as base +FROM harbor.k8s.libraries.psu.edu/library/ruby-3.1.2-node-18:20240827 as base # Isilon has issues with uid 2000 for some reason # change the app to run as 201 ARG UID=201 diff --git a/Gemfile b/Gemfile index c3ea1b8a1..c47810f67 100644 --- a/Gemfile +++ b/Gemfile @@ -48,7 +48,7 @@ gem 'pdf-reader' # Pdf reader gem 'pg', '>= 0.18', '< 2.0' # use postgresql as the database for Active Record gem 'progressbar' # the ultimate text progress bar library for Ruby gem 'psu_identity', '~> 0.2' # connect to Penn State's identity API -gem 'puma', '~> 6.5' # use Puma as the app server +gem 'puma', github: 'puma/puma', branch: 'master' # use Puma as the app server (master branch until bug fixed: https://github.com/puma/puma/issues/3531) gem 'rss' # RSS reading and writing gem 'sass-rails' # sass for stylesheets gem 'scholarsphere-client', '~> 0.3' # upload content into ScholarSphere diff --git a/Gemfile.lock b/Gemfile.lock index 8f4a6b46b..1ee0b442d 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -1,3 +1,11 @@ +GIT + remote: https://github.com/puma/puma.git + revision: fba741b91780224a1db1c456645335b2dd7f27dd + branch: master + specs: + puma (6.4.3) + nio4r (~> 2.0) + GEM remote: https://rubygems.org/ specs: @@ -201,7 +209,7 @@ GEM rainbow rubocop smart_properties - erubi (1.13.1) + erubi (1.13.0) execjs (2.8.1) exiftool (1.2.4) json @@ -236,7 +244,7 @@ GEM faraday-rack (1.0.0) faraday-retry (1.0.3) ffaker (2.20.0) - ffi (1.17.0) + ffi (1.16.3) fieldhand (0.12.0) ox (~> 2.5) font-awesome-rails (4.7.0.8) @@ -269,7 +277,7 @@ GEM thor (>= 0.14, < 2.0) jsbundling-rails (1.1.2) 
railties (>= 6.0.0) - json (2.9.1) + json (2.6.3) json-schema (5.0.1) addressable (~> 2.8) jsonapi-serializer (2.2.0) @@ -288,13 +296,12 @@ GEM activerecord kaminari-core (= 1.2.2) kaminari-core (1.2.2) - language_server-protocol (3.17.0.3) launchy (2.5.0) addressable (~> 2.7) listen (3.7.0) rb-fsevent (~> 0.10, >= 0.10.3) rb-inotify (~> 0.9, >= 0.9.10) - logger (1.6.4) + logger (1.6.2) lograge (0.12.0) actionpack (>= 4) activesupport (>= 4) @@ -313,8 +320,8 @@ GEM method_source (1.1.0) mini_magick (4.12.0) mini_mime (1.1.5) - mini_portile2 (2.8.8) - minitest (5.25.4) + mini_portile2 (2.8.7) + minitest (5.25.2) multi_json (1.15.0) multi_xml (0.7.1) bigdecimal (~> 3.1) @@ -332,7 +339,7 @@ GEM timeout net-smtp (0.5.0) net-protocol - niftany (0.11.0) + niftany (0.10.1) colorize (~> 0.8.1) erb_lint (~> 0.0.22) rubocop (~> 1.3) @@ -341,7 +348,7 @@ GEM rubocop-rspec (~> 2) scss_lint (~> 0.55) nio4r (2.7.4) - nokogiri (1.17.2) + nokogiri (1.16.7) mini_portile2 (~> 2.8.2) racc (~> 1.4) oauth2 (2.0.9) @@ -363,11 +370,10 @@ GEM actionpack (>= 4.2) omniauth (~> 2.0) orm_adapter (0.5.0) - ox (2.14.18) - parallel (1.26.3) - parser (3.3.6.0) + ox (2.14.6) + parallel (1.22.1) + parser (3.2.0.0) ast (~> 2.4.1) - racc pastel (0.8.0) tty-color (~> 0.5) pdf-reader (2.11.0) @@ -392,8 +398,6 @@ GEM psych (5.2.0) stringio public_suffix (6.0.1) - puma (6.5.0) - nio4r (~> 2.0) racc (1.8.1) rack (2.2.10) rack-protection (3.2.0) @@ -428,9 +432,9 @@ GEM activesupport (>= 5.0.0) minitest nokogiri (>= 1.6) - rails-html-sanitizer (1.6.2) + rails-html-sanitizer (1.6.0) loofah (~> 2.21) - nokogiri (>= 1.15.7, != 1.16.7, != 1.16.6, != 1.16.5, != 1.16.4, != 1.16.3, != 1.16.2, != 1.16.1, != 1.16.0.rc1, != 1.16.0) + nokogiri (~> 1.14) rails_admin (3.2.1) activemodel-serializers-xml (>= 1.0) csv @@ -449,11 +453,11 @@ GEM rainbow (3.1.1) rake (13.2.1) rb-fsevent (0.11.2) - rb-inotify (0.11.1) + rb-inotify (0.10.1) ffi (~> 1.0) rdoc (6.7.0) psych (>= 4.0.0) - regexp_parser (2.9.3) + regexp_parser 
(2.8.1) reline (0.5.11) io-console (~> 0.5) request_store (1.5.1) @@ -461,7 +465,8 @@ GEM responders (3.1.1) actionpack (>= 5.2) railties (>= 5.2) - rexml (3.4.0) + rexml (3.3.1) + strscan rspec-core (3.13.2) rspec-support (~> 3.13.0) rspec-expectations (3.13.3) @@ -489,38 +494,28 @@ GEM json-schema (>= 2.2, < 6.0) railties (>= 5.2, < 8.0) rspec-core (>= 2.14) - rubocop (1.69.2) + rubocop (1.42.0) json (~> 2.3) - language_server-protocol (>= 3.17.0) parallel (~> 1.10) - parser (>= 3.3.0.2) + parser (>= 3.1.2.1) rainbow (>= 2.2.2, < 4.0) - regexp_parser (>= 2.9.3, < 3.0) - rubocop-ast (>= 1.36.2, < 2.0) + regexp_parser (>= 1.8, < 3.0) + rexml (>= 3.2.5, < 4.0) + rubocop-ast (>= 1.24.1, < 2.0) ruby-progressbar (~> 1.7) - unicode-display_width (>= 2.4.0, < 4.0) - rubocop-ast (1.37.0) - parser (>= 3.3.1.0) - rubocop-capybara (2.21.0) - rubocop (~> 1.41) - rubocop-factory_bot (2.26.1) - rubocop (~> 1.61) - rubocop-performance (1.23.0) - rubocop (>= 1.48.1, < 2.0) - rubocop-ast (>= 1.31.1, < 2.0) - rubocop-rails (2.27.0) + unicode-display_width (>= 1.4.0, < 3.0) + rubocop-ast (1.24.1) + parser (>= 3.1.1.0) + rubocop-performance (1.15.2) + rubocop (>= 1.7.0, < 2.0) + rubocop-ast (>= 0.4.0) + rubocop-rails (2.17.4) activesupport (>= 4.2.0) rack (>= 1.1) - rubocop (>= 1.52.0, < 2.0) - rubocop-ast (>= 1.31.1, < 2.0) - rubocop-rspec (2.31.0) - rubocop (~> 1.40) - rubocop-capybara (~> 2.17) - rubocop-factory_bot (~> 2.22) - rubocop-rspec_rails (~> 2.28) - rubocop-rspec_rails (2.29.1) - rubocop (~> 1.61) - ruby-progressbar (1.13.0) + rubocop (>= 1.33.0, < 2.0) + rubocop-rspec (2.16.0) + rubocop (~> 1.33) + ruby-progressbar (1.11.0) ruby-rc4 (0.1.5) ruby-vips (2.2.1) ffi (~> 1.12) @@ -545,9 +540,9 @@ GEM aws-sdk-s3 (~> 1.49) faraday (> 0.12) marcel (~> 1.0) - scss_lint (0.60.0) + scss_lint (0.59.0) sass (~> 3.5, >= 3.5.5) - securerandom (0.4.1) + securerandom (0.4.0) selenium-webdriver (4.10.0) rexml (~> 3.2, >= 3.2.5) rubyzip (>= 1.2.2, < 3.0) @@ -584,7 +579,7 @@ GEM 
ssrf_filter (1.1.2) string-similarity (2.1.0) stringio (3.1.2) - strscan (3.0.9) + strscan (3.0.7) terser (1.1.16) execjs (>= 0.3.0, < 3) thor (1.3.2) @@ -609,9 +604,7 @@ GEM turbolinks-source (5.2.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - unicode-display_width (3.1.2) - unicode-emoji (~> 4.0, >= 4.0.4) - unicode-emoji (4.0.4) + unicode-display_width (2.4.2) useragent (0.16.10) vcr (6.1.0) version_gem (1.1.4) @@ -697,7 +690,7 @@ DEPENDENCIES progressbar pry-byebug psu_identity (~> 0.2) - puma (~> 6.5) + puma! rails (~> 7.2) rails-controller-testing rails_admin (~> 3.2) diff --git a/app/controllers/admin/publication_merges_controller.rb b/app/controllers/admin/publication_merges_controller.rb index 75f070b98..f52c8aa18 100644 --- a/app/controllers/admin/publication_merges_controller.rb +++ b/app/controllers/admin/publication_merges_controller.rb @@ -15,8 +15,9 @@ def create if params[:known_non_duplicate] pub_ids = [params[:selected_publication_ids], params[:merge_target_publication_id]].flatten - known_non_dup_ids = pub_ids.map do |pub_id| - Publication.find(pub_id).non_duplicate_group_ids + known_non_dup_ids = [] + pub_ids.each do |pub_id| + known_non_dup_ids << Publication.find(pub_id).non_duplicate_group_ids end hashed_kwn_non_dup = known_non_dup_ids .flatten diff --git a/app/controllers/open_access_publications_controller.rb b/app/controllers/open_access_publications_controller.rb index 0c50d920b..cf72003be 100644 --- a/app/controllers/open_access_publications_controller.rb +++ b/app/controllers/open_access_publications_controller.rb @@ -148,7 +148,7 @@ def create_scholarsphere_deposit @deposit.file_uploads = [] files = params.dig(:scholarsphere_work_deposit, :file_uploads_attributes) - files&.each_value do |file| + files&.each do |_index, file| if file.present? && file[:cache_path].present? 
ss_file_upload = ScholarsphereFileUpload.new ss_file_upload.file = File.new(file[:cache_path]) diff --git a/app/controllers/webhooks_controller.rb b/app/controllers/webhooks_controller.rb index 79fbc8bf6..b8cd2baea 100644 --- a/app/controllers/webhooks_controller.rb +++ b/app/controllers/webhooks_controller.rb @@ -21,6 +21,6 @@ def scholarsphere_events def authenticate_request raise 'ScholarSphere webhook secret not configured.' if Settings.scholarsphere.webhook_secret.blank? - head(:unauthorized) unless request.headers['X-API-KEY'] == Settings.scholarsphere.webhook_secret + return head(:unauthorized) unless request.headers['X-API-KEY'] == Settings.scholarsphere.webhook_secret end end diff --git a/app/importers/activity_insight_importer.rb b/app/importers/activity_insight_importer.rb index 0e682151d..f1816ba19 100644 --- a/app/importers/activity_insight_importer.rb +++ b/app/importers/activity_insight_importer.rb @@ -794,7 +794,7 @@ def status end def importable? - status == 'Published' || status == 'In Press' + (status == 'Published' || status == 'In Press') end def activity_insight_id diff --git a/app/importers/csv_importer.rb b/app/importers/csv_importer.rb index 972e69f03..24a761ca7 100644 --- a/app/importers/csv_importer.rb +++ b/app/importers/csv_importer.rb @@ -11,7 +11,7 @@ def initialize(filename:, batch_size: 500) @fatal_errors = [] @filename = filename @fatal_errors << "Cannot find file #{filename.inspect}" unless File.exists?(filename) - @fatal_errors << "File is empty #{filename.inspect}" if File.empty?(filename) + @fatal_errors << "File is empty #{filename.inspect}" if File.zero?(filename) @fatal_errors << "File has no records #{filename.inspect}" unless File.new(filename).readlines.size > 1 @batch_size = batch_size end diff --git a/app/importers/news_feed_item_importer.rb b/app/importers/news_feed_item_importer.rb index 0aed4983d..a1833863a 100644 --- a/app/importers/news_feed_item_importer.rb +++ b/app/importers/news_feed_item_importer.rb @@ -35,6 
+35,7 @@ def call tag_names: binding.local_variable_get(:tag_names) }) end + rescue StandardError => e log_error(e, { feed: feed }) end diff --git a/app/importers/pure_organizations_importer.rb b/app/importers/pure_organizations_importer.rb index b48d5a758..add683c99 100644 --- a/app/importers/pure_organizations_importer.rb +++ b/app/importers/pure_organizations_importer.rb @@ -22,6 +22,7 @@ def call }) end pbar.increment + rescue StandardError => e log_error(e, { organizations: organizations @@ -50,6 +51,7 @@ def call }) end pbar.increment + rescue StandardError => e log_error(e, { organizations: organizations diff --git a/app/importers/pure_publication_importer.rb b/app/importers/pure_publication_importer.rb index e4dbbdcbc..9c4370e6c 100644 --- a/app/importers/pure_publication_importer.rb +++ b/app/importers/pure_publication_importer.rb @@ -93,6 +93,7 @@ def call end end end + rescue StandardError => e log_error(e, { publication: publication }) end diff --git a/app/importers/pure_publishers_importer.rb b/app/importers/pure_publishers_importer.rb index 83d0f655b..9fd500b25 100644 --- a/app/importers/pure_publishers_importer.rb +++ b/app/importers/pure_publishers_importer.rb @@ -20,6 +20,7 @@ def call }) end pbar.increment + rescue StandardError => e log_error(e, {}) end diff --git a/app/importers/pure_user_importer.rb b/app/importers/pure_user_importer.rb index 2455c3b92..e9e6dcbb0 100644 --- a/app/importers/pure_user_importer.rb +++ b/app/importers/pure_user_importer.rb @@ -60,6 +60,7 @@ def call }) end pbar.increment + rescue StandardError => e log_error(e, {}) end diff --git a/app/models/concerns/null_object_pattern.rb b/app/models/concerns/null_object_pattern.rb index 1d7d8d28b..16bacb03c 100644 --- a/app/models/concerns/null_object_pattern.rb +++ b/app/models/concerns/null_object_pattern.rb @@ -4,7 +4,7 @@ module NullObjectPattern extend ActiveSupport::Concern def respond_to_missing?(_name, _include_private) - false + nil end def method_missing(_name, 
*_args) diff --git a/app/models/null_time.rb b/app/models/null_time.rb index de05ea1d4..d62323c1f 100644 --- a/app/models/null_time.rb +++ b/app/models/null_time.rb @@ -5,7 +5,6 @@ class NullTime def <=>(other) return -1 if other.is_a? Time - - 0 if other.is_a? NullTime + return 0 if other.is_a? NullTime end end diff --git a/app/models/publication.rb b/app/models/publication.rb index 154992189..5e525961b 100644 --- a/app/models/publication.rb +++ b/app/models/publication.rb @@ -160,7 +160,7 @@ def self.preferred_version_options .distinct(:id) } - scope :subject_to_open_access_policy, -> { oa_publication.published.where(published_on: Publication::OPEN_ACCESS_POLICY_START..) } + scope :subject_to_open_access_policy, -> { oa_publication.published.where('published_on >= ?', Publication::OPEN_ACCESS_POLICY_START) } scope :claimable_by, ->(user) { oa_publication.visible.where.not(id: user.authorships.unclaimable.map(&:publication_id)) } scope :open_access, -> { distinct(:id).left_outer_joins(:open_access_locations).where.not(open_access_locations: { publication_id: nil }) } diff --git a/app/models/scholarsphere_file_handler.rb b/app/models/scholarsphere_file_handler.rb index 6f0c91007..f7d026065 100644 --- a/app/models/scholarsphere_file_handler.rb +++ b/app/models/scholarsphere_file_handler.rb @@ -23,7 +23,7 @@ def file_uploads_attributes=(attributes) @exif_file_versions ||= [] @file_uploads ||= [] - attributes.each_value do |file_upload_params| + attributes.each do |_i, file_upload_params| file = file_upload_params[:file] if file.present? 
exif_file_version = ExifFileVersionChecker.new(file_path: file.path, diff --git a/app/models/user.rb b/app/models/user.rb index e3d6270f6..9764a5c5e 100644 --- a/app/models/user.rb +++ b/app/models/user.rb @@ -122,10 +122,10 @@ def self.needs_open_access_notification .where("publications.publication_type IN (#{User.oa_publication_type_params})", *Publication.oa_publication_types) .where('publications.id NOT IN (SELECT publication_id from authorships WHERE authorships.id IN (SELECT authorship_id FROM internal_publication_waivers))') .where(%{publications.id NOT IN (SELECT publication_id from authorships WHERE authorships.id IN (SELECT authorship_id FROM scholarsphere_work_deposits WHERE status = 'Pending'))}) - .where.not(users: { psu_identity: nil }) + .where.not('users.psu_identity IS NULL') .where("psu_identity->'data'->>'affiliation' != '[\"MEMBER\"]'") .where('users.open_access_notification_sent_at IS NULL OR users.open_access_notification_sent_at < ?', 6.months.ago) - .where(publications: { published_on: 2.years.ago.. }) + .where('publications.published_on >= ?', 2.years.ago) .where('publications.published_on >= user_organization_memberships.started_on AND (publications.published_on <= user_organization_memberships.ended_on OR user_organization_memberships.ended_on IS NULL)') .where('authorships.confirmed IS TRUE') .where('publications.visible = true') @@ -164,7 +164,7 @@ def available_deputy?(other_user) def notifiable_potential_open_access_publications potential_open_access_publications - .where(publications: { published_on: 2.years.ago.. }) + .where('publications.published_on >= ?', 2.years.ago) .order(published_on: :desc) .select(&:no_open_access_information?) 
.first(6) diff --git a/app/models/wos_author_name.rb b/app/models/wos_author_name.rb index ae346b3f7..71091824e 100644 --- a/app/models/wos_author_name.rb +++ b/app/models/wos_author_name.rb @@ -38,6 +38,6 @@ def middle_name_or_initial end def first_and_middle - full_name.split(',')[1]&.strip&.split + full_name.split(',')[1]&.strip&.split(' ') end end diff --git a/app/rails_admin_actions/new.rb b/app/rails_admin_actions/new.rb index c0474374f..570549cc7 100644 --- a/app/rails_admin_actions/new.rb +++ b/app/rails_admin_actions/new.rb @@ -21,7 +21,7 @@ class New < RailsAdmin::Config::Actions::Base @object = @abstract_model.new @action = @action.with(@action.bindings.merge(object: @object)) @authorization_adapter&.attributes_for(:new, @abstract_model)&.each do |name, value| - @object.send(:"#{name}=", value) + @object.send("#{name}=", value) end object_params = params[@abstract_model.param_key] if object_params diff --git a/app/services/psu_identity_user_service.rb b/app/services/psu_identity_user_service.rb index 0b69f58ac..d2af6b430 100644 --- a/app/services/psu_identity_user_service.rb +++ b/app/services/psu_identity_user_service.rb @@ -39,9 +39,9 @@ def query_psu_identity(webaccess_id) def attrs(identity) { - first_name: identity.preferred_given_name.presence || identity.given_name, - middle_name: identity.preferred_middle_name.presence || identity.middle_name, - last_name: identity.preferred_family_name.presence || identity.family_name, + first_name: (identity.preferred_given_name.presence || identity.given_name), + middle_name: (identity.preferred_middle_name.presence || identity.middle_name), + last_name: (identity.preferred_family_name.presence || identity.family_name), psu_identity: identity, psu_identity_updated_at: Time.zone.now } diff --git a/config/initializers/application_controller_renderer.rb b/config/initializers/application_controller_renderer.rb index 6d56e4390..f4556db39 100644 --- a/config/initializers/application_controller_renderer.rb +++ 
b/config/initializers/application_controller_renderer.rb @@ -1,5 +1,4 @@ # frozen_string_literal: true - # Be sure to restart your server when you modify this file. # ActiveSupport::Reloader.to_prepare do diff --git a/config/initializers/content_security_policy.rb b/config/initializers/content_security_policy.rb index 9c49284a8..f3bcce546 100644 --- a/config/initializers/content_security_policy.rb +++ b/config/initializers/content_security_policy.rb @@ -1,5 +1,4 @@ # frozen_string_literal: true - # Be sure to restart your server when you modify this file. # Define an application-wide content security policy diff --git a/config/initializers/mime_types.rb b/config/initializers/mime_types.rb index be6fedc53..6e1d16f02 100644 --- a/config/initializers/mime_types.rb +++ b/config/initializers/mime_types.rb @@ -1,5 +1,4 @@ # frozen_string_literal: true - # Be sure to restart your server when you modify this file. # Add new mime types for use in respond_to blocks: diff --git a/config/initializers/new_framework_defaults_7_2.rb b/config/initializers/new_framework_defaults_7_2.rb index 0344a6d02..7fb3740c3 100644 --- a/config/initializers/new_framework_defaults_7_2.rb +++ b/config/initializers/new_framework_defaults_7_2.rb @@ -1,5 +1,4 @@ # frozen_string_literal: true - # Be sure to restart your server when you modify this file. # # This file eases your Rails 7.2 framework defaults upgrade. diff --git a/config/initializers/permissions_policy.rb b/config/initializers/permissions_policy.rb index 810aadeb9..50bcf4ead 100644 --- a/config/initializers/permissions_policy.rb +++ b/config/initializers/permissions_policy.rb @@ -1,5 +1,4 @@ # frozen_string_literal: true - # Define an application-wide HTTP permissions policy. 
For further # information see https://developers.google.com/web/updates/2018/06/feature-policy # diff --git a/lib/tasks/generate_works.rake b/lib/tasks/generate_works.rake index e88baa9f5..e1e9710f4 100644 --- a/lib/tasks/generate_works.rake +++ b/lib/tasks/generate_works.rake @@ -4,38 +4,38 @@ require_relative '../utilities/works_generator' namespace :generate do task :oa_publication_no_open_access_location, [:webaccess_id] => :environment do |_task, args| - Utilities::WorksGenerator.new(args[:webaccess_id]).oa_publication_no_open_access_location + WorksGenerator.new(args[:webaccess_id]).oa_publication_no_open_access_location end task :oa_publication_with_open_access_location, [:webaccess_id] => :environment do |_task, args| - Utilities::WorksGenerator.new(args[:webaccess_id]).oa_publication_with_open_access_location + WorksGenerator.new(args[:webaccess_id]).oa_publication_with_open_access_location end task :oa_publication_in_press, [:webaccess_id] => :environment do |_task, args| - Utilities::WorksGenerator.new(args[:webaccess_id]).oa_publication_in_press + WorksGenerator.new(args[:webaccess_id]).oa_publication_in_press end task :other_work, [:webaccess_id] => :environment do |_task, args| - Utilities::WorksGenerator.new(args[:webaccess_id]).other_work + WorksGenerator.new(args[:webaccess_id]).other_work end task :oa_publication_from_activity_insight, [:webaccess_id] => :environment do |_task, args| - Utilities::WorksGenerator.new(args[:webaccess_id]).oa_publication_from_activity_insight + WorksGenerator.new(args[:webaccess_id]).oa_publication_from_activity_insight end task :oa_publication_duplicate_group, [:webaccess_id] => :environment do |_task, args| - Utilities::WorksGenerator.new(args[:webaccess_id]).oa_publication_duplicate_group + WorksGenerator.new(args[:webaccess_id]).oa_publication_duplicate_group end task :oa_publication_non_duplicate_group, [:webaccess_id] => :environment do |_task, args| - 
Utilities::WorksGenerator.new(args[:webaccess_id]).oa_publication_non_duplicate_group + WorksGenerator.new(args[:webaccess_id]).oa_publication_non_duplicate_group end task :presentation, [:webaccess_id] => :environment do |_task, args| - Utilities::WorksGenerator.new(args[:webaccess_id]).presentation + WorksGenerator.new(args[:webaccess_id]).presentation end task :performance, [:webaccess_id] => :environment do |_task, args| - Utilities::WorksGenerator.new(args[:webaccess_id]).performance + WorksGenerator.new(args[:webaccess_id]).performance end end diff --git a/lib/utilities/download_pure_fingerprints b/lib/utilities/download_pure_fingerprints index afb24cbf9..5ff355b2b 100755 --- a/lib/utilities/download_pure_fingerprints +++ b/lib/utilities/download_pure_fingerprints @@ -1,9 +1,9 @@ #!/usr/bin/env ruby # frozen_string_literal: true -require_relative 'pure_downloader' +require_relative './pure_downloader' -downloader = Utilities::PureDownloader.new +downloader = PureDownloader.new if File.exists? downloader.fingerprint_data_file print 'Publication fingerprint data file already exists. Overwrite (Yn)? ' diff --git a/lib/utilities/download_pure_orgs b/lib/utilities/download_pure_orgs index 1f02fa1bd..ce6285324 100755 --- a/lib/utilities/download_pure_orgs +++ b/lib/utilities/download_pure_orgs @@ -1,9 +1,9 @@ #!/usr/bin/env ruby # frozen_string_literal: true -require_relative 'pure_downloader' +require_relative './pure_downloader' -downloader = Utilities::PureDownloader.new +downloader = PureDownloader.new if File.exists? downloader.org_data_file print 'Organization data file already exists. Overwrite (Yn)? 
' diff --git a/lib/utilities/download_pure_pubs b/lib/utilities/download_pure_pubs index d1f0e1d2e..210e6b8cd 100755 --- a/lib/utilities/download_pure_pubs +++ b/lib/utilities/download_pure_pubs @@ -1,9 +1,9 @@ #!/usr/bin/env ruby # frozen_string_literal: true -require_relative 'pure_downloader' +require_relative './pure_downloader' -downloader = Utilities::PureDownloader.new +downloader = PureDownloader.new if Dir.exists?(downloader.pure_pub_dir) && !Dir.empty?(downloader.pure_pub_dir) print 'Pure publication data is already present. Overwrite (Yn)? ' diff --git a/lib/utilities/download_pure_users b/lib/utilities/download_pure_users index 52a20933f..85f78f6bb 100755 --- a/lib/utilities/download_pure_users +++ b/lib/utilities/download_pure_users @@ -1,9 +1,9 @@ #!/usr/bin/env ruby # frozen_string_literal: true -require_relative 'pure_downloader' +require_relative './pure_downloader' -downloader = Utilities::PureDownloader.new +downloader = PureDownloader.new if File.exists? downloader.user_data_file print 'User data file already exists. Overwrite (Yn)? 
' diff --git a/lib/utilities/pure_downloader.rb b/lib/utilities/pure_downloader.rb index 5bac5b140..c1e5d03e1 100644 --- a/lib/utilities/pure_downloader.rb +++ b/lib/utilities/pure_downloader.rb @@ -4,103 +4,101 @@ require 'pathname' require 'io/console' -module Utilities - class PureDownloader - def download_pure_orgs - get_api_key +class PureDownloader + def download_pure_orgs + get_api_key - first_org_result = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/organisational-units?navigationLink=false&size=1&offset=0'` + first_org_result = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/organisational-units?navigationLink=false&size=1&offset=0'` - total_orgs = JSON.parse(first_org_result)['count'] + total_orgs = JSON.parse(first_org_result)['count'] - all_orgs_results = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/organisational-units?navigationLink=false&size=#{total_orgs}&offset=0'` + all_orgs_results = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/organisational-units?navigationLink=false&size=#{total_orgs}&offset=0'` - File.open(org_data_file, 'w') do |f| - f.puts all_orgs_results - end + File.open(org_data_file, 'w') do |f| + f.puts all_orgs_results end + end - def download_pure_users - get_api_key + def download_pure_users + get_api_key - first_person_result = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/persons?navigationLink=false&size=1&offset=0'` + first_person_result = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/persons?navigationLink=false&size=1&offset=0'` - total_persons = JSON.parse(first_person_result)['count'] + total_persons = 
JSON.parse(first_person_result)['count'] - all_persons_results = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/persons?navigationLink=false&size=#{total_persons}&offset=0'` + all_persons_results = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/persons?navigationLink=false&size=#{total_persons}&offset=0'` - File.open(user_data_file, 'w') do |f| - f.puts all_persons_results - end + File.open(user_data_file, 'w') do |f| + f.puts all_persons_results end + end - def download_pure_pubs - get_api_key + def download_pure_pubs + get_api_key - first_pub_result = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/research-outputs?navigationLink=false&size=1&offset=0'` + first_pub_result = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/research-outputs?navigationLink=false&size=1&offset=0'` - page_size = 1000 - total_pubs = JSON.parse(first_pub_result)['count'] + page_size = 1000 + total_pubs = JSON.parse(first_pub_result)['count'] - total_pages = (total_pubs / page_size.to_f).ceil + total_pages = (total_pubs / page_size.to_f).ceil - 1.upto(total_pages) do |i| - offset = (i - 1) * page_size - pubs = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/research-outputs?navigationLink=false&size=#{page_size}&offset=#{offset}'` - download_file = pure_pub_dir.join("pure_publications_#{i}.json") - File.open(download_file, 'w') do |f| - f.puts pubs - end + 1.upto(total_pages) do |i| + offset = (i - 1) * page_size + pubs = `curl -X GET --header 'Accept: application/json' --header 'api-key: #{api_key}' 'https://pure.psu.edu/ws/api/524/research-outputs?navigationLink=false&size=#{page_size}&offset=#{offset}'` + download_file = 
pure_pub_dir.join("pure_publications_#{i}.json") + File.open(download_file, 'w') do |f| + f.puts pubs end end + end - def download_pure_fingerprints - get_api_key + def download_pure_fingerprints + get_api_key - first_fingerprint_result = `curl -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' --header 'api-key: #{api_key}' -d '{"size": 1, "offset": 0, "renderings": ["fingerprint"] }' 'https://pure.psu.edu/ws/api/524/research-outputs'` + first_fingerprint_result = `curl -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' --header 'api-key: #{api_key}' -d '{"size": 1, "offset": 0, "renderings": ["fingerprint"] }' 'https://pure.psu.edu/ws/api/524/research-outputs'` - total_fingerprints = JSON.parse(first_fingerprint_result)['count'] + total_fingerprints = JSON.parse(first_fingerprint_result)['count'] - all_fingerprints_results = `curl -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' --header 'api-key: #{api_key}' -d '{"size": #{total_fingerprints}, "offset": 0, "renderings": ["fingerprint"] }' 'https://pure.psu.edu/ws/api/524/research-outputs'` + all_fingerprints_results = `curl -X POST --header 'Content-Type: application/json' --header 'Accept: application/json' --header 'api-key: #{api_key}' -d '{"size": #{total_fingerprints}, "offset": 0, "renderings": ["fingerprint"] }' 'https://pure.psu.edu/ws/api/524/research-outputs'` - File.open(fingerprint_data_file, 'w') do |f| - f.puts all_fingerprints_results - end + File.open(fingerprint_data_file, 'w') do |f| + f.puts all_fingerprints_results end + end - def data_dir - root_dir.join('db', 'data') - end + def data_dir + root_dir.join('db', 'data') + end - def pure_pub_dir - data_dir.join('pure_publications') - end + def pure_pub_dir + data_dir.join('pure_publications') + end - def user_data_file - data_dir.join('pure_users.json') - end + def user_data_file + data_dir.join('pure_users.json') + end - def org_data_file 
- data_dir.join('pure_organizations.json') - end + def org_data_file + data_dir.join('pure_organizations.json') + end - def fingerprint_data_file - data_dir.join('pure_publication_fingerprints.json') - end + def fingerprint_data_file + data_dir.join('pure_publication_fingerprints.json') + end - private + private - attr_reader :api_key + attr_reader :api_key - def get_api_key - print 'Enter Pure API key: ' - @api_key = $stdin.noecho(&:gets).chomp + def get_api_key + print 'Enter Pure API key: ' + @api_key = $stdin.noecho(&:gets).chomp - puts "\n" - end + puts "\n" + end - def root_dir - Pathname.new(File.expand_path("#{File.dirname(__FILE__)}/../..")) - end - end + def root_dir + Pathname.new(File.expand_path("#{File.dirname(__FILE__)}/../..")) + end end diff --git a/lib/utilities/works_generator.rb b/lib/utilities/works_generator.rb index f091867a0..97dd88d3b 100644 --- a/lib/utilities/works_generator.rb +++ b/lib/utilities/works_generator.rb @@ -1,51 +1,49 @@ # frozen_string_literal: true -module Utilities - class WorksGenerator - def initialize(webaccess_id) - raise 'Cannot generate publications in the production environment' if Rails.env.production? +class WorksGenerator + def initialize(webaccess_id) + raise 'Cannot generate publications in the production environment' if Rails.env.production? 
- @user = User.find_by(webaccess_id: webaccess_id) || FactoryBot.create(:sample_user, webaccess_id: webaccess_id) - end + @user = User.find_by(webaccess_id: webaccess_id) || FactoryBot.create(:sample_user, webaccess_id: webaccess_id) + end - def oa_publication_no_open_access_location - FactoryBot.create :sample_publication, :oa_publication, :from_pure, user: user - end + def oa_publication_no_open_access_location + FactoryBot.create :sample_publication, :oa_publication, :from_pure, user: user + end - def oa_publication_with_open_access_location - FactoryBot.create :sample_publication, :oa_publication, :from_pure, :with_open_access_location, user: user - end + def oa_publication_with_open_access_location + FactoryBot.create :sample_publication, :oa_publication, :from_pure, :with_open_access_location, user: user + end - def oa_publication_in_press - FactoryBot.create :sample_publication, :oa_publication, :from_pure, :in_press, user: user - end + def oa_publication_in_press + FactoryBot.create :sample_publication, :oa_publication, :from_pure, :in_press, user: user + end - def oa_publication_from_activity_insight - FactoryBot.create :sample_publication, :oa_publication, :from_activity_insight, user: user - end + def oa_publication_from_activity_insight + FactoryBot.create :sample_publication, :oa_publication, :from_activity_insight, user: user + end - def oa_publication_duplicate_group - FactoryBot.create :sample_publication, :oa_publication, :from_pure, :with_duplicate_group, user: user - end + def oa_publication_duplicate_group + FactoryBot.create :sample_publication, :oa_publication, :from_pure, :with_duplicate_group, user: user + end - def oa_publication_non_duplicate_group - FactoryBot.create :sample_publication, :oa_publication, :from_pure, :with_non_duplicate_group, user: user - end + def oa_publication_non_duplicate_group + FactoryBot.create :sample_publication, :oa_publication, :from_pure, :with_non_duplicate_group, user: user + end - def other_work - 
FactoryBot.create :sample_publication, :other_work, :from_pure, user: user - end + def other_work + FactoryBot.create :sample_publication, :other_work, :from_pure, user: user + end - def presentation - FactoryBot.create :sample_presentation, user: user - end + def presentation + FactoryBot.create :sample_presentation, user: user + end - def performance - FactoryBot.create :sample_performance, user: user - end + def performance + FactoryBot.create :sample_performance, user: user + end - private + private - attr_accessor :user - end + attr_accessor :user end diff --git a/spec/component/components/activity_insight_oa_dashboard_component_spec.rb b/spec/component/components/activity_insight_oa_dashboard_component_spec.rb index 286270980..069887a1f 100644 --- a/spec/component/components/activity_insight_oa_dashboard_component_spec.rb +++ b/spec/component/components/activity_insight_oa_dashboard_component_spec.rb @@ -15,7 +15,7 @@ render_inline(described_class.new) expect(page.find_by_id('doi-verification-card').to_json).to include('text-muted') expect(page.find_by_id('doi-verification-card').text).to include('0') - expect(rendered_content).to have_no_link(href: '/activity_insight_oa_workflow/doi_verification') + expect(rendered_content).not_to have_link(href: '/activity_insight_oa_workflow/doi_verification') end end @@ -40,7 +40,7 @@ render_inline(described_class.new) expect(page.find_by_id('file-version-check-card').to_json).to include('text-muted') expect(page.find_by_id('file-version-check-card').text).to include('0') - expect(rendered_content).to have_no_link(href: '/activity_insight_oa_workflow/file_version_review') + expect(rendered_content).not_to have_link(href: '/activity_insight_oa_workflow/file_version_review') end end @@ -79,7 +79,7 @@ render_inline(described_class.new) expect(page.find_by_id('wrong-file-version-check-card').to_json).to include('text-muted') expect(page.find_by_id('wrong-file-version-check-card').text).to include('0') - 
expect(rendered_content).to have_no_link(href: '/activity_insight_oa_workflow/wrong_file_version_review') + expect(rendered_content).not_to have_link(href: '/activity_insight_oa_workflow/wrong_file_version_review') end end @@ -158,7 +158,7 @@ render_inline(described_class.new) expect(page.find_by_id('wrong-version-author-notified-check-card').to_json).to include('text-muted') expect(page.find_by_id('wrong-version-author-notified-check-card').text).to include('0') - expect(rendered_content).to have_no_link(href: '/activity_insight_oa_workflow/wrong_version_author_notified_review') + expect(rendered_content).not_to have_link(href: '/activity_insight_oa_workflow/wrong_version_author_notified_review') end end @@ -218,7 +218,7 @@ render_inline(described_class.new) expect(page.find_by_id('preferred-file-version-none-check-card').to_json).to include('text-muted') expect(page.find_by_id('preferred-file-version-none-check-card').text).to include('0') - expect(rendered_content).to have_no_link(href: '/activity_insight_oa_workflow/preferred_file_version_none_review') + expect(rendered_content).not_to have_link(href: '/activity_insight_oa_workflow/preferred_file_version_none_review') end end @@ -250,7 +250,7 @@ render_inline(described_class.new) expect(page.find_by_id('preferred-version-check-card').to_json).to include('text-muted') expect(page.find_by_id('preferred-version-check-card').text).to include('0') - expect(rendered_content).to have_no_link(href: '/activity_insight_oa_workflow/preferred_version_review') + expect(rendered_content).not_to have_link(href: '/activity_insight_oa_workflow/preferred_version_review') end end @@ -280,7 +280,7 @@ render_inline(described_class.new) expect(page.find_by_id('permissions-review-card').to_json).to include('text-muted') expect(page.find_by_id('permissions-review-card').text).to include('0') - expect(rendered_content).to have_no_link( + expect(rendered_content).not_to have_link( href: 
Rails.application.routes.url_helpers.activity_insight_oa_workflow_permissions_review_path ) end @@ -324,7 +324,7 @@ render_inline(described_class.new) expect(page.find_by_id('metadata-check-card').to_json).to include('text-muted') expect(page.find_by_id('metadata-check-card').text).to include('0') - expect(rendered_content).to have_no_link( + expect(rendered_content).not_to have_link( href: Rails.application.routes.url_helpers.activity_insight_oa_workflow_metadata_review_path ) end @@ -370,7 +370,7 @@ render_inline(described_class.new) expect(page.find_by_id('flagged-for-review-card').to_json).to include('text-muted') expect(page.find_by_id('flagged-for-review-card').text).to include('0') - expect(rendered_content).to have_no_link(href: '/activity_insight_oa_workflow/flagged_for_review') + expect(rendered_content).not_to have_link(href: '/activity_insight_oa_workflow/flagged_for_review') end end diff --git a/spec/component/components/deputy_assignment_component_spec.rb b/spec/component/components/deputy_assignment_component_spec.rb index cfa6ca9d4..b101847ab 100644 --- a/spec/component/components/deputy_assignment_component_spec.rb +++ b/spec/component/components/deputy_assignment_component_spec.rb @@ -35,7 +35,7 @@ let(:confirmed_at) { Time.zone.now } it 'does not show a "pending" message' do - expect(rendered_content).to have_no_text(I18n.t!('view_component.deputy_assignment_component.pending_as_primary')) + expect(rendered_content).not_to have_text(I18n.t!('view_component.deputy_assignment_component.pending_as_primary')) end it 'shows the delete button' do @@ -75,15 +75,15 @@ let(:confirmed_at) { Time.zone.now } it 'does not add an action-required class' do - expect(rendered_content).to have_no_css('.deputy-assignment--action-required') + expect(rendered_content).not_to have_css('.deputy-assignment--action-required') end it 'does not show an action-required message' do - expect(rendered_content).to 
have_no_text(I18n.t!('view_component.deputy_assignment_component.pending_as_deputy')) + expect(rendered_content).not_to have_text(I18n.t!('view_component.deputy_assignment_component.pending_as_deputy')) end it 'does not show a button to accept the DeputyAssignment' do - expect(rendered_content).to have_no_button(I18n.t!('view_component.deputy_assignment_component.accept')) + expect(rendered_content).not_to have_button(I18n.t!('view_component.deputy_assignment_component.accept')) end it 'shows the delete button' do diff --git a/spec/component/components/scholarsphere_deposit_form_component_spec.rb b/spec/component/components/scholarsphere_deposit_form_component_spec.rb index 202f4375f..699c77d01 100644 --- a/spec/component/components/scholarsphere_deposit_form_component_spec.rb +++ b/spec/component/components/scholarsphere_deposit_form_component_spec.rb @@ -51,7 +51,7 @@ it 'renders editable doi field' do view_render - expect(rendered_content).to have_no_field('DOI', readonly: true) + expect(rendered_content).not_to have_field('DOI', readonly: true) expect(rendered_content).to have_field('DOI') end end @@ -91,7 +91,7 @@ it 'renders no notice and defaults to "All Rights Reserved"' do view_render - expect(rendered_content).to have_no_text('We found the license for your work') + expect(rendered_content).not_to have_text('We found the license for your work') expect(rendered_content).to have_field('License', with: 'https://rightsstatements.org/page/InC/1.0/') end end @@ -111,7 +111,7 @@ it 'does not render any notice and does not prefill data' do view_render - expect(rendered_content).to have_no_text('We found the set statement for your work') + expect(rendered_content).not_to have_text('We found the set statement for your work') expect(rendered_content).to have_field('Publisher Statement', with: '') end end @@ -149,7 +149,7 @@ it 'does not render any notice and does not prefill data' do view_render - expect(rendered_content).to have_no_text('We found the embargo end 
date for your work') + expect(rendered_content).not_to have_text('We found the embargo end date for your work') expect(rendered_content).to have_css('#scholarsphere_work_deposit_embargoed_until_1i', text: '') expect(rendered_content).to have_css('#scholarsphere_work_deposit_embargoed_until_2i', text: '') expect(rendered_content).to have_css('#scholarsphere_work_deposit_embargoed_until_3i', text: '') diff --git a/spec/component/controllers/admin/duplicate_publication_groups_controller_spec.rb b/spec/component/controllers/admin/duplicate_publication_groups_controller_spec.rb index 7d41e5316..3ba32c74d 100644 --- a/spec/component/controllers/admin/duplicate_publication_groups_controller_spec.rb +++ b/spec/component/controllers/admin/duplicate_publication_groups_controller_spec.rb @@ -53,7 +53,7 @@ it "doesn't remove the publications from the group" do delete :delete, params: { id: group.id } - expect(group.publications).to contain_exactly(pub1, pub2) + expect(group.publications).to match_array [pub1, pub2] end it "doesn't delete the group" do diff --git a/spec/component/controllers/deputy_assignments_controller_spec.rb b/spec/component/controllers/deputy_assignments_controller_spec.rb index 040fcf68b..9bbef5fd1 100644 --- a/spec/component/controllers/deputy_assignments_controller_spec.rb +++ b/spec/component/controllers/deputy_assignments_controller_spec.rb @@ -35,7 +35,8 @@ context 'when all is well' do before do - allow(mock_form).to receive_messages(save: true, deputy_assignment: mock_assignment) + allow(mock_form).to receive(:save).and_return(true) + allow(mock_form).to receive(:deputy_assignment).and_return(mock_assignment) end it 'creates the DeputyAssignment' do @@ -168,7 +169,9 @@ allow(DeputyAssignmentDeleteService).to receive(:call) - allow(DeputyAssignmentsMailer).to receive_messages(deputy_assignment_declination: mock_mailer, deputy_status_revoked: mock_mailer, deputy_status_ended: mock_mailer) + allow(DeputyAssignmentsMailer).to 
receive(:deputy_assignment_declination).and_return(mock_mailer) + allow(DeputyAssignmentsMailer).to receive(:deputy_status_revoked).and_return(mock_mailer) + allow(DeputyAssignmentsMailer).to receive(:deputy_status_ended).and_return(mock_mailer) end context 'when the current user is the primary' do diff --git a/spec/component/controllers/internal_publication_waivers_controller_spec.rb b/spec/component/controllers/internal_publication_waivers_controller_spec.rb index 7de8dff52..476b48d01 100644 --- a/spec/component/controllers/internal_publication_waivers_controller_spec.rb +++ b/spec/component/controllers/internal_publication_waivers_controller_spec.rb @@ -114,7 +114,7 @@ context 'when given the ID for a publication that belongs to the user and is not open access' do it 'returns 200 OK' do get :new, params: { id: pub.id } - expect(response).to have_http_status :ok + expect(response.code).to eq '200' end end end diff --git a/spec/component/controllers/open_access_publications_controller_spec.rb b/spec/component/controllers/open_access_publications_controller_spec.rb index 127f5379e..1336b717c 100644 --- a/spec/component/controllers/open_access_publications_controller_spec.rb +++ b/spec/component/controllers/open_access_publications_controller_spec.rb @@ -82,7 +82,7 @@ context 'when given the ID for a publication that belongs to the user and has an open access URL' do it 'returns 200 OK' do get :edit, params: { id: oa_pub.id } - expect(response).to have_http_status :ok + expect(response.code).to eq '200' end it 'renders a readonly view of the publication' do @@ -93,7 +93,7 @@ context 'when given the ID for a publication that belongs to the user and has a user-submitted open access URL' do it 'returns 200 OK' do get :edit, params: { id: uoa_pub.id } - expect(response).to have_http_status :ok + expect(response.code).to eq '200' end it 'renders a readonly view of the publication' do @@ -104,7 +104,7 @@ context 'when given the ID for a publication that has already been 
uploaded to ScholarSphere by the user' do it 'returns 200 OK' do get :edit, params: { id: uploaded_pub.id } - expect(response).to have_http_status :ok + expect(response.code).to eq '200' end it 'renders a readonly view of the publication' do @@ -115,7 +115,7 @@ context 'when given the ID for a publication that has already been uploaded to ScholarSphere by another user' do it 'returns 200 OK' do get :edit, params: { id: other_uploaded_pub.id } - expect(response).to have_http_status :ok + expect(response.code).to eq '200' end it 'renders a readonly view of the publication' do @@ -126,7 +126,7 @@ context 'when given the ID for a publication for which the user has waived open access' do it 'returns 200 OK' do get :edit, params: { id: waived_pub.id } - expect(response).to have_http_status :ok + expect(response.code).to eq '200' end it 'renders a readonly view of the publication' do @@ -137,7 +137,7 @@ context 'when given the ID for a publication for which another user has waived open access' do it 'returns 200 OK' do get :edit, params: { id: other_waived_pub.id } - expect(response).to have_http_status :ok + expect(response.code).to eq '200' end it 'renders a readonly view of the publication' do @@ -149,7 +149,7 @@ context 'when the open access fields are nil' do it 'returns 200 OK' do get :edit, params: { id: pub.id } - expect(response).to have_http_status :ok + expect(response.code).to eq '200' end it 'renders the open access form' do @@ -160,7 +160,7 @@ context 'when the open access fields are blank' do it 'returns 200 OK' do get :edit, params: { id: blank_oa_pub.id } - expect(response).to have_http_status :ok + expect(response.code).to eq '200' end it 'renders the open access form' do diff --git a/spec/component/importers/activity_insight_importer_spec.rb b/spec/component/importers/activity_insight_importer_spec.rb index 618bba099..d45550df6 100644 --- a/spec/component/importers/activity_insight_importer_spec.rb +++ 
b/spec/component/importers/activity_insight_importer_spec.rb @@ -869,7 +869,7 @@ group = p1.duplicate_group - expect(group.publications).to contain_exactly(p1, duplicate_pub) + expect(group.publications).to match_array [p1, duplicate_pub] end it 'hides new publications that might be duplicates' do @@ -1190,7 +1190,7 @@ group = p1.duplicate_group - expect(group.publications).to contain_exactly(p1, duplicate_pub) + expect(group.publications).to match_array [p1, duplicate_pub] end it 'hides new publications that might be duplicates' do @@ -1481,7 +1481,7 @@ group = p1.duplicate_group - expect(group.publications).to contain_exactly(p1, duplicate_pub) + expect(group.publications).to match_array [p1, duplicate_pub] end it 'hides new publications that might be duplicates' do @@ -2530,7 +2530,7 @@ group = p1.duplicate_group - expect(group.publications).to contain_exactly(p1, duplicate_pub) + expect(group.publications).to match_array [p1, duplicate_pub] end it 'hides new publications that might be duplicates' do @@ -2797,7 +2797,7 @@ group = p1.duplicate_group - expect(group.publications).to contain_exactly(p1, duplicate_pub) + expect(group.publications).to match_array [p1, duplicate_pub] end it 'hides new publications that might be duplicates' do @@ -3091,7 +3091,7 @@ group = p1.duplicate_group - expect(group.publications).to contain_exactly(p1, duplicate_pub) + expect(group.publications).to match_array [p1, duplicate_pub] end it 'hides new publications that might be duplicates' do @@ -4112,7 +4112,7 @@ group = p1.duplicate_group - expect(group.publications).to contain_exactly(p1, duplicate_pub) + expect(group.publications).to match_array [p1, duplicate_pub] end it 'hides new publications that might be duplicates' do @@ -4374,7 +4374,7 @@ group = p1.duplicate_group - expect(group.publications).to contain_exactly(p1, duplicate_pub) + expect(group.publications).to match_array [p1, duplicate_pub] end it 'hides new publications that might be duplicates' do @@ -4667,7 +4667,7 
@@ group = p1.duplicate_group - expect(group.publications).to contain_exactly(p1, duplicate_pub) + expect(group.publications).to match_array [p1, duplicate_pub] end it 'hides new publications that might be duplicates' do diff --git a/spec/component/importers/psu_dickinson_publication_importer_spec.rb b/spec/component/importers/psu_dickinson_publication_importer_spec.rb index 472bbad1a..53496a49a 100644 --- a/spec/component/importers/psu_dickinson_publication_importer_spec.rb +++ b/spec/component/importers/psu_dickinson_publication_importer_spec.rb @@ -153,7 +153,7 @@ group = pub.duplicate_group - expect(group.publications).to contain_exactly(pub, duplicate_pub) + expect(group.publications).to match_array [pub, duplicate_pub] end it 'hides new publications that might be duplicates' do diff --git a/spec/component/importers/psu_law_school_publication_importer_spec.rb b/spec/component/importers/psu_law_school_publication_importer_spec.rb index 26b3bb7d9..0a9578562 100644 --- a/spec/component/importers/psu_law_school_publication_importer_spec.rb +++ b/spec/component/importers/psu_law_school_publication_importer_spec.rb @@ -153,7 +153,7 @@ group = pub.duplicate_group - expect(group.publications).to contain_exactly(pub, duplicate_pub) + expect(group.publications).to match_array [pub, duplicate_pub] end it 'hides new publications that might be duplicates' do diff --git a/spec/component/importers/pure_publication_importer_spec.rb b/spec/component/importers/pure_publication_importer_spec.rb index 42e77ecb2..2503a4127 100644 --- a/spec/component/importers/pure_publication_importer_spec.rb +++ b/spec/component/importers/pure_publication_importer_spec.rb @@ -230,7 +230,7 @@ p2 = found_pub2.publication group = p2.duplicate_group - expect(group.publications).to contain_exactly(p2, duplicate_pub1, duplicate_pub2) + expect(group.publications).to match_array [p2, duplicate_pub1, duplicate_pub2] end it 'hides existing publications that might be duplicates' do @@ -447,7 +447,7 @@ p2 = 
found_pub2.publication group = p2.duplicate_group - expect(group.publications).to contain_exactly(p2, duplicate_pub1, duplicate_pub2) + expect(group.publications).to match_array [p2, duplicate_pub1, duplicate_pub2] end it 'hides existing publications that might be duplicates' do @@ -618,7 +618,7 @@ p2 = found_pub2.publication group = p2.duplicate_group - expect(group.publications).to contain_exactly(p2, duplicate_pub1, duplicate_pub2) + expect(group.publications).to match_array [p2, duplicate_pub1, duplicate_pub2] end it 'hides existing publications that might be duplicates' do diff --git a/spec/component/importers/pure_publication_tag_importer_spec.rb b/spec/component/importers/pure_publication_tag_importer_spec.rb index bad39ff2c..67be69239 100644 --- a/spec/component/importers/pure_publication_tag_importer_spec.rb +++ b/spec/component/importers/pure_publication_tag_importer_spec.rb @@ -86,7 +86,7 @@ expect { importer.call }.to change(PublicationTagging, :count).by 3 expect(pub1.tags).to eq [found_tag1] - expect(pub2.tags).to contain_exactly(found_tag2, found_tag3) + expect(pub2.tags).to match_array [found_tag2, found_tag3] end it 'saves the correct ranks on the taggings' do @@ -121,7 +121,7 @@ expect { importer.call }.to change(PublicationTagging, :count).by 3 expect(pub1.tags).to eq [found_tag1] - expect(pub2.tags).to contain_exactly(found_tag2, found_tag3) + expect(pub2.tags).to match_array [found_tag2, found_tag3] end it 'saves the correct ranks on the taggings' do diff --git a/spec/component/jobs/publication_download_job_spec.rb b/spec/component/jobs/publication_download_job_spec.rb index 178584c86..7e46948d7 100644 --- a/spec/component/jobs/publication_download_job_spec.rb +++ b/spec/component/jobs/publication_download_job_spec.rb @@ -21,7 +21,7 @@ # # Also, the API key needed for the Activity Insight S3 Authorizer is not stored in settings/test.yml. # So, you'll need to override this in your test.local.yml to live test this. 
- describe '#perform_now', :no_ci do + describe '#perform_now', no_ci: true do let!(:publication) { create(:publication) } let!(:ai_oa_file) { create(:activity_insight_oa_file, publication: publication, version: 'acceptedVersion', location: 'nmg110/intellcont/test_file-1.pdf') } let(:file_path) { Rails.root.join("tmp/uploads/activity_insight_file_uploads/#{ai_oa_file.id}/file/test_file-1.pdf") } diff --git a/spec/component/lib/omniauth/strategies/azure_oauth_spec.rb b/spec/component/lib/omniauth/strategies/azure_oauth_spec.rb index 07d11947b..2f26899e9 100644 --- a/spec/component/lib/omniauth/strategies/azure_oauth_spec.rb +++ b/spec/component/lib/omniauth/strategies/azure_oauth_spec.rb @@ -24,9 +24,9 @@ end end - # describe '#callback_url' do - # xit - # end + describe '#callback_url' do + xit + end describe '#uid' do it "returns user's WebAccess ID that's parsed out of the access token" do diff --git a/spec/component/lib/utilities/works_generator_spec.rb b/spec/component/lib/utilities/works_generator_spec.rb index a4594d422..66fc6012f 100644 --- a/spec/component/lib/utilities/works_generator_spec.rb +++ b/spec/component/lib/utilities/works_generator_spec.rb @@ -3,7 +3,7 @@ require 'component/component_spec_helper' require_relative '../../../../lib/utilities/works_generator' -describe Utilities::WorksGenerator do +describe WorksGenerator do let!(:user) { create(:user, webaccess_id: 'abc123') } let(:generator) { described_class.new(user.webaccess_id) } diff --git a/spec/component/models/activity_insight_oa_file_spec.rb b/spec/component/models/activity_insight_oa_file_spec.rb index 8a22c5f96..d3fa00bc5 100644 --- a/spec/component/models/activity_insight_oa_file_spec.rb +++ b/spec/component/models/activity_insight_oa_file_spec.rb @@ -273,31 +273,31 @@ describe '.subject_to_ai_oa_workflow' do it 'returns files that have an associated publication that is subject to the activity insight oa workflow' do - expect(described_class.subject_to_ai_oa_workflow).to 
contain_exactly(file1, file3, file4, file17, file18, file19) + expect(described_class.subject_to_ai_oa_workflow).to match_array [file1, file3, file4, file17, file18, file19] end end describe '.ready_for_download' do it 'returns files that are ready to download from Activity Insight' do - expect(described_class.ready_for_download).to contain_exactly(file1) + expect(described_class.ready_for_download).to match_array [file1] end end describe '.needs_version_check' do it 'returns files that are ready to have their versions automatically determined' do - expect(described_class.needs_version_check).to contain_exactly(file17) + expect(described_class.needs_version_check).to match_array [file17] end end describe '.send_oa_status_to_activity_insight' do it 'returns files that have not yet been exported to activity insight & whose publication has a gold or hybrid oa status' do - expect(described_class.send_oa_status_to_activity_insight).to contain_exactly(file6, file7, file9, file10, file11, file20, file21) + expect(described_class.send_oa_status_to_activity_insight).to match_array [file6, file7, file9, file10, file11, file20, file21] end end describe '.needs_permissions_check' do it 'returns files that have a known version but have not had their permissions checked yet' do - expect(described_class.needs_permissions_check).to contain_exactly(file12, file13) + expect(described_class.needs_permissions_check).to match_array [file12, file13] end end end diff --git a/spec/component/models/api_token_spec.rb b/spec/component/models/api_token_spec.rb index 18d232681..c905f1089 100644 --- a/spec/component/models/api_token_spec.rb +++ b/spec/component/models/api_token_spec.rb @@ -140,19 +140,19 @@ describe '#all_publications' do it "returns publications that were published during their users' memberships in associated organizations and their descendants" do - expect(token.all_publications).to contain_exactly(pub1, pub2_1, pub3_1, pub3_2) + expect(token.all_publications).to match_array 
[pub1, pub2_1, pub3_1, pub3_2] end end describe '#all_current_users' do it 'returns users that are currently members of associated organizations and their descendants' do - expect(token.all_current_users).to contain_exactly(user1, user2, user3_1) + expect(token.all_current_users).to match_array [user1, user2, user3_1] end end describe '#all_organizations' do it 'returns organizations that are associated organizations and their descendants' do - expect(token.all_organizations).to contain_exactly(org1, org2, org3) + expect(token.all_organizations).to match_array [org1, org2, org3] end end end diff --git a/spec/component/models/authorship_spec.rb b/spec/component/models/authorship_spec.rb index 7ec6dc2b9..bb96b0d4d 100644 --- a/spec/component/models/authorship_spec.rb +++ b/spec/component/models/authorship_spec.rb @@ -76,7 +76,7 @@ let!(:auth4) { create(:authorship, claimed_by_user: true, confirmed: true) } it 'only returns authorships that are either confirmed or already claimed by a user' do - expect(described_class.unclaimable).to contain_exactly(auth2, auth3, auth4) + expect(described_class.unclaimable).to match_array [auth2, auth3, auth4] end end @@ -96,7 +96,7 @@ let!(:auth4) { create(:authorship, claimed_by_user: true, confirmed: true) } it 'only returns authorships that are both claimed by a user and unconfirmed' do - expect(described_class.claimed_and_unconfirmed).to contain_exactly(auth2) + expect(described_class.claimed_and_unconfirmed).to match_array [auth2] end end diff --git a/spec/component/models/contract_spec.rb b/spec/component/models/contract_spec.rb index 6cf098e00..560c0bf62 100644 --- a/spec/component/models/contract_spec.rb +++ b/spec/component/models/contract_spec.rb @@ -62,7 +62,7 @@ let(:invisible_contract) { create(:contract, visible: false) } it 'returns the contracts that are marked as visible' do - expect(described_class.visible).to contain_exactly(visible_contract1, visible_contract2) + expect(described_class.visible).to match_array 
[visible_contract1, visible_contract2] end end end diff --git a/spec/component/models/duplicate_publication_group_spec.rb b/spec/component/models/duplicate_publication_group_spec.rb index a91cf5cba..40a6468a7 100644 --- a/spec/component/models/duplicate_publication_group_spec.rb +++ b/spec/component/models/duplicate_publication_group_spec.rb @@ -294,28 +294,28 @@ it 'finds similar publications and groups them' do described_class.group_duplicates - expect(p1_1.reload.duplicate_group.publications).to contain_exactly(p1_1, p1_2, p1_3, p1) + expect(p1_1.reload.duplicate_group.publications).to match_array [p1_1, p1_2, p1_3, p1] - expect(p2_1.reload.duplicate_group.publications).to contain_exactly(p2_1, p2_2) + expect(p2_1.reload.duplicate_group.publications).to match_array [p2_1, p2_2] expect(p3_1.reload.duplicate_group).to be_nil expect(p3_2.reload.duplicate_group).to be_nil - expect(p4_1.reload.duplicate_group.publications).to contain_exactly(p4_1, p4_2) + expect(p4_1.reload.duplicate_group.publications).to match_array [p4_1, p4_2] - expect(p5_1.reload.duplicate_group.publications).to contain_exactly(p5_1, p5_2) + expect(p5_1.reload.duplicate_group.publications).to match_array [p5_1, p5_2] expect(p6_1.reload.duplicate_group).to be_nil expect(p6_2.reload.duplicate_group).to be_nil - expect(p7_1.reload.duplicate_group.publications).to contain_exactly(p7_1, p7_2) + expect(p7_1.reload.duplicate_group.publications).to match_array [p7_1, p7_2] - expect(p8_1.reload.duplicate_group.publications).to contain_exactly(p8_1, p8_2) + expect(p8_1.reload.duplicate_group.publications).to match_array [p8_1, p8_2] expect(p9_1.reload.duplicate_group).to be_nil expect(p9_2.reload.duplicate_group).to be_nil - expect(p10_1.reload.duplicate_group.publications).to contain_exactly(p10_1, p10_2) + expect(p10_1.reload.duplicate_group.publications).to match_array [p10_1, p10_2] expect(p11_1.reload.duplicate_group).to be_nil expect(p11_2.reload.duplicate_group).to be_nil @@ -349,21 +349,21 @@ # 
deduplication process. # 4. The same scenarios as #2 and #3 above may occur only with the second and third # publication being the true match instead of the first and the second. - expect(p12_1.reload.duplicate_group.publications).to contain_exactly(p12_1, p12_2, p12_3) - expect(p12_2.reload.duplicate_group.publications).to contain_exactly(p12_1, p12_2, p12_3) - expect(p12_3.reload.duplicate_group.publications).to contain_exactly(p12_1, p12_2, p12_3) + expect(p12_1.reload.duplicate_group.publications).to match_array [p12_1, p12_2, p12_3] + expect(p12_2.reload.duplicate_group.publications).to match_array [p12_1, p12_2, p12_3] + expect(p12_3.reload.duplicate_group.publications).to match_array [p12_1, p12_2, p12_3] expect(p13_1.reload.duplicate_group).to be_nil expect(p13_2.reload.duplicate_group).to be_nil - expect(p14_1.reload.duplicate_group.publications).to contain_exactly(p14_1, p14_2, p14_3, p14_4) - expect(p14_2.reload.duplicate_group.publications).to contain_exactly(p14_1, p14_2, p14_3, p14_4) - expect(p14_3.reload.duplicate_group.publications).to contain_exactly(p14_1, p14_2, p14_3, p14_4) - expect(p14_4.reload.duplicate_group.publications).to contain_exactly(p14_1, p14_2, p14_3, p14_4) + expect(p14_1.reload.duplicate_group.publications).to match_array [p14_1, p14_2, p14_3, p14_4] + expect(p14_2.reload.duplicate_group.publications).to match_array [p14_1, p14_2, p14_3, p14_4] + expect(p14_3.reload.duplicate_group.publications).to match_array [p14_1, p14_2, p14_3, p14_4] + expect(p14_4.reload.duplicate_group.publications).to match_array [p14_1, p14_2, p14_3, p14_4] - expect(p15_1.reload.duplicate_group.publications).to contain_exactly(p15_1, p15_2) + expect(p15_1.reload.duplicate_group.publications).to match_array [p15_1, p15_2] - expect(p16_1.reload.duplicate_group.publications).to contain_exactly(p16_1, p16_2) + expect(p16_1.reload.duplicate_group.publications).to match_array [p16_1, p16_2] expect(p17_1.reload.duplicate_group).to be_nil 
expect(p17_2.reload.duplicate_group).to be_nil @@ -372,65 +372,65 @@ it 'is idempotent' do expect { 2.times { described_class.group_duplicates } }.to change(described_class, :count).by 10 - expect(p1_1.reload.duplicate_group.publications).to contain_exactly(p1_1, p1_2, p1_3, p1) - expect(p2_1.reload.duplicate_group.publications).to contain_exactly(p2_1, p2_2) + expect(p1_1.reload.duplicate_group.publications).to match_array [p1_1, p1_2, p1_3, p1] + expect(p2_1.reload.duplicate_group.publications).to match_array [p2_1, p2_2] expect(p3_1.reload.duplicate_group).to be_nil expect(p3_2.reload.duplicate_group).to be_nil - expect(p4_1.reload.duplicate_group.publications).to contain_exactly(p4_1, p4_2) - expect(p5_1.reload.duplicate_group.publications).to contain_exactly(p5_1, p5_2) + expect(p4_1.reload.duplicate_group.publications).to match_array [p4_1, p4_2] + expect(p5_1.reload.duplicate_group.publications).to match_array [p5_1, p5_2] expect(p6_1.reload.duplicate_group).to be_nil expect(p6_2.reload.duplicate_group).to be_nil - expect(p7_1.reload.duplicate_group.publications).to contain_exactly(p7_1, p7_2) - expect(p8_1.reload.duplicate_group.publications).to contain_exactly(p8_1, p8_2) + expect(p7_1.reload.duplicate_group.publications).to match_array [p7_1, p7_2] + expect(p8_1.reload.duplicate_group.publications).to match_array [p8_1, p8_2] expect(p9_1.reload.duplicate_group).to be_nil expect(p9_2.reload.duplicate_group).to be_nil - expect(p10_1.reload.duplicate_group.publications).to contain_exactly(p10_1, p10_2) + expect(p10_1.reload.duplicate_group.publications).to match_array [p10_1, p10_2] expect(p11_1.reload.duplicate_group).to be_nil expect(p11_2.reload.duplicate_group).to be_nil expect(p11_3.reload.duplicate_group).to be_nil - expect(p12_1.reload.duplicate_group.publications).to contain_exactly(p12_1, p12_2, p12_3) - expect(p12_2.reload.duplicate_group.publications).to contain_exactly(p12_1, p12_2, p12_3) - expect(p12_3.reload.duplicate_group.publications).to 
contain_exactly(p12_1, p12_2, p12_3) + expect(p12_1.reload.duplicate_group.publications).to match_array [p12_1, p12_2, p12_3] + expect(p12_2.reload.duplicate_group.publications).to match_array [p12_1, p12_2, p12_3] + expect(p12_3.reload.duplicate_group.publications).to match_array [p12_1, p12_2, p12_3] expect(p13_1.reload.duplicate_group).to be_nil expect(p13_2.reload.duplicate_group).to be_nil - expect(p14_1.reload.duplicate_group.publications).to contain_exactly(p14_1, p14_2, p14_3, p14_4) - expect(p14_2.reload.duplicate_group.publications).to contain_exactly(p14_1, p14_2, p14_3, p14_4) - expect(p14_3.reload.duplicate_group.publications).to contain_exactly(p14_1, p14_2, p14_3, p14_4) - expect(p14_4.reload.duplicate_group.publications).to contain_exactly(p14_1, p14_2, p14_3, p14_4) - expect(p15_1.reload.duplicate_group.publications).to contain_exactly(p15_1, p15_2) - expect(p16_1.reload.duplicate_group.publications).to contain_exactly(p16_1, p16_2) + expect(p14_1.reload.duplicate_group.publications).to match_array [p14_1, p14_2, p14_3, p14_4] + expect(p14_2.reload.duplicate_group.publications).to match_array [p14_1, p14_2, p14_3, p14_4] + expect(p14_3.reload.duplicate_group.publications).to match_array [p14_1, p14_2, p14_3, p14_4] + expect(p14_4.reload.duplicate_group.publications).to match_array [p14_1, p14_2, p14_3, p14_4] + expect(p15_1.reload.duplicate_group.publications).to match_array [p15_1, p15_2] + expect(p16_1.reload.duplicate_group.publications).to match_array [p16_1, p16_2] end it "sets grouped publications' visible statuses to false when publication was not already grouped before process started" do described_class.group_duplicates - expect(p1_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(true, false, false, false) - expect(p2_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false) + expect(p1_1.reload.duplicate_group.publications.map(&:visible)).to match_array [true, false, false, false] + 
expect(p2_1.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false] expect(p3_1.reload.visible).to be true expect(p3_2.reload.visible).to be true - expect(p4_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false) - expect(p5_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false) + expect(p4_1.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false] + expect(p5_1.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false] expect(p6_1.reload.visible).to be true expect(p6_2.reload.visible).to be true - expect(p7_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false) - expect(p8_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false) + expect(p7_1.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false] + expect(p8_1.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false] expect(p9_1.reload.visible).to be true expect(p9_2.reload.visible).to be true - expect(p10_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false) + expect(p10_1.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false] expect(p11_1.reload.visible).to be true expect(p11_2.reload.visible).to be true expect(p11_3.reload.visible).to be true - expect(p12_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false, false) - expect(p12_2.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false, false) - expect(p12_3.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false, false) + expect(p12_1.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false, false] + expect(p12_2.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false, false] + 
expect(p12_3.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false, false] expect(p13_1.reload.visible).to be true expect(p13_2.reload.visible).to be true - expect(p14_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false, false, false) - expect(p14_2.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false, false, false) - expect(p14_3.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false, false, false) - expect(p14_4.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false, false, false) - expect(p15_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(false, false) - expect(p16_1.reload.duplicate_group.publications.map(&:visible)).to contain_exactly(true, false) + expect(p14_1.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false, false, false] + expect(p14_2.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false, false, false] + expect(p14_3.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false, false, false] + expect(p14_4.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false, false, false] + expect(p15_1.reload.duplicate_group.publications.map(&:visible)).to match_array [false, false] + expect(p16_1.reload.duplicate_group.publications.map(&:visible)).to match_array [true, false] expect(p17_1.reload.visible).to be true expect(p17_2.reload.visible).to be true end @@ -484,7 +484,7 @@ it 'leaves the publications in the existing group' do described_class.group_duplicates_of(p1) - expect(group.reload.publications).to contain_exactly(p1, p2, p3) + expect(group.reload.publications).to match_array [p1, p2, p3] end end @@ -1099,7 +1099,7 @@ new_group = pub1.reload.duplicate_group - expect(new_group.publications).to contain_exactly(pub1, pub2, pub3) + expect(new_group.publications).to match_array [pub1, 
pub2, pub3] end end @@ -1111,7 +1111,7 @@ it 'adds all of the given publications to the existing group' do described_class.group_publications([pub1, pub2, grouped_pub1]) - expect(existing_group1.reload.publications).to contain_exactly(pub1, pub2, grouped_pub1, grouped_pub2) + expect(existing_group1.reload.publications).to match_array [pub1, pub2, grouped_pub1, grouped_pub2] end end @@ -1123,7 +1123,7 @@ it 'adds all of the given publications to the existing group' do described_class.group_publications([pub2, grouped_pub1, grouped_pub2]) - expect(existing_group1.reload.publications).to contain_exactly(pub2, grouped_pub1, grouped_pub2) + expect(existing_group1.reload.publications).to match_array [pub2, grouped_pub1, grouped_pub2] end end @@ -1137,7 +1137,11 @@ remaining_group = grouped_pub1.reload.duplicate_group || grouped_pub3.reload.duplicate_group - expect(remaining_group.publications).to contain_exactly(pub2, grouped_pub1, grouped_pub2, grouped_pub3, grouped_pub4) + expect(remaining_group.publications).to match_array [pub2, + grouped_pub1, + grouped_pub2, + grouped_pub3, + grouped_pub4] end end end @@ -1182,13 +1186,13 @@ expect { group2.reload }.to raise_error ActiveRecord::RecordNotFound expect(group3.reload).to eq group3 - expect(group1_pub1.reload.imports).to contain_exactly(group1_pub1_pure_import, group1_pub2_ai_import) + expect(group1_pub1.reload.imports).to match_array [group1_pub1_pure_import, group1_pub2_ai_import] expect { group1_pub2.reload }.to raise_error ActiveRecord::RecordNotFound - expect(group2_pub1.reload.imports).to contain_exactly(group2_pub1_pure_import, group2_pub2_ai_import) + expect(group2_pub1.reload.imports).to match_array [group2_pub1_pure_import, group2_pub2_ai_import] expect { group2_pub2.reload }.to raise_error ActiveRecord::RecordNotFound - expect(group3.reload.publications).to contain_exactly(group3_pub1, group3_pub2) + expect(group3.reload.publications).to match_array [group3_pub1, group3_pub2] 
expect(group3_pub1.reload.imports).to eq [group3_pub1_ai_import] expect(group3_pub2.reload.imports).to eq [group3_pub2_ai_import] end @@ -1371,7 +1375,7 @@ it 'does not change the group membership' do group.auto_merge - expect(group.reload.publications).to contain_exactly(pub1, pub2) + expect(group.reload.publications).to match_array [pub1, pub2] end it 'returns false' do @@ -1400,7 +1404,7 @@ it 'does not change the group membership' do group.auto_merge - expect(group.reload.publications).to contain_exactly(pub1, pub2) + expect(group.reload.publications).to match_array [pub1, pub2] end it "does not change the member publications' imports" do @@ -1437,7 +1441,7 @@ it 'does not change the group membership' do group.auto_merge - expect(group.reload.publications).to contain_exactly(pub1, pub2) + expect(group.reload.publications).to match_array [pub1, pub2] end it "does not change the member publications' imports" do @@ -1471,7 +1475,7 @@ it "reassigns the Activity Insight publication's import to the Pure publication" do group.auto_merge - expect(pub2.reload.imports).to contain_exactly(ai_import, pure_import) + expect(pub2.reload.imports).to match_array [ai_import, pure_import] end it "does not mark the Pure publication's import as having been auto merged" do @@ -1513,7 +1517,7 @@ it 'does not change the group membership' do group.auto_merge - expect(group.reload.publications).to contain_exactly(pub1, pub2) + expect(group.reload.publications).to match_array [pub1, pub2] end it "does not change the member publications' imports" do @@ -1557,7 +1561,7 @@ it 'does not change the group membership' do group.auto_merge - expect(group.reload.publications).to contain_exactly(pub1, pub2, pub3) + expect(group.reload.publications).to match_array [pub1, pub2, pub3] end it "does not change the member publications' imports" do diff --git a/spec/component/models/external_publication_waiver_spec.rb b/spec/component/models/external_publication_waiver_spec.rb index b7f29aed5..6a5a46c5b 
100644 --- a/spec/component/models/external_publication_waiver_spec.rb +++ b/spec/component/models/external_publication_waiver_spec.rb @@ -70,7 +70,7 @@ let!(:pub5) { create(:publication, title: 'A Publication with a Distinct Title of Some Sort') } it 'returns all publications with a title that closely matches the title in the waiver' do - expect(waiver.matching_publications).to contain_exactly(pub1, pub2, pub5) + expect(waiver.matching_publications).to match_array [pub1, pub2, pub5] end end diff --git a/spec/component/models/organization_spec.rb b/spec/component/models/organization_spec.rb index 56225e319..ed3d6163a 100644 --- a/spec/component/models/organization_spec.rb +++ b/spec/component/models/organization_spec.rb @@ -83,7 +83,7 @@ end it 'returns a collection of unique users who have been members of the organization' do - expect(org.users).to contain_exactly(u1, u2) + expect(org.users).to match_array [u1, u2] end end @@ -155,7 +155,7 @@ end it 'returns visible, unique publications by users who were members of the organization or one of its descendants when they were published' do - expect(org.all_publications).to contain_exactly(pub_1, pub_4, pub_5, pub_7, pub_9) + expect(org.all_publications).to match_array [pub_1, pub_4, pub_5, pub_7, pub_9] end end @@ -185,7 +185,7 @@ end it 'retuns the users who are members of the organization or one of its descendants' do - expect(org.all_users).to contain_exactly(user_1, user_2, user_3) + expect(org.all_users).to match_array [user_1, user_2, user_3] end end @@ -226,7 +226,7 @@ let!(:child_org_child) { create(:organization, parent: child_org) } it 'returns the id of the parent organization and all its descendant organization ids' do - expect(org.descendant_ids).to contain_exactly(org.id, child_org.id, child_org_child.id) + expect(org.descendant_ids).to match_array([org.id, child_org.id, child_org_child.id]) end end diff --git a/spec/component/models/performance_spec.rb b/spec/component/models/performance_spec.rb index 
2858d8baa..4066ff45b 100644 --- a/spec/component/models/performance_spec.rb +++ b/spec/component/models/performance_spec.rb @@ -53,7 +53,7 @@ let(:invisible_performance) { create(:performance, visible: false) } it 'returns the performances that are marked as visible' do - expect(described_class.visible).to contain_exactly(visible_performance1, visible_performance2) + expect(described_class.visible).to match_array [visible_performance1, visible_performance2] end end end diff --git a/spec/component/models/presentation_spec.rb b/spec/component/models/presentation_spec.rb index 66cd377d8..7cefd5a9f 100644 --- a/spec/component/models/presentation_spec.rb +++ b/spec/component/models/presentation_spec.rb @@ -49,7 +49,7 @@ let(:invisible_pres) { create(:presentation, visible: false) } it 'returns the presentations that are marked as visible' do - expect(described_class.visible).to contain_exactly(visible_pres1, visible_pres2) + expect(described_class.visible).to match_array [visible_pres1, visible_pres2] end end diff --git a/spec/component/models/psu_law_school_oai_creator_spec.rb b/spec/component/models/psu_law_school_oai_creator_spec.rb index df83beb54..f15abcc21 100644 --- a/spec/component/models/psu_law_school_oai_creator_spec.rb +++ b/spec/component/models/psu_law_school_oai_creator_spec.rb @@ -136,7 +136,7 @@ end it 'returns the matching users' do - expect(creator.ambiguous_user_matches).to contain_exactly(user1, user2) + expect(creator.ambiguous_user_matches).to match_array [user1, user2] end end end diff --git a/spec/component/models/publication_spec.rb b/spec/component/models/publication_spec.rb index 58391eb7e..94dc3d047 100644 --- a/spec/component/models/publication_spec.rb +++ b/spec/component/models/publication_spec.rb @@ -188,7 +188,7 @@ let(:pv) { 'acceptedVersion' } it "returns the publication's Activity Insight open access files with versions that match the publication's preferred version" do - expect(pub.preferred_ai_oa_files).to contain_exactly(aif1) + 
expect(pub.preferred_ai_oa_files).to match_array [aif1] end end @@ -196,7 +196,7 @@ let(:pv) { 'publishedVersion' } it "returns the publication's Activity Insight open access files with versions that match the publication's preferred version" do - expect(pub.preferred_ai_oa_files).to contain_exactly(aif2) + expect(pub.preferred_ai_oa_files).to match_array [aif2] end end @@ -204,7 +204,7 @@ let(:pv) { 'Published or Accepted' } it "returns the publication's Activity Insight open access files with versions that match the publication's preferred version" do - expect(pub.preferred_ai_oa_files).to contain_exactly(aif1, aif2) + expect(pub.preferred_ai_oa_files).to match_array [aif1, aif2] end end @@ -379,7 +379,7 @@ let(:invisible_pub) { create(:publication, visible: false) } it 'returns the publications that are marked as visible' do - expect(described_class.visible).to contain_exactly(visible_pub1, visible_pub2) + expect(described_class.visible).to match_array [visible_pub1, visible_pub2] end end @@ -430,7 +430,7 @@ end it 'returns visible, unique publications by users who were members of an organization when they were published' do - expect(described_class.published_during_membership).to contain_exactly(pub_1, pub_4, pub_5, pub_6, pub_7) + expect(described_class.published_during_membership).to match_array [pub_1, pub_4, pub_5, pub_6, pub_7] end end @@ -477,7 +477,7 @@ let!(:pub5) { create(:publication, published_on: Date.new(2020, 7, 2), status: 'In Press') } it "returns publications that were published after Penn State's open access policy went into effect and have a status of 'Published'" do - expect(described_class.subject_to_open_access_policy).to contain_exactly(pub2, pub3) + expect(described_class.subject_to_open_access_policy).to match_array [pub2, pub3] end end @@ -528,7 +528,7 @@ before { oa_location.update_column(:publication_id, nil) } it 'only returns publications without scholarsphere open access locations' do - 
expect(described_class.with_no_scholarsphere_oa_locations).to contain_exactly(pub1, pub2, pub3, pub7) + expect(described_class.with_no_scholarsphere_oa_locations).to match_array [pub1, pub2, pub3, pub7] end end @@ -573,7 +573,7 @@ let!(:activity_insight_oa_file6) { create(:activity_insight_oa_file, publication: pub6) } it 'returns all activity insight oa publications without a scholarsphere source or gold/hyrbid oa status' do - expect(described_class.troubleshooting_list).to contain_exactly(pub1, pub2, pub3) + expect(described_class.troubleshooting_list).to match_array [pub1, pub2, pub3] end end @@ -1111,7 +1111,7 @@ describe '.activity_insight_oa_publication' do it 'returns not_open_access publications that are linked to an activity insight oa file with a location' do - expect(described_class.activity_insight_oa_publication).to contain_exactly( + expect(described_class.activity_insight_oa_publication).to match_array [ pub2, pub2b, pub2c, @@ -1151,19 +1151,19 @@ pub14b, pub14c, pub14e - ) + ] end end describe '.doi_failed_verification' do it 'returns activity_insight_oa_publications whose doi_verified is false' do - expect(described_class.doi_failed_verification).to contain_exactly(pub2) + expect(described_class.doi_failed_verification).to match_array [pub2] end end describe '.oa_workflow_needs_doi_verification' do it 'returns activity_insight_oa_publications whose doi_verified is nil' do - expect(described_class.oa_workflow_needs_doi_verification).to contain_exactly( + expect(described_class.oa_workflow_needs_doi_verification).to match_array [ pub4, pub4b, pub11, @@ -1178,23 +1178,25 @@ pub13c, pub13d, pub13o - ) + ] end end describe '.all_pubs_needs_doi_verification' do it 'returns all publications whose doi_verified is false or nil' do - expect(described_class.all_pubs_needs_doi_verification).to contain_exactly( + expect(described_class.all_pubs_needs_doi_verification).to match_array [ pub1, pub2, pub2b, pub2c, pub4, + pub4b, pub5, pub7, pub11, pub11b, + pub11c, 
pub11d, pub12, @@ -1215,7 +1217,7 @@ pub13m, pub13n, pub13o - ) + ] end it 'does not return publications with publication_type Extension Publication' do @@ -1226,49 +1228,66 @@ describe '.file_version_check_failed' do it "returns activity_insight_oa_publications whose associated files' versions still contain an 'unknown' version and no correct version" do - expect(described_class.file_version_check_failed).to contain_exactly(pub11) + expect(described_class.file_version_check_failed).to match_array [pub11] end end describe '.wrong_file_version_base' do it "returns activity_insight_oa_publications whose associated files' versions does not contain an 'unknown' version or correct version" do - expect(described_class.wrong_file_version_base).to contain_exactly(pub12, pub12d) + expect(described_class.wrong_file_version_base).to match_array [pub12, pub12d] end end describe '.wrong_file_version' do it "returns activity_insight_oa_publications whose associated files' versions does not contain an 'unknown' version or correct version and email notifications have not been sent for at least one file" do - expect(described_class.wrong_file_version).to contain_exactly(pub12) + expect(described_class.wrong_file_version).to match_array [pub12] end end describe '.wrong_version_author_notified' do it "returns activity_insight_oa_publications whose associated files' versions does not contain an 'unknown' version or correct version and email notifications have been sent for each file" do - expect(described_class.wrong_version_author_notified).to contain_exactly(pub12d) + expect(described_class.wrong_version_author_notified).to match_array [pub12d] end end describe '.needs_oa_metadata_search' do it 'returns activity_insight_oa_publications with a verified doi that have not been checked' do - expect(described_class.needs_oa_metadata_search).to contain_exactly(pub8, pub8c, pub9a, pub9b, pub9c, pub9d, pub9e, pub9f, pub9g, pub9h, pub9i, pub9j, pub9k, pub10, pub10b, pub12b) + 
expect(described_class.needs_oa_metadata_search).to match_array [ + pub8, + pub8c, + pub9a, + pub9b, + pub9c, + pub9d, + pub9e, + pub9f, + pub9g, + pub9h, + pub9i, + pub9j, + pub9k, + pub10, + pub10b, + pub12b + ] end end describe '.needs_permissions_check' do it 'returns activity_insight_oa_publications that have not been checked for a preferred version' do - expect(described_class.needs_permissions_check).to contain_exactly(pub8, pub8c) + expect(described_class.needs_permissions_check).to match_array [pub8, pub8c] end end describe '.needs_manual_preferred_version_check' do it 'returns activity_insight_oa_publications that have had their permissions checked but are still missing permissions data' do - expect(described_class.needs_manual_preferred_version_check).to contain_exactly(pub9b, pub9k) + expect(described_class.needs_manual_preferred_version_check).to match_array [pub9b, pub9k] end end describe '.preferred_file_version_none' do it 'returns activity_insight_oa_publications where the preferred version is none' do - expect(described_class.preferred_file_version_none).to contain_exactly(pub11b, pub14c) + expect(described_class.preferred_file_version_none).to match_array [pub11b, pub14c] end end @@ -1276,7 +1295,7 @@ # TODO: Ideally, we should create more records to exercise more of the logic in the # SQL query for this scope. it 'returns activity_insight_oa_publications that have a preferred version, matching file(s) with incomplete permissions metadata, and no matching file(s) with complete permissions metadata' do - expect(described_class.needs_manual_permissions_review).to contain_exactly(pub14a) + expect(described_class.needs_manual_permissions_review).to match_array [pub14a] end end @@ -1284,13 +1303,13 @@ # TODO: Ideally, we should create more records to exercise more of the logic in the # SQL query for this scope. 
it 'returns activity_insight_oa_publications with a preferred version and a downloaded file that matches the preferred version' do - expect(described_class.ready_for_metadata_review).to contain_exactly(pub13b, pub13c) + expect(described_class.ready_for_metadata_review).to match_array [pub13b, pub13c] end end describe '.flagged_for_review' do it 'returns activity_insight_oa_publications that have been flagged for review' do - expect(described_class.flagged_for_review).to contain_exactly(pub2c, pub4b, pub8c, pub9k, pub10b, pub11c, pub11d, pub12c, pub13o, pub14e) + expect(described_class.flagged_for_review).to match_array [pub2c, pub4b, pub8c, pub9k, pub10b, pub11c, pub11d, pub12c, pub13o, pub14e] end end end @@ -1512,7 +1531,7 @@ let(:pub7) { create(:publication, publication_type: 'Trade Journal Article') } it 'returns publications that have open access publication types' do - expect(described_class.not_extension_publication).to contain_exactly(pub1, pub2, pub3, pub4, pub5, pub7) + expect(described_class.not_extension_publication).to match_array [pub1, pub2, pub3, pub4, pub5, pub7] end end @@ -1526,7 +1545,7 @@ let(:pub7) { create(:publication, publication_type: 'Trade Journal Article') } it 'returns publications that have open access publication types' do - expect(described_class.oa_publication).to contain_exactly(pub1, pub2, pub3, pub6) + expect(described_class.oa_publication).to match_array [pub1, pub2, pub3, pub6] end end @@ -1540,7 +1559,7 @@ let(:pub7) { create(:publication, publication_type: 'Trade Journal Article') } it 'returns publications that do not have open access publication types' do - expect(described_class.non_oa_publication).to contain_exactly(pub4, pub5, pub7) + expect(described_class.non_oa_publication).to match_array [pub4, pub5, pub7] end end @@ -1549,7 +1568,7 @@ let(:pub2) { create(:publication, status: 'In Press') } it 'returns publications that are not journal articles' do - expect(described_class.published).to contain_exactly(pub1) + 
expect(described_class.published).to match_array [pub1] end end @@ -1642,7 +1661,7 @@ end it "returns an array of the source identifiers from the publication's Activity Insight imports" do - expect(pub.ai_import_identifiers).to contain_exactly('ai-abc123', 'ai-xyz789') + expect(pub.ai_import_identifiers).to match_array ['ai-abc123', 'ai-xyz789'] end end end @@ -1674,7 +1693,7 @@ end it "returns an array of the source identifiers from the publication's Pure imports" do - expect(pub.pure_import_identifiers).to contain_exactly('pure-abc123', 'pure-xyz789') + expect(pub.pure_import_identifiers).to match_array ['pure-abc123', 'pure-xyz789'] end end end @@ -2405,7 +2424,10 @@ it 'reassigns all of the imports from the given publications to the publication' do pub1.merge!([pub2, pub3, pub4]) - expect(pub1.reload.imports).to contain_exactly(pub1_import1, pub2_import1, pub2_import2, pub3_import1) + expect(pub1.reload.imports).to match_array [pub1_import1, + pub2_import1, + pub2_import2, + pub3_import1] end it 'transfers all of the authorships from all of the given publications to the publication' do @@ -2497,7 +2519,7 @@ auth2 = pub1.authorships.find_by(user: user2) auth3 = pub1.authorships.find_by(user: user3) - expect(auth1.scholarsphere_work_deposits).to contain_exactly(deposit1, deposit2, deposit3) + expect(auth1.scholarsphere_work_deposits).to match_array [deposit1, deposit2, deposit3] expect(auth2.scholarsphere_work_deposits).to eq [] expect(auth3.scholarsphere_work_deposits).to eq [] end @@ -2570,14 +2592,18 @@ pub1.merge!([pub2, pub3, pub4]) expect(pub1.reload.activity_insight_oa_files.count).to eq 2 - expect(pub1.reload.activity_insight_oa_files).to contain_exactly(activity_insight_oa_file1, activity_insight_oa_file2) + expect(pub1.reload.activity_insight_oa_files).to match_array [activity_insight_oa_file1, + activity_insight_oa_file2] end context 'when the given publications include the publication' do it 'reassigns all of the imports from the given publications to 
the publication' do pub1.merge!([pub1, pub2, pub3, pub4]) - expect(pub1.reload.imports).to contain_exactly(pub1_import1, pub2_import1, pub2_import2, pub3_import1) + expect(pub1.reload.imports).to match_array [pub1_import1, + pub2_import1, + pub2_import2, + pub3_import1] end it 'transfers all activity insight oa files from publications to the publication' do @@ -2585,7 +2611,8 @@ pub1.merge!([pub2, pub3, pub4]) expect(pub1.reload.activity_insight_oa_files.count).to eq 2 - expect(pub1.reload.activity_insight_oa_files).to contain_exactly(activity_insight_oa_file1, activity_insight_oa_file2) + expect(pub1.reload.activity_insight_oa_files).to match_array [activity_insight_oa_file1, + activity_insight_oa_file2] end it 'transfers doi verification from publications to the publication' do @@ -2633,9 +2660,9 @@ begin pub1.merge!([pub2, pub3, pub4]) rescue RuntimeError; end - expect(pub1.reload.imports).to contain_exactly(pub1_import1) - expect(pub2.reload.imports).to contain_exactly(pub2_import1, pub2_import2) - expect(pub3.reload.imports).to contain_exactly(pub3_import1) + expect(pub1.reload.imports).to match_array [pub1_import1] + expect(pub2.reload.imports).to match_array [pub2_import1, pub2_import2] + expect(pub3.reload.imports).to match_array [pub3_import1] expect(pub4.reload.imports).to eq [] end @@ -2781,7 +2808,10 @@ it 'reassigns all of the imports from the given publications to the publication' do pub1.merge!([pub2, pub3, pub4]) - expect(pub1.reload.imports).to contain_exactly(pub1_import1, pub2_import1, pub2_import2, pub3_import1) + expect(pub1.reload.imports).to match_array [pub1_import1, + pub2_import1, + pub2_import2, + pub3_import1] end it 'deletes the given publications' do @@ -2893,7 +2923,7 @@ auth2 = pub1.authorships.find_by(user: user2) auth3 = pub1.authorships.find_by(user: user3) - expect(auth1.scholarsphere_work_deposits).to contain_exactly(deposit1, deposit2, deposit3) + expect(auth1.scholarsphere_work_deposits).to match_array [deposit1, deposit2, 
deposit3] expect(auth2.scholarsphere_work_deposits).to eq [] expect(auth3.scholarsphere_work_deposits).to eq [] end @@ -2952,7 +2982,8 @@ pub1.merge!([pub2, pub3, pub4]) expect(pub1.reload.activity_insight_oa_files.count).to eq 2 - expect(pub1.reload.activity_insight_oa_files).to contain_exactly(activity_insight_oa_file1, activity_insight_oa_file2) + expect(pub1.reload.activity_insight_oa_files).to match_array [activity_insight_oa_file1, + activity_insight_oa_file2] end end @@ -2963,7 +2994,10 @@ it 'reassigns all of the imports from the given publications to the publication' do pub1.merge!([pub2, pub3, pub4]) - expect(pub1.reload.imports).to contain_exactly(pub1_import1, pub2_import1, pub2_import2, pub3_import1) + expect(pub1.reload.imports).to match_array [pub1_import1, + pub2_import1, + pub2_import2, + pub3_import1] end it 'deletes the given publications' do @@ -3076,7 +3110,7 @@ auth2 = pub1.authorships.find_by(user: user2) auth3 = pub1.authorships.find_by(user: user3) - expect(auth1.scholarsphere_work_deposits).to contain_exactly(deposit1, deposit2, deposit3) + expect(auth1.scholarsphere_work_deposits).to match_array [deposit1, deposit2, deposit3] expect(auth2.scholarsphere_work_deposits).to eq [] expect(auth3.scholarsphere_work_deposits).to eq [] end @@ -3135,7 +3169,8 @@ pub1.merge!([pub2, pub3, pub4]) expect(pub1.reload.activity_insight_oa_files.count).to eq 2 - expect(pub1.reload.activity_insight_oa_files).to contain_exactly(activity_insight_oa_file1, activity_insight_oa_file2) + expect(pub1.reload.activity_insight_oa_files).to match_array [activity_insight_oa_file1, + activity_insight_oa_file2] end end @@ -3150,9 +3185,9 @@ begin pub1.merge!([pub2, pub3, pub4]) rescue Publication::NonDuplicateMerge; end - expect(pub1.reload.imports).to contain_exactly(pub1_import1) - expect(pub2.reload.imports).to contain_exactly(pub2_import1, pub2_import2) - expect(pub3.reload.imports).to contain_exactly(pub3_import1) + expect(pub1.reload.imports).to match_array 
[pub1_import1] + expect(pub2.reload.imports).to match_array [pub2_import1, pub2_import2] + expect(pub3.reload.imports).to match_array [pub3_import1] expect(pub4.reload.imports).to eq [] end @@ -3176,7 +3211,7 @@ begin pub1.merge!([pub2, pub3, pub4]) rescue Publication::NonDuplicateMerge; end - expect(ndpg.reload.publications).to contain_exactly(pub2, pub4) + expect(ndpg.reload.publications).to match_array [pub2, pub4] end it 'does not transfer any authorships' do @@ -3311,9 +3346,9 @@ begin pub1.merge!([pub2, pub3, pub4]) rescue Publication::NonDuplicateMerge; end - expect(pub1.reload.imports).to contain_exactly(pub1_import1) - expect(pub2.reload.imports).to contain_exactly(pub2_import1, pub2_import2) - expect(pub3.reload.imports).to contain_exactly(pub3_import1) + expect(pub1.reload.imports).to match_array [pub1_import1] + expect(pub2.reload.imports).to match_array [pub2_import1, pub2_import2] + expect(pub3.reload.imports).to match_array [pub3_import1] expect(pub4.reload.imports).to eq [] end @@ -3337,8 +3372,8 @@ begin pub1.merge!([pub2, pub3, pub4]) rescue Publication::NonDuplicateMerge; end - expect(ndpg1.reload.publications).to contain_exactly(pub2, pub4) - expect(ndpg2.reload.publications).to contain_exactly(pub2, pub4) + expect(ndpg1.reload.publications).to match_array [pub2, pub4] + expect(ndpg2.reload.publications).to match_array [pub2, pub4] end it 'does not transfer any authorships' do @@ -3472,9 +3507,9 @@ begin pub1.merge!([pub2, pub3, pub4]) rescue Publication::NonDuplicateMerge; end - expect(pub1.reload.imports).to contain_exactly(pub1_import1) - expect(pub2.reload.imports).to contain_exactly(pub2_import1, pub2_import2) - expect(pub3.reload.imports).to contain_exactly(pub3_import1) + expect(pub1.reload.imports).to match_array [pub1_import1] + expect(pub2.reload.imports).to match_array [pub2_import1, pub2_import2] + expect(pub3.reload.imports).to match_array [pub3_import1] expect(pub4.reload.imports).to eq [] end @@ -3498,7 +3533,7 @@ begin 
pub1.merge!([pub2, pub3, pub4]) rescue Publication::NonDuplicateMerge; end - expect(ndpg.reload.publications).to contain_exactly(pub1, pub3) + expect(ndpg.reload.publications).to match_array [pub1, pub3] end it 'does not transfer any authorships' do @@ -3632,9 +3667,9 @@ begin pub1.merge!([pub2, pub3, pub4]) rescue Publication::NonDuplicateMerge; end - expect(pub1.reload.imports).to contain_exactly(pub1_import1) - expect(pub2.reload.imports).to contain_exactly(pub2_import1, pub2_import2) - expect(pub3.reload.imports).to contain_exactly(pub3_import1) + expect(pub1.reload.imports).to match_array [pub1_import1] + expect(pub2.reload.imports).to match_array [pub2_import1, pub2_import2] + expect(pub3.reload.imports).to match_array [pub3_import1] expect(pub4.reload.imports).to eq [] end @@ -3658,7 +3693,7 @@ begin pub1.merge!([pub2, pub3, pub4]) rescue Publication::NonDuplicateMerge; end - expect(ndpg.reload.publications).to contain_exactly(pub1, pub2, pub3, pub4) + expect(ndpg.reload.publications).to match_array [pub1, pub2, pub3, pub4] end it 'does not transfer any authorships' do diff --git a/spec/component/models/scholarsphere_work_deposit_spec.rb b/spec/component/models/scholarsphere_work_deposit_spec.rb index 9ff8b1fee..31a106454 100644 --- a/spec/component/models/scholarsphere_work_deposit_spec.rb +++ b/spec/component/models/scholarsphere_work_deposit_spec.rb @@ -550,7 +550,7 @@ end it "returns a file object for each of the deposit's associated uploads" do - expect(dep.files).to contain_exactly(file1, file2) + expect(dep.files).to match_array [file1, file2] end end diff --git a/spec/component/models/user_spec.rb b/spec/component/models/user_spec.rb index 81a7e4739..eeb88f7fa 100644 --- a/spec/component/models/user_spec.rb +++ b/spec/component/models/user_spec.rb @@ -384,7 +384,7 @@ end it 'returns one instance of each matching user' do - expect(described_class.find_all_by_wos_pub(wp)).to contain_exactly(u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13) + 
expect(described_class.find_all_by_wos_pub(wp)).to match_array [u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13] end end end @@ -406,7 +406,7 @@ before { create(:user, orcid_identifier: nil) } it 'returns one instance of each matching user' do - expect(described_class.find_confirmed_by_wos_pub(wp)).to contain_exactly(u1, u2) + expect(described_class.find_confirmed_by_wos_pub(wp)).to match_array [u1, u2] end end end @@ -422,7 +422,7 @@ let(:grant) { double 'grant', investigators: [i1, i2] } it 'returns the existing users' do - expect(described_class.find_by_nsf_grant(grant)).to contain_exactly(u1, u2) + expect(described_class.find_by_nsf_grant(grant)).to match_array [u1, u2] end end end @@ -826,7 +826,10 @@ confirmed: true) } it 'returns only users who should currently receive an email reminder about open access publications' do - expect(described_class.needs_open_access_notification).to contain_exactly(email_user_1, email_user_2, email_user_3, email_user_4) + expect(described_class.needs_open_access_notification).to match_array [email_user_1, + email_user_2, + email_user_3, + email_user_4] end end @@ -960,7 +963,6 @@ publication: other_pub_16, confirmed: true, open_access_notification_sent_at: 1.month.ago) } - let!(:org) { create(:organization) } let!(:now) { DateTime.now } let!(:membership) { create(:user_organization_membership, @@ -971,20 +973,20 @@ # Publications that meet the criteria for an open access reminder 7.times do |x| - let!(:"potential_pub_#{x + 1}") { create(:publication, - # The publication dates are different so that we can test the sorting of the results - published_on: 1.month.ago + x.days) } - let!(:"p_auth_#{x + 1}") { create(:authorship, - user: user, - publication: send("potential_pub_#{x + 1}"), - confirmed: true, - open_access_notification_sent_at: 1.month.ago) } + let!("potential_pub_#{x + 1}".to_sym) { create(:publication, + # The publication dates are different so that we can test the sorting of the results + published_on: 
1.month.ago + x.days) } + let!("p_auth_#{x + 1}".to_sym) { create(:authorship, + user: user, + publication: send("potential_pub_#{x + 1}"), + confirmed: true, + open_access_notification_sent_at: 1.month.ago) } end it "returns the user's six most recent recent publications that don't have any associated open access information and have a 'Published' status" do results = user.notifiable_potential_open_access_publications # We expect potential_pub_1 to be filtered out because we only want to include the most recent 6 publications - expect(results).to contain_exactly(potential_pub_7, potential_pub_6, potential_pub_5, potential_pub_4, potential_pub_3, potential_pub_2) + expect(results).to match_array [potential_pub_7, potential_pub_6, potential_pub_5, potential_pub_4, potential_pub_3, potential_pub_2] expect(results.length).to eq 6 # We expect the results to be sorted by publication date in descending order expect(results[0].published_on).to be > results[1].published_on diff --git a/spec/component/queries/api/v1/user_query_spec.rb b/spec/component/queries/api/v1/user_query_spec.rb index 45e4b3170..a7e4d1a92 100644 --- a/spec/component/queries/api/v1/user_query_spec.rb +++ b/spec/component/queries/api/v1/user_query_spec.rb @@ -43,7 +43,7 @@ end it "returns all of the user's grants" do - expect(uq.grants).to contain_exactly(g1, g2) + expect(uq.grants).to match_array [g1, g2] end end end @@ -110,7 +110,7 @@ context 'when given params with a flag to include unconfirmed publications' do it "returns all of the user's visible publications" do - expect(uq.publications({ include_unconfirmed: true })).to contain_exactly(vis_conf_pub, vis_unconf_pub) + expect(uq.publications({ include_unconfirmed: true })).to match_array [vis_conf_pub, vis_unconf_pub] end end end diff --git a/spec/component/serializers/api/v1/publication_serializer_spec.rb b/spec/component/serializers/api/v1/publication_serializer_spec.rb index 3937f2b36..7cd4acfe4 100644 --- 
a/spec/component/serializers/api/v1/publication_serializer_spec.rb +++ b/spec/component/serializers/api/v1/publication_serializer_spec.rb @@ -154,13 +154,16 @@ end it 'includes profile preferences' do - expect(serialized_data_attributes(publication)[:profile_preferences]).to contain_exactly({ user_id: u1.id, - webaccess_id: 'abc123', - visible_in_profile: true, - position_in_profile: 4 }, { user_id: u2.id, - webaccess_id: 'def456', - visible_in_profile: false, - position_in_profile: nil }) + expect(serialized_data_attributes(publication)[:profile_preferences]).to match_array( + [{ user_id: u1.id, + webaccess_id: 'abc123', + visible_in_profile: true, + position_in_profile: 4 }, + { user_id: u2.id, + webaccess_id: 'def456', + visible_in_profile: false, + position_in_profile: nil }] + ) end end end diff --git a/spec/component/services/oab_permissions_service_spec.rb b/spec/component/services/oab_permissions_service_spec.rb index 5fa9b7f20..b5bbd4ca2 100644 --- a/spec/component/services/oab_permissions_service_spec.rb +++ b/spec/component/services/oab_permissions_service_spec.rb @@ -188,7 +188,8 @@ context 'when accepted version is present and published version is not' do before do - allow(service).to receive_messages(accepted_version: { 'version' => I18n.t('file_versions.accepted_version') }, published_version: {}) + allow(service).to receive(:accepted_version).and_return({ 'version' => I18n.t('file_versions.accepted_version') }) + allow(service).to receive(:published_version).and_return({}) end it 'returns true' do @@ -198,7 +199,8 @@ context 'when published version is present and accepted version is not' do before do - allow(service).to receive_messages(accepted_version: {}, published_version: { 'version' => I18n.t('file_versions.published_version') }) + allow(service).to receive(:accepted_version).and_return({}) + allow(service).to receive(:published_version).and_return({ 'version' => I18n.t('file_versions.published_version') }) end it 'returns true' do @@ -208,7 
+210,8 @@ context 'when no version is present' do before do - allow(service).to receive_messages(accepted_version: {}, published_version: {}) + allow(service).to receive(:accepted_version).and_return({}) + allow(service).to receive(:published_version).and_return({}) end it 'returns false' do diff --git a/spec/component/services/psu_identity_user_service_spec.rb b/spec/component/services/psu_identity_user_service_spec.rb index e2e5859ef..c4011cea0 100644 --- a/spec/component/services/psu_identity_user_service_spec.rb +++ b/spec/component/services/psu_identity_user_service_spec.rb @@ -2,7 +2,7 @@ require 'component/component_spec_helper' -describe PSUIdentityUserService, :vcr do +describe PSUIdentityUserService, vcr: true do # Note this spec uses VCR to mock HTTP requests to the actual PSU identity # server. If you change this value, you will will invalidate the VCR # cassettes and send new requests. diff --git a/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/2_35_1_1.yml b/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/2_35_1_1.yml deleted file mode 100644 index c87ac4fa7..000000000 --- a/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/2_35_1_1.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -http_interactions: -- request: - method: get - uri: https://identity.apps.psu.edu/search-service/resources/people/userid/ajk5603 - body: - encoding: US-ASCII - string: '' - headers: - User-Agent: - - Faraday v1.10.4 - Accept-Encoding: - - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 - Accept: - - "*/*" - response: - status: - code: 200 - message: OK - headers: - Date: - - Thu, 23 Jan 2025 17:41:01 GMT - Content-Type: - - application/vnd-psu.edu-v1+json - Content-Length: - - '495' - Connection: - - keep-alive - Content-Security-Policy: - - frame-ancestors 'none' - Uniqueid: - - 6568cd64-a52e-42e6-b04e-d40a957b3dc5 - X-Content-Security-Policy: - - frame-ancestors 'none' - X-Frame-Options: - - DENY - 
X-Request-Id: - - 797c2f061f5f79cc0372af85da09c651 - Strict-Transport-Security: - - max-age=15724800; includeSubDomains - body: - encoding: UTF-8 - string: '{"userid":"ajk5603","cprid":"2460424","givenName":"Alexander","middleName":"","familyName":"Kiessling","honorificSuffix":"","preferredGivenName":"Alex","preferredMiddleName":"","preferredFamilyName":"Kiessling","preferredHonorificSuffix":"","active":true,"confHold":false,"universityEmail":"ajk5603@psu.edu","serviceAccount":false,"primaryAffiliation":"STAFF","altUserids":[],"affiliation":["STAFF"],"displayName":"Alex - Kiessling","link":{"href":"https://cpr.k8s.psu.edu/cpr/resources/2460424"}}' - recorded_at: Thu, 23 Jan 2025 17:41:02 GMT -recorded_with: VCR 6.1.0 diff --git a/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/given_name/2_35_1_3_1.yml b/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/given_name/2_35_1_3_1.yml deleted file mode 100644 index 905175409..000000000 --- a/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/given_name/2_35_1_3_1.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -http_interactions: -- request: - method: get - uri: https://identity.apps.psu.edu/search-service/resources/people/userid/ajk5603 - body: - encoding: US-ASCII - string: '' - headers: - User-Agent: - - Faraday v1.10.4 - Accept-Encoding: - - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 - Accept: - - "*/*" - response: - status: - code: 200 - message: OK - headers: - Date: - - Thu, 23 Jan 2025 17:41:02 GMT - Content-Type: - - application/vnd-psu.edu-v1+json - Content-Length: - - '495' - Connection: - - keep-alive - Content-Security-Policy: - - frame-ancestors 'none' - Uniqueid: - - 8e260a8e-53b9-4aa5-92ad-3bec12385705 - X-Content-Security-Policy: - - frame-ancestors 'none' - X-Frame-Options: - - DENY - X-Request-Id: - - 44c6f53cf9b11fc76bc00b5e5af6b5eb - Strict-Transport-Security: - - max-age=15724800; includeSubDomains - body: - encoding: UTF-8 - 
string: '{"userid":"ajk5603","cprid":"2460424","givenName":"Alexander","middleName":"","familyName":"Kiessling","honorificSuffix":"","preferredGivenName":"Alex","preferredMiddleName":"","preferredFamilyName":"Kiessling","preferredHonorificSuffix":"","active":true,"confHold":false,"universityEmail":"ajk5603@psu.edu","serviceAccount":false,"primaryAffiliation":"STAFF","altUserids":[],"affiliation":["STAFF"],"displayName":"Alex - Kiessling","link":{"href":"https://cpr.k8s.psu.edu/cpr/resources/2460424"}}' - recorded_at: Thu, 23 Jan 2025 17:41:02 GMT -recorded_with: VCR 6.1.0 diff --git a/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/surname/2_35_1_2_1.yml b/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/surname/2_35_1_2_1.yml deleted file mode 100644 index d4d1e7f31..000000000 --- a/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/surname/2_35_1_2_1.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -http_interactions: -- request: - method: get - uri: https://identity.apps.psu.edu/search-service/resources/people/userid/ajk5603 - body: - encoding: US-ASCII - string: '' - headers: - User-Agent: - - Faraday v1.10.4 - Accept-Encoding: - - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 - Accept: - - "*/*" - response: - status: - code: 200 - message: OK - headers: - Date: - - Thu, 23 Jan 2025 17:41:02 GMT - Content-Type: - - application/vnd-psu.edu-v1+json - Content-Length: - - '495' - Connection: - - keep-alive - Content-Security-Policy: - - frame-ancestors 'none' - Uniqueid: - - fbec2beb-3ae5-4432-af32-fbcd646f66e1 - X-Content-Security-Policy: - - frame-ancestors 'none' - X-Frame-Options: - - DENY - X-Request-Id: - - 35022de18c64bed222f39aebc69c63c3 - Strict-Transport-Security: - - max-age=15724800; includeSubDomains - body: - encoding: UTF-8 - string: 
'{"userid":"ajk5603","cprid":"2460424","givenName":"Alexander","middleName":"","familyName":"Kiessling","honorificSuffix":"","preferredGivenName":"Alex","preferredMiddleName":"","preferredFamilyName":"Kiessling","preferredHonorificSuffix":"","active":true,"confHold":false,"universityEmail":"ajk5603@psu.edu","serviceAccount":false,"primaryAffiliation":"STAFF","altUserids":[],"affiliation":["STAFF"],"displayName":"Alex - Kiessling","link":{"href":"https://cpr.k8s.psu.edu/cpr/resources/2460424"}}' - recorded_at: Thu, 23 Jan 2025 17:41:02 GMT -recorded_with: VCR 6.1.0 diff --git a/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/user_id/2_35_1_4_1.yml b/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/user_id/2_35_1_4_1.yml deleted file mode 100644 index 774fa29c6..000000000 --- a/spec/fixtures/vcr_cassettes/User/_psu_identity/when_identity_data_is_present/user_id/2_35_1_4_1.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -http_interactions: -- request: - method: get - uri: https://identity.apps.psu.edu/search-service/resources/people/userid/ajk5603 - body: - encoding: US-ASCII - string: '' - headers: - User-Agent: - - Faraday v1.10.4 - Accept-Encoding: - - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 - Accept: - - "*/*" - response: - status: - code: 200 - message: OK - headers: - Date: - - Thu, 23 Jan 2025 17:41:02 GMT - Content-Type: - - application/vnd-psu.edu-v1+json - Content-Length: - - '495' - Connection: - - keep-alive - Content-Security-Policy: - - frame-ancestors 'none' - Uniqueid: - - 19e8132d-0e8a-41b3-89ab-136406020455 - X-Content-Security-Policy: - - frame-ancestors 'none' - X-Frame-Options: - - DENY - X-Request-Id: - - 8e59c6fa920d16abeb21c5de99a48e38 - Strict-Transport-Security: - - max-age=15724800; includeSubDomains - body: - encoding: UTF-8 - string: 
'{"userid":"ajk5603","cprid":"2460424","givenName":"Alexander","middleName":"","familyName":"Kiessling","honorificSuffix":"","preferredGivenName":"Alex","preferredMiddleName":"","preferredFamilyName":"Kiessling","preferredHonorificSuffix":"","active":true,"confHold":false,"universityEmail":"ajk5603@psu.edu","serviceAccount":false,"primaryAffiliation":"STAFF","altUserids":[],"affiliation":["STAFF"],"displayName":"Alex - Kiessling","link":{"href":"https://cpr.k8s.psu.edu/cpr/resources/2460424"}}' - recorded_at: Thu, 23 Jan 2025 17:41:02 GMT -recorded_with: VCR 6.1.0 diff --git a/spec/integration/admin/activity_insight_oa_files/update_spec.rb b/spec/integration/admin/activity_insight_oa_files/update_spec.rb index 6c6529363..fe64cd939 100644 --- a/spec/integration/admin/activity_insight_oa_files/update_spec.rb +++ b/spec/integration/admin/activity_insight_oa_files/update_spec.rb @@ -22,7 +22,7 @@ describe 'viewing the form' do it 'does not allow the location to be set' do - expect(page).to have_no_field 'Location' + expect(page).not_to have_field 'Location' expect(page).to have_content aif.location end diff --git a/spec/integration/admin/activity_insight_oa_workflow/metadata_review/index_spec.rb b/spec/integration/admin/activity_insight_oa_workflow/metadata_review/index_spec.rb index 0a7cee5d2..adba39458 100644 --- a/spec/integration/admin/activity_insight_oa_workflow/metadata_review/index_spec.rb +++ b/spec/integration/admin/activity_insight_oa_workflow/metadata_review/index_spec.rb @@ -102,8 +102,8 @@ expect(page).to have_content(aif5.user.webaccess_id) expect(page).to have_content(aif5.created_at.to_date) - expect(page).to have_no_text('Pub1') - expect(page).to have_no_text('Pub3') + expect(page).not_to have_text('Pub1') + expect(page).not_to have_text('Pub3') expect(page).to have_link 'Back', href: activity_insight_oa_workflow_path end diff --git a/spec/integration/admin/activity_insight_oa_workflow/metadata_review/show_spec.rb 
b/spec/integration/admin/activity_insight_oa_workflow/metadata_review/show_spec.rb index e0a39cc40..759e41217 100644 --- a/spec/integration/admin/activity_insight_oa_workflow/metadata_review/show_spec.rb +++ b/spec/integration/admin/activity_insight_oa_workflow/metadata_review/show_spec.rb @@ -68,7 +68,7 @@ it 'does not have a button to deposit to scholarsphere and indicates the metadata is incomplete' do expect(page).to have_content 'Insufficient metadata to upload to ScholarSphere' - expect(page).to have_no_link 'Deposit to ScholarSphere' + expect(page).not_to have_link 'Deposit to ScholarSphere' end end @@ -81,7 +81,7 @@ it 'does not have a button to deposit to scholarsphere and indicates the metadata is incomplete' do expect(page).to have_content 'Insufficient metadata to upload to ScholarSphere' expect(page).to have_content 'Not Found' - expect(page).to have_no_link 'Deposit to ScholarSphere' + expect(page).not_to have_link 'Deposit to ScholarSphere' end end @@ -93,7 +93,7 @@ it 'does not have a button to deposit to scholarsphere and indicates the deposit is pending' do expect(page).to have_content 'ScholarSphere upload pending...' 
- expect(page).to have_no_link 'Deposit to ScholarSphere' + expect(page).not_to have_link 'Deposit to ScholarSphere' end end @@ -105,7 +105,7 @@ it 'does not have a button to deposit to scholarsphere and indicates the deposit is failed' do expect(page).to have_content 'ScholarSphere upload failed' - expect(page).to have_no_link 'Deposit to ScholarSphere' + expect(page).not_to have_link 'Deposit to ScholarSphere' end end diff --git a/spec/integration/admin/activity_insight_oa_workflow/permissions_review_spec.rb b/spec/integration/admin/activity_insight_oa_workflow/permissions_review_spec.rb index 5c2a3f591..110e379fb 100644 --- a/spec/integration/admin/activity_insight_oa_workflow/permissions_review_spec.rb +++ b/spec/integration/admin/activity_insight_oa_workflow/permissions_review_spec.rb @@ -101,14 +101,14 @@ expect(page).to have_link('Pub2', href: "#{rails_admin.edit_path(model_name: :publication, id: pub2.id)}#publication_preferred_version") expect(page).to have_link(aif2a.download_filename, href: rails_admin.edit_path(model_name: :activity_insight_oa_file, id: aif2a.id)) expect(page).to have_text('https://doi.org/10.123/zzz123') - expect(page).to have_no_text aif2b.download_filename + expect(page).not_to have_text aif2b.download_filename end within "#publication_#{pub4.id}" do expect(page).to have_link('Pub4', href: "#{rails_admin.edit_path(model_name: :publication, id: pub4.id)}#publication_preferred_version") expect(page).to have_link(aif4a.download_filename, href: rails_admin.edit_path(model_name: :activity_insight_oa_file, id: aif4a.id)) expect(page).to have_text('https://doi.org/10.123/aaa123') - expect(page).to have_no_text aif4b.download_filename + expect(page).not_to have_text aif4b.download_filename end tr_elements = all('tr') @@ -116,8 +116,8 @@ expect(tr_elements[1][:id]).to eq "publication_#{pub4.id}" expect(tr_elements[2][:id]).to eq "publication_#{pub2.id}" - expect(page).to have_no_text('Pub1') - expect(page).to have_no_text('Pub3') + 
expect(page).not_to have_text('Pub1') + expect(page).not_to have_text('Pub3') end end diff --git a/spec/integration/admin/activity_insight_oa_workflow/wrong_file_version_review_spec.rb b/spec/integration/admin/activity_insight_oa_workflow/wrong_file_version_review_spec.rb index 9b95fd4e5..cba213fe0 100644 --- a/spec/integration/admin/activity_insight_oa_workflow/wrong_file_version_review_spec.rb +++ b/spec/integration/admin/activity_insight_oa_workflow/wrong_file_version_review_spec.rb @@ -33,7 +33,7 @@ expect(page).to have_text('Download File') expect(page).to have_text(pub1.title) expect(page).to have_text(pub2.title) - expect(page).to have_no_text(pub3.title) + expect(page).not_to have_text(pub3.title) expect(page).to have_text('Accepted Manuscript').twice expect(page).to have_text('Final Published Version').twice expect(page).to have_link(aif1.download_filename) diff --git a/spec/integration/admin/activity_insight_oa_workflow/wrong_version_author_notified_review_spec.rb b/spec/integration/admin/activity_insight_oa_workflow/wrong_version_author_notified_review_spec.rb index 6864b0acb..c31094493 100644 --- a/spec/integration/admin/activity_insight_oa_workflow/wrong_version_author_notified_review_spec.rb +++ b/spec/integration/admin/activity_insight_oa_workflow/wrong_version_author_notified_review_spec.rb @@ -34,9 +34,9 @@ expect(page).to have_text('Download File') expect(page).to have_text(pub1.title) expect(page).to have_text(pub1.wrong_oa_version_notification_sent_at.strftime('%m/%d/%Y')) - expect(page).to have_no_text(aif1.created_at.strftime('%m/%d/%Y')) + expect(page).not_to have_text(aif1.created_at.strftime('%m/%d/%Y')) expect(page).to have_text(pub2.title) - expect(page).to have_no_text(pub3.title) + expect(page).not_to have_text(pub3.title) expect(page).to have_text('Accepted Manuscript').twice expect(page).to have_text('Final Published Version').twice expect(page).to have_link(aif1.download_filename) diff --git 
a/spec/integration/admin/api_tokens/create_spec.rb b/spec/integration/admin/api_tokens/create_spec.rb index e79b54413..0d51931fd 100644 --- a/spec/integration/admin/api_tokens/create_spec.rb +++ b/spec/integration/admin/api_tokens/create_spec.rb @@ -17,11 +17,11 @@ end it "does not allow the new token's value to be set" do - expect(page).to have_no_field 'Token' + expect(page).not_to have_field 'Token' end it "does not allow the new token's total requests to be set" do - expect(page).to have_no_field 'Total requests' + expect(page).not_to have_field 'Total requests' end end diff --git a/spec/integration/admin/api_tokens/edit_spec.rb b/spec/integration/admin/api_tokens/edit_spec.rb index f3648dbeb..0b0216125 100644 --- a/spec/integration/admin/api_tokens/edit_spec.rb +++ b/spec/integration/admin/api_tokens/edit_spec.rb @@ -21,11 +21,11 @@ end it "does not allow the token's value to be set" do - expect(page).to have_no_field 'Token' + expect(page).not_to have_field 'Token' end it "does not allow the token's total requests to be set" do - expect(page).to have_no_field 'Total requests' + expect(page).not_to have_field 'Total requests' end end diff --git a/spec/integration/admin/authorships/create_spec.rb b/spec/integration/admin/authorships/create_spec.rb index afcb3670c..84b91fde1 100644 --- a/spec/integration/admin/authorships/create_spec.rb +++ b/spec/integration/admin/authorships/create_spec.rb @@ -20,7 +20,7 @@ end end - describe 'submitting the form to create a new authorship', :js do + describe 'submitting the form to create a new authorship', js: true do before do within '#authorship_user_id_field' do find('.dropdown-toggle').click diff --git a/spec/integration/admin/duplicate_publication_groupings/create_spec.rb b/spec/integration/admin/duplicate_publication_groupings/create_spec.rb index b04c304e0..c286435c3 100644 --- a/spec/integration/admin/duplicate_publication_groupings/create_spec.rb +++ 
b/spec/integration/admin/duplicate_publication_groupings/create_spec.rb @@ -75,7 +75,7 @@ it 'groups all of the selected publications into the same group' do group = pub1.reload.duplicate_group || pub2.reload.duplicate_group - expect(group.publications).to contain_exactly(pub1, pub2, pub3, pub4, pub5) + expect(group.publications).to match_array [pub1, pub2, pub3, pub4, pub5] end it 'redirects back to the user details page' do diff --git a/spec/integration/admin/duplicate_publication_groups/show_spec.rb b/spec/integration/admin/duplicate_publication_groups/show_spec.rb index c0343e518..98692acb5 100644 --- a/spec/integration/admin/duplicate_publication_groups/show_spec.rb +++ b/spec/integration/admin/duplicate_publication_groups/show_spec.rb @@ -84,7 +84,7 @@ let(:pub1_group) { group } let(:pub2_group) { group } - describe 'the page content', :js do + describe 'the page content', js: true do before { visit rails_admin.show_path(model_name: :duplicate_publication_group, id: group.id) } it 'shows the correct data for the group' do @@ -148,7 +148,7 @@ expect(page).to have_content 'Select' expect(page).to have_content 'Merge Target' - expect(page).to have_no_content 'Delete' + expect(page).not_to have_content 'Delete' end it 'disables/enables buttons' do @@ -173,10 +173,10 @@ before { visit rails_admin.show_path(model_name: :duplicate_publication_group, id: group.id) } it 'shows the correct controls' do - expect(page).to have_no_content 'Select' - expect(page).to have_no_content 'Merge Target' - expect(page).to have_no_content 'Merge Selected' - expect(page).to have_no_content 'Ignore Selected' + expect(page).not_to have_content 'Select' + expect(page).not_to have_content 'Merge Target' + expect(page).not_to have_content 'Merge Selected' + expect(page).not_to have_content 'Ignore Selected' expect(page).to have_button 'Delete Group' end end @@ -187,10 +187,10 @@ before { visit rails_admin.show_path(model_name: :duplicate_publication_group, id: group.id) } it 'shows the 
correct controls' do - expect(page).to have_no_content 'Select' - expect(page).to have_no_content 'Merge Target' - expect(page).to have_no_content 'Merge Selected' - expect(page).to have_no_content 'Ignore Selected' + expect(page).not_to have_content 'Select' + expect(page).not_to have_content 'Merge Target' + expect(page).not_to have_content 'Merge Selected' + expect(page).not_to have_content 'Ignore Selected' expect(page).to have_button 'Delete Group' end end diff --git a/spec/integration/admin/external_publication_waivers/index_spec.rb b/spec/integration/admin/external_publication_waivers/index_spec.rb index f26ee7b06..8c406a45d 100644 --- a/spec/integration/admin/external_publication_waivers/index_spec.rb +++ b/spec/integration/admin/external_publication_waivers/index_spec.rb @@ -41,8 +41,8 @@ end it 'does not show waivers that have been linked to publications' do - expect(page).to have_no_content 'Felix Tester' - expect(page).to have_no_content 'Publication Three' + expect(page).not_to have_content 'Felix Tester' + expect(page).not_to have_content 'Publication Three' end end diff --git a/spec/integration/admin/external_publication_waivers/show_spec.rb b/spec/integration/admin/external_publication_waivers/show_spec.rb index 8c2147e1c..a8fe9f6f7 100644 --- a/spec/integration/admin/external_publication_waivers/show_spec.rb +++ b/spec/integration/admin/external_publication_waivers/show_spec.rb @@ -70,7 +70,7 @@ expect(page).to have_content 'Author Three' end - expect(page).to have_no_content 'Some Other Publication' + expect(page).not_to have_content 'Some Other Publication' end describe 'linking a publication to the waiver' do @@ -103,16 +103,16 @@ end it 'does not list matching publications' do - expect(page).to have_no_content '2011' - expect(page).to have_no_content 'Some Journal' - expect(page).to have_no_content 'Joe Testerson, Author One, Author Two' + expect(page).not_to have_content '2011' + expect(page).not_to have_content 'Some Journal' + 
expect(page).not_to have_content 'Joe Testerson, Author One, Author Two' - expect(page).to have_no_link 'Another publication' - expect(page).to have_no_content '1999' - expect(page).to have_no_content 'Another Journal' - expect(page).to have_no_content 'Author Three' + expect(page).not_to have_link 'Another publication' + expect(page).not_to have_content '1999' + expect(page).not_to have_content 'Another Journal' + expect(page).not_to have_content 'Author Three' - expect(page).to have_no_content 'Some Other Publication' + expect(page).not_to have_content 'Some Other Publication' end it 'shows a link to the internal waiver' do diff --git a/spec/integration/admin/oa_notification_settings/edit_spec.rb b/spec/integration/admin/oa_notification_settings/edit_spec.rb index bf3207eb6..4fcd46a03 100644 --- a/spec/integration/admin/oa_notification_settings/edit_spec.rb +++ b/spec/integration/admin/oa_notification_settings/edit_spec.rb @@ -19,7 +19,7 @@ end it 'does not allow the singleton_guard value to be set' do - expect(page).to have_no_field 'Singleton guard' + expect(page).not_to have_field 'Singleton guard' end end diff --git a/spec/integration/admin/oa_notification_settings/show_spec.rb b/spec/integration/admin/oa_notification_settings/show_spec.rb index ccdf20dd8..41b6d401a 100644 --- a/spec/integration/admin/oa_notification_settings/show_spec.rb +++ b/spec/integration/admin/oa_notification_settings/show_spec.rb @@ -17,7 +17,7 @@ expect(page).to have_content 'Details for OA notification setting' expect(page).to have_content settings.email_cap expect(page).to have_css 'span.fa-check', count: 1 - expect(page).to have_no_content 'Singleton guard' + expect(page).not_to have_content 'Singleton guard' end end diff --git a/spec/integration/admin/open_access_locations/create_spec.rb b/spec/integration/admin/open_access_locations/create_spec.rb index 85833da89..d392b59cf 100644 --- a/spec/integration/admin/open_access_locations/create_spec.rb +++ 
b/spec/integration/admin/open_access_locations/create_spec.rb @@ -31,7 +31,7 @@ expect(page).to have_field 'Source' end - it 'shows the correct options for the Source field', :js do + it 'shows the correct options for the Source field', js: true do find('.dropdown-toggle').click within '#ui-id-1' do expect(page).to have_content Source.new(Source::USER).display diff --git a/spec/integration/admin/open_access_locations/update_spec.rb b/spec/integration/admin/open_access_locations/update_spec.rb index 3f57e4ff6..e2e1b010e 100644 --- a/spec/integration/admin/open_access_locations/update_spec.rb +++ b/spec/integration/admin/open_access_locations/update_spec.rb @@ -20,11 +20,11 @@ expect(page).to have_field 'Source' end - it 'shows the correct options for the Source field', :js do + it 'shows the correct options for the Source field', js: true do find('.dropdown-toggle').click within '#ui-id-1' do expect(page).to have_content Source.new(Source::OPEN_ACCESS_BUTTON).display - expect(page).to have_no_content Source.new(Source::USER).display + expect(page).not_to have_content Source.new(Source::USER).display end end end diff --git a/spec/integration/admin/publication_merges/create_spec.rb b/spec/integration/admin/publication_merges/create_spec.rb index 4d24b9a60..e30b2d7e0 100644 --- a/spec/integration/admin/publication_merges/create_spec.rb +++ b/spec/integration/admin/publication_merges/create_spec.rb @@ -67,12 +67,12 @@ end it "doesn't change the group" do - expect(group.reload.publications).to contain_exactly(pub1, pub2, pub3) + expect(group.reload.publications).to match_array [pub1, pub2, pub3] end it "doesn't change the publications" do expect(pub1.reload.imports).to eq [pub1_import1] - expect(pub2.reload.imports).to contain_exactly(pub2_import1, pub2_import2) + expect(pub2.reload.imports).to match_array [pub2_import1, pub2_import2] expect(pub3.reload.imports).to eq [pub3_import1] end end @@ -93,12 +93,12 @@ end it "doesn't change the group" do - 
expect(group.reload.publications).to contain_exactly(pub1, pub2, pub3) + expect(group.reload.publications).to match_array [pub1, pub2, pub3] end it "doesn't change the publications" do expect(pub1.reload.imports).to eq [pub1_import1] - expect(pub2.reload.imports).to contain_exactly(pub2_import1, pub2_import2) + expect(pub2.reload.imports).to match_array [pub2_import1, pub2_import2] expect(pub3.reload.imports).to eq [pub3_import1] end end @@ -119,12 +119,12 @@ end it "doesn't change the group" do - expect(group.reload.publications).to contain_exactly(pub1, pub2, pub3) + expect(group.reload.publications).to match_array [pub1, pub2, pub3] end it "doesn't change the publications" do expect(pub1.reload.imports).to eq [pub1_import1] - expect(pub2.reload.imports).to contain_exactly(pub2_import1, pub2_import2) + expect(pub2.reload.imports).to match_array [pub2_import1, pub2_import2] expect(pub3.reload.imports).to eq [pub3_import1] end end @@ -142,12 +142,12 @@ end it "doesn't change the group" do - expect(group.reload.publications).to contain_exactly(pub1, pub2, pub3) + expect(group.reload.publications).to match_array [pub1, pub2, pub3] end it "doesn't change the publications" do expect(pub1.reload.imports).to eq [pub1_import1] - expect(pub2.reload.imports).to contain_exactly(pub2_import1, pub2_import2) + expect(pub2.reload.imports).to match_array [pub2_import1, pub2_import2] expect(pub3.reload.imports).to eq [pub3_import1] end end @@ -173,7 +173,7 @@ end it "reassigns the merged publication's imports" do - expect(pub1.reload.imports).to contain_exactly(pub1_import1, pub2_import1, pub2_import2) + expect(pub1.reload.imports).to match_array [pub1_import1, pub2_import1, pub2_import2] end it "doesn't change the unselected publication" do @@ -181,12 +181,12 @@ end it 'leaves the group containing the correct publications' do - expect(group.reload.publications).to contain_exactly(pub1, pub3) + expect(group.reload.publications).to match_array [pub1, pub3] end end context "choosing 
one publication as the merge target and selecting another - publication that's in the same non-duplicate group to merge", :js do + publication that's in the same non-duplicate group to merge", js: true do let(:ndpg) { create(:non_duplicate_publication_group) } before do @@ -224,8 +224,8 @@ end it "does not reassign the merged publication's imports" do - expect(pub1.reload.imports).to contain_exactly(pub1_import1) - expect(pub2.reload.imports).to contain_exactly(pub2_import1, pub2_import2) + expect(pub1.reload.imports).to match_array [pub1_import1] + expect(pub2.reload.imports).to match_array [pub2_import1, pub2_import2] end it "doesn't change the unselected publication" do @@ -233,7 +233,7 @@ end it "doesn't change the contents of the duplicate group" do - expect(group.reload.publications).to contain_exactly(pub1, pub2, pub3) + expect(group.reload.publications).to match_array [pub1, pub2, pub3] end end @@ -257,7 +257,7 @@ it "reassigns the merged publication's imports" do sleep 0.5 - expect(pub1.reload.imports).to contain_exactly(pub1_import1, pub2_import1, pub2_import2) + expect(pub1.reload.imports).to match_array [pub1_import1, pub2_import1, pub2_import2] end it "doesn't change the unselected publication" do @@ -267,7 +267,7 @@ it 'leaves the group containing the correct publications' do sleep 0.5 - expect(group.reload.publications).to contain_exactly(pub1, pub3) + expect(group.reload.publications).to match_array [pub1, pub3] end it 'deletes the non-duplicate group' do @@ -277,7 +277,7 @@ end end - context 'choosing one publication as the merge target and selecting two other publications to merge who are in a non-duplicate group', :js do + context 'choosing one publication as the merge target and selecting two other publications to merge who are in a non-duplicate group', js: true do let(:ndpg) { create(:non_duplicate_publication_group) } before do @@ -318,12 +318,12 @@ it "reassigns the merged publication's imports" do sleep 0.5 - expect(pub1.reload.imports).to 
contain_exactly(pub1_import1, pub2_import1, pub2_import2, pub3_import1) + expect(pub1.reload.imports).to match_array [pub1_import1, pub2_import1, pub2_import2, pub3_import1] end it 'leaves the group containing the correct publications' do sleep 0.5 - expect(group.reload.publications).to contain_exactly(pub1) + expect(group.reload.publications).to match_array [pub1] end it 'deletes the non-duplicate group' do @@ -355,7 +355,7 @@ end it "reassigns the merged publication's imports" do - expect(pub1.reload.imports).to contain_exactly(pub1_import1, pub2_import1, pub2_import2) + expect(pub1.reload.imports).to match_array [pub1_import1, pub2_import1, pub2_import2] end it "doesn't change the unselected publication" do @@ -363,7 +363,7 @@ end it 'leaves the group containing the correct publications' do - expect(group.reload.publications).to contain_exactly(pub1, pub3) + expect(group.reload.publications).to match_array [pub1, pub3] end end @@ -397,7 +397,7 @@ it 'creates a new non-duplicate group with the selected publications' do g = NonDuplicatePublicationGroup.last - expect(g.publications).to contain_exactly(pub1, pub2) + expect(g.publications).to match_array [pub1, pub2] end it 'redirects back to the group' do diff --git a/spec/integration/admin/publications/create_spec.rb b/spec/integration/admin/publications/create_spec.rb index b2d57c42e..3f6a5c971 100644 --- a/spec/integration/admin/publications/create_spec.rb +++ b/spec/integration/admin/publications/create_spec.rb @@ -18,7 +18,7 @@ end it 'does not allow the total Scopus citations to be set' do - expect(page).to have_no_field 'Total scopus citations' + expect(page).not_to have_field 'Total scopus citations' end end diff --git a/spec/integration/admin/publications/index_publications_by_organization_spec.rb b/spec/integration/admin/publications/index_publications_by_organization_spec.rb index 5f083bf2d..42437476e 100644 --- a/spec/integration/admin/publications/index_publications_by_organization_spec.rb +++ 
b/spec/integration/admin/publications/index_publications_by_organization_spec.rb @@ -5,7 +5,7 @@ require 'integration/admin/shared_examples_for_admin_page' describe 'Admin list of publications by organization', type: :feature do - context 'when the current user is an admin', :js do + context 'when the current user is an admin', js: true do before { authenticate_admin_user } let(:user1) { create(:user) } @@ -68,9 +68,9 @@ expect(page).to have_content '2 publications' - expect(page).to have_no_content 'Pub Three' - expect(page).to have_no_content 'Pub Four' - expect(page).to have_no_content 'Pub Five' + expect(page).not_to have_content 'Pub Three' + expect(page).not_to have_content 'Pub Four' + expect(page).not_to have_content 'Pub Five' end end @@ -87,10 +87,10 @@ expect(page).to have_content '1 publication' - expect(page).to have_no_content 'Pub One' - expect(page).to have_no_content 'Pub Three' - expect(page).to have_no_content 'Pub Four' - expect(page).to have_no_content 'Pub Five' + expect(page).not_to have_content 'Pub One' + expect(page).not_to have_content 'Pub Three' + expect(page).not_to have_content 'Pub Four' + expect(page).not_to have_content 'Pub Five' end end @@ -109,14 +109,14 @@ (3..26).each do |i| expect(page).to have_content "Pub #{i}" end - expect(page).to have_no_content 'Pub One' + expect(page).not_to have_content 'Pub One' click_link '2' expect(page).to have_content '26 publications' - expect(page).to have_no_content 'Pub Two' + expect(page).not_to have_content 'Pub Two' (3..26).each do |i| - expect(page).to have_no_content "Pub #{i}" + expect(page).not_to have_content "Pub #{i}" end expect(page).to have_content 'Pub One' end diff --git a/spec/integration/admin/publications/update_spec.rb b/spec/integration/admin/publications/update_spec.rb index 6fa8f9da7..10a4d4af0 100644 --- a/spec/integration/admin/publications/update_spec.rb +++ b/spec/integration/admin/publications/update_spec.rb @@ -20,7 +20,7 @@ describe 'viewing the form' do it 'does 
not allow the total Scopus citations to be set' do - expect(page).to have_no_field 'Total scopus citations' + expect(page).not_to have_field 'Total scopus citations' end it 'shows a nested form for adding open access locations' do diff --git a/spec/integration/admin/users/create_spec.rb b/spec/integration/admin/users/create_spec.rb index 7490de4c7..d48d380ba 100644 --- a/spec/integration/admin/users/create_spec.rb +++ b/spec/integration/admin/users/create_spec.rb @@ -17,8 +17,8 @@ end it "does not allow the new user's H-Index to be set" do - expect(page).to have_no_field 'H-Index' - expect(page).to have_no_field 'Scopus h index' + expect(page).not_to have_field 'H-Index' + expect(page).not_to have_field 'Scopus h index' end end diff --git a/spec/integration/admin/users/update_spec.rb b/spec/integration/admin/users/update_spec.rb index 6636a225b..6e35dfac9 100644 --- a/spec/integration/admin/users/update_spec.rb +++ b/spec/integration/admin/users/update_spec.rb @@ -35,7 +35,7 @@ end it "does not allow the user's webaccess ID to be updated" do - expect(page).to have_no_field 'Penn State WebAccess ID' + expect(page).not_to have_field 'Penn State WebAccess ID' end it "shows the user record's managed organizations" do @@ -43,12 +43,12 @@ end it "does not allow the user's database timestamps to be manually updated" do - expect(page).to have_no_field 'Created at' - expect(page).to have_no_field 'Updated at' + expect(page).not_to have_field 'Created at' + expect(page).not_to have_field 'Updated at' end it "does not allow the user's manual update timestamp to be updated" do - expect(page).to have_no_field 'Updated by user at' + expect(page).not_to have_field 'Updated by user at' end end @@ -144,56 +144,56 @@ end it "does not allow the user's webaccess ID to be updated" do - expect(page).to have_no_field 'Penn State WebAccess ID' + expect(page).not_to have_field 'Penn State WebAccess ID' end it "does not allow the user's first name to be updated" do - expect(page).to 
have_no_field 'First name' + expect(page).not_to have_field 'First name' end it "does not allow the user's middle name to be updated" do - expect(page).to have_no_field 'Middle name' + expect(page).not_to have_field 'Middle name' end it "does not allow the user's last name to be updated" do - expect(page).to have_no_field 'Last name' + expect(page).not_to have_field 'Last name' end it "does not allow the user's admin flag to be updated" do - expect(page).to have_no_field 'Admin user?' + expect(page).not_to have_field 'Admin user?' end it "does not show the user's Pure ID" do - expect(page).to have_no_content 'pure-abc123' + expect(page).not_to have_content 'pure-abc123' end it "does not allow the user's Pure ID to be updated" do - expect(page).to have_no_field 'Pure ID' + expect(page).not_to have_field 'Pure ID' end it "does not show the user's Activity Insight ID" do - expect(page).to have_no_content 'ai-xyz789' + expect(page).not_to have_content 'ai-xyz789' end it "does not allow the user's Activity Insight ID to be updated" do - expect(page).to have_no_field 'Activity Insight ID' + expect(page).not_to have_field 'Activity Insight ID' end it "does not show the user's Penn State ID" do - expect(page).to have_no_content '987654321' + expect(page).not_to have_content '987654321' end it "does not allow the user's Penn State ID to be updated" do - expect(page).to have_no_field 'Penn State ID' + expect(page).not_to have_field 'Penn State ID' end it "does not show the user's H-Index" do - expect(page).to have_no_content '649' + expect(page).not_to have_content '649' end it "does not allow the user's H-Index to be updated" do - expect(page).to have_no_field 'H-Index' - expect(page).to have_no_field 'Scopus h index' + expect(page).not_to have_field 'H-Index' + expect(page).not_to have_field 'Scopus h index' end end diff --git a/spec/integration/api_docs/home_spec.rb b/spec/integration/api_docs/home_spec.rb index 6e24aee76..7af98abe7 100644 --- 
a/spec/integration/api_docs/home_spec.rb +++ b/spec/integration/api_docs/home_spec.rb @@ -3,7 +3,7 @@ require 'integration/integration_spec_helper' require 'support/webdrivers' -describe 'API documentation home page', :js, type: :feature do +describe 'API documentation home page', js: true, type: :feature do context 'when the user is logged in' do before do authenticate_user @@ -19,7 +19,7 @@ end it 'does not show a link to the Admin interface' do - expect(page).to have_no_link 'Admin' + expect(page).not_to have_link 'Admin' end end @@ -46,15 +46,15 @@ end it 'shows a link for publications API' do - expect(page).to have_css 'a.nostyle span', text: 'publication' + expect(page).to have_selector 'a.nostyle span', text: 'publication' end it 'shows a link for users API' do - expect(page).to have_css 'a.nostyle span', text: 'user' + expect(page).to have_selector 'a.nostyle span', text: 'user' end it 'shows a link for organizations API' do - expect(page).to have_css 'a.nostyle span', text: 'organization' + expect(page).to have_selector 'a.nostyle span', text: 'organization' end it 'shows a link to the home page' do @@ -62,7 +62,7 @@ end it 'does not show a link to the Admin interface' do - expect(page).to have_no_link 'Admin' + expect(page).not_to have_link 'Admin' end end end diff --git a/spec/integration/home_page_spec.rb b/spec/integration/home_page_spec.rb index 1c8947cb0..eefd55977 100644 --- a/spec/integration/home_page_spec.rb +++ b/spec/integration/home_page_spec.rb @@ -42,7 +42,7 @@ end it 'does not show a link to the admin interface' do - expect(page).to have_no_link 'Admin' + expect(page).not_to have_link 'Admin' end it 'shows a sign out link' do @@ -113,11 +113,11 @@ end it 'does not show a link to the admin interface' do - expect(page).to have_no_link 'Admin' + expect(page).not_to have_link 'Admin' end it 'does not show a sign out link' do - expect(page).to have_no_link 'Sign out' + expect(page).not_to have_link 'Sign out' end it_behaves_like 'a page with the 
public layout' diff --git a/spec/integration/organization_profiles/show_spec.rb b/spec/integration/organization_profiles/show_spec.rb index c0a7db140..9afe10911 100644 --- a/spec/integration/organization_profiles/show_spec.rb +++ b/spec/integration/organization_profiles/show_spec.rb @@ -36,7 +36,7 @@ User.first.publications.first.contributor_names.each do |name| expect(page).to have_content name.name end - expect(page).to have_no_content User.last.publications.first.title + expect(page).not_to have_content User.last.publications.first.title expect(page).to have_content 'Displaying publications 1 - 25 of 100 in total' click_link '3' expect(page).to have_content 'Displaying publications 51 - 75 of 100 in total' @@ -45,7 +45,7 @@ expect(page).to have_link 'Next ›' expect(page).to have_link 'Last »' click_link 'Last »' - expect(page).to have_no_content User.first.publications.first.title + expect(page).not_to have_content User.first.publications.first.title expect(page).to have_content User.last.publications.first.title end end diff --git a/spec/integration/profiles/claim_publication_spec.rb b/spec/integration/profiles/claim_publication_spec.rb index 6068a3edf..5669e58ae 100644 --- a/spec/integration/profiles/claim_publication_spec.rb +++ b/spec/integration/profiles/claim_publication_spec.rb @@ -45,7 +45,7 @@ expect(page).to have_content 'Another Researcher Metadata Database Test Publication' end - expect(page).to have_no_content pub3.title + expect(page).not_to have_content pub3.title end it 'shows the term used in the search' do @@ -59,7 +59,7 @@ before { do_title_search } it 'does not show the matching publications' do - expect(page).to have_no_content 'Researcher Metadata Database Test Publication' + expect(page).not_to have_content 'Researcher Metadata Database Test Publication' end it 'shows the term used in the search' do @@ -74,7 +74,7 @@ before { do_title_search } it 'does not show the matching publications' do - expect(page).to have_no_content 'Researcher 
Metadata Database Test Publication' + expect(page).not_to have_content 'Researcher Metadata Database Test Publication' end it 'shows the term used in the search' do @@ -94,7 +94,7 @@ end it 'does not show the matching publications' do - expect(page).to have_no_content 'Researcher Metadata Database Test Publication' + expect(page).not_to have_content 'Researcher Metadata Database Test Publication' end it 'shows the term used in the search' do @@ -116,8 +116,8 @@ expect(page).to have_content 'Researcher Metadata Database Test Publication' end - expect(page).to have_no_content 'Another Researcher Metadata Database Test Publication' - expect(page).to have_no_content pub3.title + expect(page).not_to have_content 'Another Researcher Metadata Database Test Publication' + expect(page).not_to have_content pub3.title end it 'shows the term used in the search' do @@ -132,9 +132,9 @@ end it 'does not show any publications' do - expect(page).to have_no_content 'Test Publication' - expect(page).to have_no_content pub3.title - expect(page).to have_no_content 'Matching Publications' + expect(page).not_to have_content 'Test Publication' + expect(page).not_to have_content pub3.title + expect(page).not_to have_content 'Matching Publications' end it 'shows a helpful message' do @@ -204,12 +204,12 @@ it "does not show a button to add the claimed publication to the user's ORCiD record" do within "#authorship_row_#{new_authorship.id}" do - expect(page).to have_no_css '.orcid-button' + expect(page).not_to have_css '.orcid-button' end end it 'does not show a link to edit open access information for the claimed publication' do - expect(page).to have_no_link 'Another Researcher Metadata Database Test Publication' + expect(page).not_to have_link 'Another Researcher Metadata Database Test Publication' end end end diff --git a/spec/integration/profiles/deputy_assignments/confirm_spec.rb b/spec/integration/profiles/deputy_assignments/confirm_spec.rb index 666ca39fb..61a1c7110 100644 --- 
a/spec/integration/profiles/deputy_assignments/confirm_spec.rb +++ b/spec/integration/profiles/deputy_assignments/confirm_spec.rb @@ -8,7 +8,7 @@ before { visit deputy_assignments_path } it 'is not allowed' do - expect(page).to have_no_current_path(deputy_assignments_path) + expect(page).not_to have_current_path(deputy_assignments_path) end end @@ -31,7 +31,7 @@ expect(deputy_assignment.reload).to be_confirmed within "##{dom_id(deputy_assignment)}" do - expect(page).to have_no_button(accept_button) + expect(page).not_to have_button(accept_button) end end diff --git a/spec/integration/profiles/deputy_assignments/create_spec.rb b/spec/integration/profiles/deputy_assignments/create_spec.rb index b9e11cbef..34c7c878b 100644 --- a/spec/integration/profiles/deputy_assignments/create_spec.rb +++ b/spec/integration/profiles/deputy_assignments/create_spec.rb @@ -7,7 +7,7 @@ before { visit deputy_assignments_path } it 'is not allowed' do - expect(page).to have_no_current_path(deputy_assignments_path) + expect(page).not_to have_current_path(deputy_assignments_path) end end @@ -55,7 +55,7 @@ click_button I18n.t!('helpers.submit.new_deputy_assignment_form.create') expect(find_field('new_deputy_assignment_form_deputy_webaccess_id').value).to eq 'agw13' - expect(page).to have_css('.invalid-feedback') + expect(page).to have_selector('.invalid-feedback') end end end diff --git a/spec/integration/profiles/deputy_assignments/destroy_spec.rb b/spec/integration/profiles/deputy_assignments/destroy_spec.rb index 1e0e859b3..4939be229 100644 --- a/spec/integration/profiles/deputy_assignments/destroy_spec.rb +++ b/spec/integration/profiles/deputy_assignments/destroy_spec.rb @@ -8,7 +8,7 @@ before { visit deputy_assignments_path } it 'is not allowed' do - expect(page).to have_no_current_path(deputy_assignments_path) + expect(page).not_to have_current_path(deputy_assignments_path) end end @@ -30,7 +30,7 @@ it 'allows the deputy assignment to be deactivated' do 
expect(deputy_assignment.reload).not_to be_active - expect(page).to have_no_css "##{dom_id(deputy_assignment)}" + expect(page).not_to have_selector "##{dom_id(deputy_assignment)}" end it 'emails the other user' do @@ -47,7 +47,7 @@ it 'allows the deputy assignment to be destroyed' do expect { deputy_assignment.reload }.to raise_error(ActiveRecord::RecordNotFound) - expect(page).to have_no_css "##{dom_id(deputy_assignment)}" + expect(page).not_to have_selector "##{dom_id(deputy_assignment)}" end it 'emails the other user' do diff --git a/spec/integration/profiles/deputy_assignments/index_spec.rb b/spec/integration/profiles/deputy_assignments/index_spec.rb index 2e56a6ce1..9782bcd0c 100644 --- a/spec/integration/profiles/deputy_assignments/index_spec.rb +++ b/spec/integration/profiles/deputy_assignments/index_spec.rb @@ -8,7 +8,7 @@ before { visit deputy_assignments_path } it 'is not allowed' do - expect(page).to have_no_current_path(deputy_assignments_path) + expect(page).not_to have_current_path(deputy_assignments_path) end end @@ -45,10 +45,10 @@ expect(page).to have_content deputy_unconfirmed.name expect(page).to have_link primary_confirmed.name expect(page).to have_content primary_unconfirmed.name - expect(page).to have_no_link primary_unconfirmed.name + expect(page).not_to have_link primary_unconfirmed.name - expect(page).to have_no_content deputy_inactive - expect(page).to have_no_content primary_inactive + expect(page).not_to have_content deputy_inactive + expect(page).not_to have_content primary_inactive end describe "when link is clicked in confirmed primary's name" do diff --git a/spec/integration/profiles/edit_spec.rb b/spec/integration/profiles/edit_spec.rb index ac3df5d15..f95fc10ba 100644 --- a/spec/integration/profiles/edit_spec.rb +++ b/spec/integration/profiles/edit_spec.rb @@ -48,7 +48,7 @@ let(:orcid_id) { nil } let(:orcid_token) { nil } - describe 'the manage profile link', :js, type: :feature do + describe 'the manage profile link', js: true, 
type: :feature do describe 'visiting the profile page for a given user' do context 'when not logged in' do before do @@ -59,7 +59,7 @@ end it 'does not display a link to manage the profile' do - expect(page).to have_no_link 'Manage my profile' + expect(page).not_to have_link 'Manage my profile' end end @@ -84,7 +84,7 @@ end it 'does not display a link to manage the profile' do - expect(page).to have_no_link 'Manage my profile' + expect(page).not_to have_link 'Manage my profile' end end @@ -149,11 +149,11 @@ context 'when the user has no ORCID ID' do it 'does not display an ORCID ID link' do - expect(page).to have_no_link 'ORCID iD' + expect(page).not_to have_link 'ORCID iD' end it 'does not display an ORCID call to action' do - expect(page).to have_no_link 'Link my ORCID ID' + expect(page).not_to have_link 'Link my ORCID ID' end end @@ -165,7 +165,7 @@ end it 'does not display an ORCID call to action' do - expect(page).to have_no_link 'Link my ORCID ID' + expect(page).not_to have_link 'Link my ORCID ID' end end end @@ -178,7 +178,7 @@ context 'when the user has no ORCID ID' do it 'does not display an ORCID ID link' do - expect(page).to have_no_link 'ORCID iD' + expect(page).not_to have_link 'ORCID iD' end it 'does display an ORCID call to action' do @@ -194,7 +194,7 @@ end it 'does not display an ORCID call to action' do - expect(page).to have_no_link 'Link my ORCID ID' + expect(page).not_to have_link 'Link my ORCID ID' end end end @@ -211,11 +211,11 @@ context 'when the user has no ORCID ID' do it 'does not display an ORCID ID link' do - expect(page).to have_no_link 'ORCID iD' + expect(page).not_to have_link 'ORCID iD' end it 'does not display an ORCID call to action' do - expect(page).to have_no_link 'Link my ORCID ID' + expect(page).not_to have_link 'Link my ORCID ID' end end @@ -227,7 +227,7 @@ end it 'does not display an ORCID call to action' do - expect(page).to have_no_link 'Link my ORCID ID' + expect(page).not_to have_link 'Link my ORCID ID' end end end @@ 
-305,19 +305,19 @@ it "shows descriptions of the user's visible publications" do expect(page).to have_content "Bob's Publication, The Journal, 2007" expect(page).to have_link "Bob's Publication", href: edit_open_access_publication_path(pub_1) - expect(page).to have_no_content "Bob's Other Publication" + expect(page).not_to have_content "Bob's Other Publication" expect(page).to have_content "Bob's Open Access Publication" - expect(page).to have_no_link "Bob's Open Access Publication" + expect(page).not_to have_link "Bob's Open Access Publication" expect(page).to have_content "Bob's Other Open Access Publication" - expect(page).to have_no_link "Bob's Other Open Access Publication" + expect(page).not_to have_link "Bob's Other Open Access Publication" expect(page).to have_content "Bob's Non-Open Access Publication" - expect(page).to have_no_link "Bob's Non-Open Access Publication" + expect(page).not_to have_link "Bob's Non-Open Access Publication" expect(page).to have_content "Bob's Pending Scholarsphere Publication" - expect(page).to have_no_link "Bob's Pending Scholarsphere Publication" + expect(page).not_to have_link "Bob's Pending Scholarsphere Publication" expect(page).to have_content "Bob's In Press Publication" - expect(page).to have_no_link "Bob's In Press Publication" + expect(page).not_to have_link "Bob's In Press Publication" expect(page).to have_content "Bob's Uploaded to Activity Insight" - expect(page).to have_no_link "Bob's Uploaded to Activity Insight" + expect(page).not_to have_link "Bob's Uploaded to Activity Insight" end it "shows an icon to indicate when we don't have open access information for a publication" do @@ -365,7 +365,7 @@ end it 'does not show the empty list message' do - expect(page).to have_no_content 'There are currently no publications to show for your profile.' + expect(page).not_to have_content 'There are currently no publications to show for your profile.' 
end end @@ -377,7 +377,7 @@ context 'when the user has no external publication waivers' do it 'does not show the waiver list' do - expect(page).to have_no_content 'Open Access Waivers' + expect(page).not_to have_content 'Open Access Waivers' end end @@ -413,7 +413,7 @@ before { visit edit_profile_publications_path } it 'does not allow the user to visit the page' do - expect(page).to have_no_current_path edit_profile_publications_path, ignore_query: true + expect(page).not_to have_current_path edit_profile_publications_path, ignore_query: true end end end @@ -457,7 +457,7 @@ it "shows descriptions of the user's visible other publications" do expect(page).to have_content 'Chapter' expect(page).to have_content 'Title 1, 2007' - expect(page).to have_no_content 'Title 2, 2008' + expect(page).not_to have_content 'Title 2, 2008' expect(page).to have_content 'Letter' expect(page).to have_content 'Title 1, Journal 1, 2008' end @@ -468,7 +468,7 @@ before { visit edit_profile_other_publications_path } it 'does not allow the user to visit the page' do - expect(page).to have_no_current_path edit_profile_other_publications_path, ignore_query: true + expect(page).not_to have_current_path edit_profile_other_publications_path, ignore_query: true end end end @@ -488,7 +488,7 @@ it "shows descriptions of the user's visible presentations" do expect(page).to have_content "Bob's Presentation - Penn State - University Park, PA" - expect(page).to have_no_content "Bob's Other Presentation - -" + expect(page).not_to have_content "Bob's Other Presentation - -" end end @@ -496,7 +496,7 @@ before { visit edit_profile_presentations_path } it 'does not allow the user to visit the page' do - expect(page).to have_no_current_path edit_profile_presentations_path, ignore_query: true + expect(page).not_to have_current_path edit_profile_presentations_path, ignore_query: true end end end @@ -516,7 +516,7 @@ it "shows descriptions of the user's visible performances" do expect(page).to have_content "Bob's 
Performance - University Park, PA - 2000-01-01" - expect(page).to have_no_content "Bob's Other Performance - -" + expect(page).not_to have_content "Bob's Other Performance - -" end end @@ -524,7 +524,7 @@ before { visit edit_profile_performances_path } it 'does not allow the user to visit the page' do - expect(page).to have_no_current_path edit_profile_performances_path, ignore_query: true + expect(page).not_to have_current_path edit_profile_performances_path, ignore_query: true end end end @@ -548,7 +548,7 @@ context "when the user doesn't belong to an organization" do it 'does not show organization information' do - expect(page).to have_no_content 'Organizations' + expect(page).not_to have_content 'Organizations' end end @@ -600,11 +600,11 @@ context 'when the user does not have an ORCID iD' do it "does not show a button to connect to the user's ORCID record" do - expect(page).to have_no_button connect_orcid_button_text + expect(page).not_to have_button connect_orcid_button_text end it 'does not show a button to add an employment to their ORCID record' do - expect(page).to have_no_button employment_button_text + expect(page).not_to have_button employment_button_text end end @@ -615,7 +615,7 @@ let(:orcid_token) { 'abc123' } it "does not show a button to connect to the user's ORCID record" do - expect(page).to have_no_button connect_orcid_button_text + expect(page).not_to have_button connect_orcid_button_text end it "shows the user's ORCID iD" do @@ -628,7 +628,7 @@ it 'does not show a button to add that employment to their ORCID record' do within "#organization_membership_#{mem1.id}" do - expect(page).to have_no_button employment_button_text + expect(page).not_to have_button employment_button_text end end @@ -659,7 +659,7 @@ end it 'does not show a button to add the employment to their ORCID record' do - expect(page).to have_no_button employment_button_text + expect(page).not_to have_button employment_button_text end end end @@ -670,7 +670,7 @@ before { visit 
profile_bio_path } it 'does not allow the user to visit the page' do - expect(page).to have_no_current_path profile_bio_path, ignore_query: true + expect(page).not_to have_current_path profile_bio_path, ignore_query: true end end end diff --git a/spec/integration/profiles/external_publication_waivers/new_spec.rb b/spec/integration/profiles/external_publication_waivers/new_spec.rb index 361645f23..212d38818 100644 --- a/spec/integration/profiles/external_publication_waivers/new_spec.rb +++ b/spec/integration/profiles/external_publication_waivers/new_spec.rb @@ -10,7 +10,7 @@ before { visit new_external_publication_waiver_path } it 'does not allow them to visit the page' do - expect(page).to have_no_current_path new_external_publication_waiver_path, ignore_query: true + expect(page).not_to have_current_path new_external_publication_waiver_path, ignore_query: true end end diff --git a/spec/integration/profiles/internal_publication_waivers/new_spec.rb b/spec/integration/profiles/internal_publication_waivers/new_spec.rb index 9c3bd6dd4..232cbe7f3 100644 --- a/spec/integration/profiles/internal_publication_waivers/new_spec.rb +++ b/spec/integration/profiles/internal_publication_waivers/new_spec.rb @@ -31,7 +31,7 @@ before { visit new_internal_publication_waiver_path(pub) } it 'does not allow them to visit the page' do - expect(page).to have_no_current_path new_internal_publication_waiver_path(pub), ignore_query: true + expect(page).not_to have_current_path new_internal_publication_waiver_path(pub), ignore_query: true end end diff --git a/spec/integration/profiles/open_access_publications/edit_spec.rb b/spec/integration/profiles/open_access_publications/edit_spec.rb index 9d94c54c3..c0c15cfa2 100644 --- a/spec/integration/profiles/open_access_publications/edit_spec.rb +++ b/spec/integration/profiles/open_access_publications/edit_spec.rb @@ -71,7 +71,7 @@ before { visit edit_open_access_publication_path(pub) } it 'does not allow them to visit the page' do - expect(page).to 
have_no_current_path edit_open_access_publication_path(pub), ignore_query: true + expect(page).not_to have_current_path edit_open_access_publication_path(pub), ignore_query: true end end @@ -150,7 +150,7 @@ end end - describe 'file upload and version check', :js do + describe 'file upload and version check', js: true do include ActiveJob::TestHelper let(:file_store) { ActiveSupport::Cache.lookup_store(:file_store, file_caching_path) } let(:cache) { Rails.cache } @@ -244,7 +244,7 @@ end # Tagged as glacial. This test takes over a minute to complete. - context 'when timeout is reached', :glacial do + context 'when timeout is reached', glacial: true do let(:exif_version) { nil } let(:test_file) { "#{Rails.root}/spec/fixtures/pdf_check_unknown_version.pdf" } let(:cache_file) { { original_filename: 'pdf_check_unknown_version.pdf', @@ -262,7 +262,7 @@ expect(page).to have_content('Attempting to determine file version, please wait...') sleep 10 expect(page).to have_content('Attempting to determine file version, please wait...') - expect(page).to have_no_content('Attempting to determine file version, please wait...', wait: 15) + expect(page).not_to have_content('Attempting to determine file version, please wait...', wait: 15) expect(page).to have_content('We were not able to determine the version of your uploaded publication article.', wait: 15) expect(find_field('scholarsphere_work_deposit_file_version_acceptedversion').checked?).to be false expect(find_field('scholarsphere_work_deposit_file_version_publishedversion').checked?).to be false @@ -270,7 +270,7 @@ end end - describe 'completing the workflow', :js do + describe 'completing the workflow', js: true do include ActiveJob::TestHelper let(:file_store) { ActiveSupport::Cache.lookup_store(:file_store, file_caching_path) } let(:cache) { Rails.cache } @@ -357,7 +357,7 @@ ScholarsphereWorkDeposit.find_by(title: 'Test Publication') expect(Scholarsphere::Client::Ingest).to have_received(:new) do |args| expect(args).to be_a 
Hash - expect(args.keys).to contain_exactly(:metadata, :files, :depositor) + expect(args.keys).to match_array [:metadata, :files, :depositor] expect(args[:metadata]).to eq({ creators: [{ display_name: 'Bob Author', psu_id: 'xyz123' }], description: 'An abstract of the test publication', @@ -403,7 +403,7 @@ within "#authorship_row_#{auth.id}" do expect(page).to have_css '.fa-unlock-alt' expect(page).to have_content pub.title - expect(page).to have_no_link pub.title + expect(page).not_to have_link pub.title end end end diff --git a/spec/integration/profiles/show_spec.rb b/spec/integration/profiles/show_spec.rb index 148718196..04eabd966 100644 --- a/spec/integration/profiles/show_spec.rb +++ b/spec/integration/profiles/show_spec.rb @@ -2,7 +2,7 @@ require 'integration/integration_spec_helper' -describe 'Profile page', :js, type: :feature do +describe 'Profile page', js: true, type: :feature do let!(:user) { create(:user, :with_psu_identity, webaccess_id: 'abc123', @@ -287,10 +287,10 @@ click_link 'Other Works' within '#other-publications' do expect(page).to have_content 'Books' - expect(page).to have_no_content 'Letters' + expect(page).not_to have_content 'Letters' expect(page).to have_content 'Third Publication, Journal 3, 2013' - expect(page).to have_no_content 'Conference Proceedings' - expect(page).to have_no_content 'Fourth Publication, Journal 4, 2010' + expect(page).not_to have_content 'Conference Proceedings' + expect(page).not_to have_content 'Fourth Publication, Journal 4, 2010' end end end @@ -302,7 +302,7 @@ end it 'does NOT show the email' do - expect(page).to have_no_link 'abc123@psu.edu', href: 'mailto:abc123@psu.edu' + expect(page).not_to have_link 'abc123@psu.edu', href: 'mailto:abc123@psu.edu' end end end diff --git a/spec/requests/api/v1/organizations_spec.rb b/spec/requests/api/v1/organizations_spec.rb index 2cb56d2e6..1f95a62bf 100644 --- a/spec/requests/api/v1/organizations_spec.rb +++ b/spec/requests/api/v1/organizations_spec.rb @@ -162,7 
+162,7 @@ end it 'returns 404' do - expect(response).to have_http_status :not_found + expect(response.code).to eq '404' end end @@ -178,7 +178,7 @@ end it 'returns 404' do - expect(response).to have_http_status :not_found + expect(response.code).to eq '404' end end end diff --git a/spec/requests/api/v1/publications_spec.rb b/spec/requests/api/v1/publications_spec.rb index cda71bd70..ded124925 100644 --- a/spec/requests/api/v1/publications_spec.rb +++ b/spec/requests/api/v1/publications_spec.rb @@ -90,7 +90,7 @@ def query_pubs query_pubs end - it 'returns a unique list of publications matching the specified Activity Insight ID', :skip_before do + it 'returns a unique list of publications matching the specified Activity Insight ID', skip_before: true do expect(json_response[:data].size).to eq(1) expect(json_response[:data].first[:attributes][:activity_insight_ids].size).to eq(1) expect(json_response[:data].first[:attributes][:activity_insight_ids].first).to eq('123') @@ -138,7 +138,7 @@ def query_pubs query_pubs end - it 'returns a unique list of publications matching the specified DOI', :skip_before do + it 'returns a unique list of publications matching the specified DOI', skip_before: true do expect(json_response[:data].size).to eq(1) expect(json_response[:data].first[:attributes][:doi]).to eq('https://doi.org/10.26207/46a7-9981') end diff --git a/spec/requests/api/v1/users_spec.rb b/spec/requests/api/v1/users_spec.rb index 87b66adb7..ed274029a 100644 --- a/spec/requests/api/v1/users_spec.rb +++ b/spec/requests/api/v1/users_spec.rb @@ -104,7 +104,7 @@ end it 'returns 404' do - expect(response).to have_http_status :not_found + expect(response.code).to eq '404' end end end @@ -192,7 +192,7 @@ end it 'returns 404' do - expect(response).to have_http_status :not_found + expect(response.code).to eq '404' end end end @@ -274,7 +274,7 @@ end it 'returns 404' do - expect(response).to have_http_status :not_found + expect(response.code).to eq '404' end end end @@ -350,7 
+350,7 @@ end it 'returns 404' do - expect(response).to have_http_status :not_found + expect(response.code).to eq '404' end end end @@ -408,7 +408,7 @@ end it 'returns 404' do - expect(response).to have_http_status :not_found + expect(response.code).to eq '404' end end end @@ -526,7 +526,7 @@ end it 'returns 404' do - expect(response).to have_http_status :not_found + expect(response.code).to eq '404' end end end @@ -665,7 +665,7 @@ end it 'returns 404' do - expect(response).to have_http_status :not_found + expect(response.code).to eq '404' end end end @@ -693,7 +693,11 @@ before do person = instance_spy(PsuIdentity::SearchService::Person) allow_any_instance_of(PsuIdentity::SearchService::Client).to receive(:userid).with(webaccess_id).and_return(person) # rubocop:todo RSpec/AnyInstance - allow(person).to receive_messages(as_json: { 'data' => {} }, preferred_given_name: 'Bob', preferred_middle_name: '', middle_name: '', preferred_family_name: 'Testerson') + allow(person).to receive(:as_json).and_return({ 'data' => {} }) + allow(person).to receive(:preferred_given_name).and_return('Bob') + allow(person).to receive(:preferred_middle_name).and_return('') + allow(person).to receive(:middle_name).and_return('') + allow(person).to receive(:preferred_family_name).and_return('Testerson') get "/v1/users/#{webaccess_id}/profile", headers: headers end diff --git a/spec/requests/webhooks_controller_spec.rb b/spec/requests/webhooks_spec.rb similarity index 100% rename from spec/requests/webhooks_controller_spec.rb rename to spec/requests/webhooks_spec.rb diff --git a/spec/support/authentication.rb b/spec/support/authentication.rb index af7067f36..cd24e6ffc 100644 --- a/spec/support/authentication.rb +++ b/spec/support/authentication.rb @@ -7,7 +7,10 @@ module StubbedAuthenticationHelper def sign_in_as(user) person = instance_double(PsuIdentity::SearchService::Person) allow_any_instance_of(PsuIdentity::SearchService::Client).to 
receive(:userid).with(user.webaccess_id).and_return(person) # rubocop:todo RSpec/AnyInstance - allow(person).to receive_messages(preferred_given_name: 'Test', preferred_middle_name: 'A', preferred_family_name: 'Person', as_json: { 'data' => {} }) + allow(person).to receive(:preferred_given_name).and_return('Test') + allow(person).to receive(:preferred_middle_name).and_return('A') + allow(person).to receive(:preferred_family_name).and_return('Person') + allow(person).to receive(:as_json).and_return({ 'data' => {} }) OmniAuth.config.test_mode = true OmniAuth.config.mock_auth[:azure_oauth] = OmniAuth::AuthHash.new( diff --git a/spec/support/matchers/have_attr_accessor.rb b/spec/support/matchers/have_attr_accessor.rb index ca0231cae..d0dc99332 100644 --- a/spec/support/matchers/have_attr_accessor.rb +++ b/spec/support/matchers/have_attr_accessor.rb @@ -3,7 +3,7 @@ RSpec::Matchers.define :have_attr_accessor do |attribute_name| match do |model| model.respond_to?(attribute_name) && - model.respond_to?(:"#{attribute_name}=") + model.respond_to?("#{attribute_name}=") end failure_message do |model| diff --git a/spec/unit/models/authorship_merge_policy_spec.rb b/spec/unit/models/authorship_merge_policy_spec.rb index e90bad3e8..80ce479eb 100644 --- a/spec/unit/models/authorship_merge_policy_spec.rb +++ b/spec/unit/models/authorship_merge_policy_spec.rb @@ -327,7 +327,7 @@ let(:auth3) { double 'authorship 3', scholarsphere_work_deposits: [] } it 'returns all of the associated ScholarSphere work deposits from each authorship' do - expect(amp.scholarsphere_deposits_to_keep).to contain_exactly(dep1, dep2, dep3) + expect(amp.scholarsphere_deposits_to_keep).to match_array [dep1, dep2, dep3] end end end diff --git a/vendor/cache/erubi-1.13.0.gem b/vendor/cache/erubi-1.13.0.gem new file mode 100644 index 000000000..193fdf64c Binary files /dev/null and b/vendor/cache/erubi-1.13.0.gem differ diff --git a/vendor/cache/erubi-1.13.1.gem b/vendor/cache/erubi-1.13.1.gem deleted file mode 
100644 index 2b1dd0305..000000000 Binary files a/vendor/cache/erubi-1.13.1.gem and /dev/null differ diff --git a/vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/gem_make.out b/vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/gem_make.out deleted file mode 100644 index bcac034e5..000000000 --- a/vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/gem_make.out +++ /dev/null @@ -1,41 +0,0 @@ -current directory: /Users/ajk5603/projects/researcher-metadata/vendor/cache/puma-fba741b91780/ext/puma_http11 -/Users/ajk5603/.rbenv/versions/3.1.6/bin/ruby -I /Users/ajk5603/.rbenv/versions/3.1.6/lib/ruby/3.1.0 extconf.rb -checking for BIO_read() in -llibcrypto... no -checking for BIO_read() in -lcrypto... yes -checking for SSL_CTX_new() in -lssl... yes -checking for openssl/bio.h... yes - -──── Below are yes for 1.0.2 & later ──── -checking for DTLS_method() in openssl/ssl.h... yes -checking for SSL_CTX_set_session_cache_mode(NULL, 0) in openssl/ssl.h... yes - -──── Below are yes for 1.1.0 & later ──── -checking for TLS_server_method() in openssl/ssl.h... yes -checking for SSL_CTX_set_min_proto_version(NULL, 0) in openssl/ssl.h... yes - -──── Below is yes for 1.1.0 and later, but isn't documented until 3.0.0 ──── -checking for SSL_CTX_set_dh_auto(NULL, 0) in openssl/ssl.h... yes - -──── Below is yes for 1.1.1 & later ──── -checking for SSL_CTX_set_ciphersuites(NULL, "") in openssl/ssl.h... yes - -──── Below is yes for 3.0.0 & later ──── -checking for SSL_get1_peer_certificate() in openssl/ssl.h... yes - -checking for Random.bytes... 
yes -creating Makefile - -current directory: /Users/ajk5603/projects/researcher-metadata/vendor/cache/puma-fba741b91780/ext/puma_http11 -make DESTDIR\= sitearchdir\=./.gem.20241218-65016-9zy7gg sitelibdir\=./.gem.20241218-65016-9zy7gg clean - -current directory: /Users/ajk5603/projects/researcher-metadata/vendor/cache/puma-fba741b91780/ext/puma_http11 -make DESTDIR\= sitearchdir\=./.gem.20241218-65016-9zy7gg sitelibdir\=./.gem.20241218-65016-9zy7gg -compiling http11_parser.c -compiling mini_ssl.c -compiling puma_http11.c -linking shared-object puma/puma_http11.bundle -ld: warning: ignoring duplicate libraries: '-lruby.3.1' - -current directory: /Users/ajk5603/projects/researcher-metadata/vendor/cache/puma-fba741b91780/ext/puma_http11 -make DESTDIR\= sitearchdir\=./.gem.20241218-65016-9zy7gg sitelibdir\=./.gem.20241218-65016-9zy7gg install -/usr/bin/install -c -m 0755 puma_http11.bundle ./.gem.20241218-65016-9zy7gg/puma diff --git a/vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/mkmf.log b/vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/mkmf.log deleted file mode 100644 index 704cee151..000000000 --- a/vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/mkmf.log +++ /dev/null @@ -1,353 +0,0 @@ -have_library: checking for BIO_read() in -llibcrypto... -------------------- no - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. 
-I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lruby.3.1 " -checked program was: -/* begin */ -1: #include "ruby.h" -2: -3: int main(int argc, char **argv) -4: { -5: return !!argv[argc]; -6: } -/* end */ - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. 
-L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lruby.3.1 -llibcrypto " -conftest.c:14:57: error: use of undeclared identifier 'BIO_read' - 14 | int t(void) { void ((*volatile p)()); p = (void ((*)()))BIO_read; return !p; } - | ^ -1 error generated. -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: /*top*/ - 4: extern int t(void); - 5: int main(int argc, char **argv) - 6: { - 7: if (argc > 1000000) { - 8: int (* volatile tp)(void)=(int (*)(void))&t; - 9: printf("%d", (*tp)()); -10: } -11: -12: return !!argv[argc]; -13: } -14: int t(void) { void ((*volatile p)()); p = (void ((*)()))BIO_read; return !p; } -/* end */ - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. 
-fstack-protector-strong -lruby.3.1 -llibcrypto " -ld: library 'libcrypto' not found -clang: error: linker command failed with exit code 1 (use -v to see invocation) -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: /*top*/ - 4: extern int t(void); - 5: int main(int argc, char **argv) - 6: { - 7: if (argc > 1000000) { - 8: int (* volatile tp)(void)=(int (*)(void))&t; - 9: printf("%d", (*tp)()); -10: } -11: -12: return !!argv[argc]; -13: } -14: extern void BIO_read(); -15: int t(void) { BIO_read(); return 0; } -/* end */ - --------------------- - -have_library: checking for BIO_read() in -lcrypto... -------------------- yes - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lruby.3.1 -lcrypto " -conftest.c:14:57: error: use of undeclared identifier 'BIO_read' - 14 | int t(void) { void ((*volatile p)()); p = (void ((*)()))BIO_read; return !p; } - | ^ -1 error generated. 
-checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: /*top*/ - 4: extern int t(void); - 5: int main(int argc, char **argv) - 6: { - 7: if (argc > 1000000) { - 8: int (* volatile tp)(void)=(int (*)(void))&t; - 9: printf("%d", (*tp)()); -10: } -11: -12: return !!argv[argc]; -13: } -14: int t(void) { void ((*volatile p)()); p = (void ((*)()))BIO_read; return !p; } -/* end */ - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lruby.3.1 -lcrypto " -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: /*top*/ - 4: extern int t(void); - 5: int main(int argc, char **argv) - 6: { - 7: if (argc > 1000000) { - 8: int (* volatile tp)(void)=(int (*)(void))&t; - 9: printf("%d", (*tp)()); -10: } -11: -12: return !!argv[argc]; -13: } -14: extern void BIO_read(); -15: int t(void) { BIO_read(); return 0; } -/* end */ - --------------------- - -have_library: checking for SSL_CTX_new() in -lssl... 
-------------------- yes - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lcrypto -lruby.3.1 -lssl -lcrypto " -conftest.c:14:57: error: use of undeclared identifier 'SSL_CTX_new' - 14 | int t(void) { void ((*volatile p)()); p = (void ((*)()))SSL_CTX_new; return !p; } - | ^ -1 error generated. 
-checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: /*top*/ - 4: extern int t(void); - 5: int main(int argc, char **argv) - 6: { - 7: if (argc > 1000000) { - 8: int (* volatile tp)(void)=(int (*)(void))&t; - 9: printf("%d", (*tp)()); -10: } -11: -12: return !!argv[argc]; -13: } -14: int t(void) { void ((*volatile p)()); p = (void ((*)()))SSL_CTX_new; return !p; } -/* end */ - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. 
-fstack-protector-strong -lcrypto -lruby.3.1 -lssl -lcrypto " -ld: warning: ignoring duplicate libraries: '-lcrypto' -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: /*top*/ - 4: extern int t(void); - 5: int main(int argc, char **argv) - 6: { - 7: if (argc > 1000000) { - 8: int (* volatile tp)(void)=(int (*)(void))&t; - 9: printf("%d", (*tp)()); -10: } -11: -12: return !!argv[argc]; -13: } -14: extern void SSL_CTX_new(); -15: int t(void) { SSL_CTX_new(); return 0; } -/* end */ - --------------------- - -have_header: checking for openssl/bio.h... -------------------- yes - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe -c conftest.c" -checked program was: -/* begin */ -1: #include "ruby.h" -2: -3: #include -/* end */ - --------------------- - -have_func: checking for DTLS_method() in openssl/ssl.h... 
-------------------- yes - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lssl -lcrypto -lruby.3.1 -lssl -lcrypto " -ld: warning: ignoring duplicate libraries: '-lcrypto', '-lssl' -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: #include - 4: - 5: /*top*/ - 6: extern int t(void); - 7: int main(int argc, char **argv) - 8: { - 9: if (argc > 1000000) { -10: int (* volatile tp)(void)=(int (*)(void))&t; -11: printf("%d", (*tp)()); -12: } -13: -14: return !!argv[argc]; -15: } -16: int t(void) { void ((*volatile p)()); p = (void ((*)()))DTLS_method; return !p; } -/* end */ - --------------------- - -have_func: checking for SSL_CTX_set_session_cache_mode(NULL, 0) in openssl/ssl.h... 
-------------------- yes - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lssl -lcrypto -lruby.3.1 -lssl -lcrypto " -ld: warning: ignoring duplicate libraries: '-lcrypto', '-lssl' -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: #include - 4: - 5: /*top*/ - 6: extern int t(void); - 7: int main(int argc, char **argv) - 8: { - 9: if (argc > 1000000) { -10: int (* volatile tp)(void)=(int (*)(void))&t; -11: printf("%d", (*tp)()); -12: } -13: -14: return !!argv[argc]; -15: } -16: -17: int t(void) { SSL_CTX_set_session_cache_mode(NULL, 0); return 0; } -/* end */ - --------------------- - -have_func: checking for TLS_server_method() in openssl/ssl.h... 
-------------------- yes - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lssl -lcrypto -lruby.3.1 -lssl -lcrypto " -ld: warning: ignoring duplicate libraries: '-lcrypto', '-lssl' -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: #include - 4: - 5: /*top*/ - 6: extern int t(void); - 7: int main(int argc, char **argv) - 8: { - 9: if (argc > 1000000) { -10: int (* volatile tp)(void)=(int (*)(void))&t; -11: printf("%d", (*tp)()); -12: } -13: -14: return !!argv[argc]; -15: } -16: int t(void) { void ((*volatile p)()); p = (void ((*)()))TLS_server_method; return !p; } -/* end */ - --------------------- - -have_func: checking for SSL_CTX_set_min_proto_version(NULL, 0) in openssl/ssl.h... 
-------------------- yes - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lssl -lcrypto -lruby.3.1 -lssl -lcrypto " -ld: warning: ignoring duplicate libraries: '-lcrypto', '-lssl' -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: #include - 4: - 5: /*top*/ - 6: extern int t(void); - 7: int main(int argc, char **argv) - 8: { - 9: if (argc > 1000000) { -10: int (* volatile tp)(void)=(int (*)(void))&t; -11: printf("%d", (*tp)()); -12: } -13: -14: return !!argv[argc]; -15: } -16: -17: int t(void) { SSL_CTX_set_min_proto_version(NULL, 0); return 0; } -/* end */ - --------------------- - -have_func: checking for SSL_CTX_set_dh_auto(NULL, 0) in openssl/ssl.h... 
-------------------- yes - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lssl -lcrypto -lruby.3.1 -lssl -lcrypto " -ld: warning: ignoring duplicate libraries: '-lcrypto', '-lssl' -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: #include - 4: - 5: /*top*/ - 6: extern int t(void); - 7: int main(int argc, char **argv) - 8: { - 9: if (argc > 1000000) { -10: int (* volatile tp)(void)=(int (*)(void))&t; -11: printf("%d", (*tp)()); -12: } -13: -14: return !!argv[argc]; -15: } -16: -17: int t(void) { SSL_CTX_set_dh_auto(NULL, 0); return 0; } -/* end */ - --------------------- - -have_func: checking for SSL_CTX_set_ciphersuites(NULL, "") in openssl/ssl.h... 
-------------------- yes - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. -fstack-protector-strong -lssl -lcrypto -lruby.3.1 -lssl -lcrypto " -ld: warning: ignoring duplicate libraries: '-lcrypto', '-lssl' -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: #include - 4: - 5: /*top*/ - 6: extern int t(void); - 7: int main(int argc, char **argv) - 8: { - 9: if (argc > 1000000) { -10: int (* volatile tp)(void)=(int (*)(void))&t; -11: printf("%d", (*tp)()); -12: } -13: -14: return !!argv[argc]; -15: } -16: -17: int t(void) { char s1[1024]; SSL_CTX_set_ciphersuites(NULL, s1); return 0; } -/* end */ - --------------------- - -have_func: checking for SSL_get1_peer_certificate() in openssl/ssl.h... 
-------------------- yes - -DYLD_FALLBACK_LIBRARY_PATH=.:/Users/ajk5603/.rbenv/versions/3.1.6/lib "clang -o conftest -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/arm64-darwin24 -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0/ruby/backward -I/Users/ajk5603/.rbenv/versions/3.1.6/include/ruby-3.1.0 -I. -I/opt/homebrew/opt/openssl@3/include -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE -D_DARWIN_UNLIMITED_SELECT -D_REENTRANT -fdeclspec -O3 -fno-fast-math -ggdb3 -Wall -Wextra -Wdeprecated-declarations -Wdivision-by-zero -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wshorten-64-to-32 -Wwrite-strings -Wold-style-definition -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wunused-variable -Wextra-tokens -Wundef -fno-common -pipe conftest.c -L. -L/Users/ajk5603/.rbenv/versions/3.1.6/lib -L/opt/homebrew/opt/openssl@3/lib -L. 
-fstack-protector-strong -lssl -lcrypto -lruby.3.1 -lssl -lcrypto " -ld: warning: ignoring duplicate libraries: '-lcrypto', '-lssl' -checked program was: -/* begin */ - 1: #include "ruby.h" - 2: - 3: #include - 4: - 5: /*top*/ - 6: extern int t(void); - 7: int main(int argc, char **argv) - 8: { - 9: if (argc > 1000000) { -10: int (* volatile tp)(void)=(int (*)(void))&t; -11: printf("%d", (*tp)()); -12: } -13: -14: return !!argv[argc]; -15: } -16: int t(void) { void ((*volatile p)()); p = (void ((*)()))SSL_get1_peer_certificate; return !p; } -/* end */ - --------------------- - diff --git a/vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/puma/puma_http11.bundle b/vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/puma/puma_http11.bundle deleted file mode 100755 index 6301bdd88..000000000 Binary files a/vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/puma/puma_http11.bundle and /dev/null differ diff --git a/vendor/cache/ffi-1.16.3.gem b/vendor/cache/ffi-1.16.3.gem new file mode 100644 index 000000000..fbca94826 Binary files /dev/null and b/vendor/cache/ffi-1.16.3.gem differ diff --git a/vendor/cache/ffi-1.17.0-x86_64-linux-gnu.gem b/vendor/cache/ffi-1.17.0-x86_64-linux-gnu.gem deleted file mode 100644 index cf4f489fc..000000000 Binary files a/vendor/cache/ffi-1.17.0-x86_64-linux-gnu.gem and /dev/null differ diff --git a/vendor/cache/json-2.6.3.gem b/vendor/cache/json-2.6.3.gem new file mode 100644 index 000000000..f76ee7480 Binary files /dev/null and b/vendor/cache/json-2.6.3.gem differ diff --git a/vendor/cache/json-2.9.1.gem b/vendor/cache/json-2.9.1.gem deleted file mode 100644 index b8e6fde70..000000000 Binary files a/vendor/cache/json-2.9.1.gem and /dev/null differ diff --git a/vendor/cache/language_server-protocol-3.17.0.3.gem b/vendor/cache/language_server-protocol-3.17.0.3.gem deleted file mode 100644 index c70b95aa2..000000000 Binary files a/vendor/cache/language_server-protocol-3.17.0.3.gem and /dev/null differ 
diff --git a/vendor/cache/logger-1.6.2.gem b/vendor/cache/logger-1.6.2.gem new file mode 100644 index 000000000..d87cb00e4 Binary files /dev/null and b/vendor/cache/logger-1.6.2.gem differ diff --git a/vendor/cache/logger-1.6.4.gem b/vendor/cache/logger-1.6.4.gem deleted file mode 100644 index 3b4b6d4c0..000000000 Binary files a/vendor/cache/logger-1.6.4.gem and /dev/null differ diff --git a/vendor/cache/mini_portile2-2.8.7.gem b/vendor/cache/mini_portile2-2.8.7.gem new file mode 100644 index 000000000..ffb238a79 Binary files /dev/null and b/vendor/cache/mini_portile2-2.8.7.gem differ diff --git a/vendor/cache/mini_portile2-2.8.8.gem b/vendor/cache/mini_portile2-2.8.8.gem deleted file mode 100644 index 2234073ee..000000000 Binary files a/vendor/cache/mini_portile2-2.8.8.gem and /dev/null differ diff --git a/vendor/cache/minitest-5.25.2.gem b/vendor/cache/minitest-5.25.2.gem new file mode 100644 index 000000000..617fe1170 Binary files /dev/null and b/vendor/cache/minitest-5.25.2.gem differ diff --git a/vendor/cache/minitest-5.25.4.gem b/vendor/cache/minitest-5.25.4.gem deleted file mode 100644 index f07706fbc..000000000 Binary files a/vendor/cache/minitest-5.25.4.gem and /dev/null differ diff --git a/vendor/cache/niftany-0.10.1.gem b/vendor/cache/niftany-0.10.1.gem new file mode 100644 index 000000000..61ba67b71 Binary files /dev/null and b/vendor/cache/niftany-0.10.1.gem differ diff --git a/vendor/cache/niftany-0.11.0.gem b/vendor/cache/niftany-0.11.0.gem deleted file mode 100644 index a95c28a2e..000000000 Binary files a/vendor/cache/niftany-0.11.0.gem and /dev/null differ diff --git a/vendor/cache/nokogiri-1.16.7-x86_64-linux.gem b/vendor/cache/nokogiri-1.16.7-x86_64-linux.gem new file mode 100644 index 000000000..1e787f78e Binary files /dev/null and b/vendor/cache/nokogiri-1.16.7-x86_64-linux.gem differ diff --git a/vendor/cache/nokogiri-1.17.2-x86_64-linux.gem b/vendor/cache/nokogiri-1.17.2-x86_64-linux.gem deleted file mode 100644 index e8a9b85ac..000000000 
Binary files a/vendor/cache/nokogiri-1.17.2-x86_64-linux.gem and /dev/null differ diff --git a/vendor/cache/ox-2.14.18.gem b/vendor/cache/ox-2.14.18.gem deleted file mode 100644 index 0b12ee46e..000000000 Binary files a/vendor/cache/ox-2.14.18.gem and /dev/null differ diff --git a/vendor/cache/ox-2.14.6.gem b/vendor/cache/ox-2.14.6.gem new file mode 100644 index 000000000..39036bd94 Binary files /dev/null and b/vendor/cache/ox-2.14.6.gem differ diff --git a/vendor/cache/parallel-1.22.1.gem b/vendor/cache/parallel-1.22.1.gem new file mode 100644 index 000000000..5208c791b Binary files /dev/null and b/vendor/cache/parallel-1.22.1.gem differ diff --git a/vendor/cache/parallel-1.26.3.gem b/vendor/cache/parallel-1.26.3.gem deleted file mode 100644 index e12397d9b..000000000 Binary files a/vendor/cache/parallel-1.26.3.gem and /dev/null differ diff --git a/vendor/cache/parser-3.2.0.0.gem b/vendor/cache/parser-3.2.0.0.gem new file mode 100644 index 000000000..105a202a5 Binary files /dev/null and b/vendor/cache/parser-3.2.0.0.gem differ diff --git a/vendor/cache/parser-3.3.6.0.gem b/vendor/cache/parser-3.3.6.0.gem deleted file mode 100644 index 951d5b5e2..000000000 Binary files a/vendor/cache/parser-3.3.6.0.gem and /dev/null differ diff --git a/vendor/cache/puma-6.5.0.gem b/vendor/cache/puma-6.5.0.gem deleted file mode 100644 index a03e0e79b..000000000 Binary files a/vendor/cache/puma-6.5.0.gem and /dev/null differ diff --git a/vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/gem.build_complete b/vendor/cache/puma-fba741b91780/.bundlecache similarity index 100% rename from vendor/cache/extensions/arm64-darwin-24/3.1.0/puma-fba741b91780/gem.build_complete rename to vendor/cache/puma-fba741b91780/.bundlecache diff --git a/vendor/cache/puma-fba741b91780/.codeclimate.yml b/vendor/cache/puma-fba741b91780/.codeclimate.yml new file mode 100644 index 000000000..8e0dd4aad --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.codeclimate.yml @@ -0,0 +1,2 @@ 
+exclude_patterns: +- "ext/" diff --git a/vendor/cache/puma-fba741b91780/.devcontainer/Dockerfile b/vendor/cache/puma-fba741b91780/.devcontainer/Dockerfile new file mode 100644 index 000000000..00aa4182d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.devcontainer/Dockerfile @@ -0,0 +1,19 @@ +# See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.245.2/containers/ruby/.devcontainer/base.Dockerfile + +# [Choice] Ruby version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.1, 3.0, 2, 2.7, 3-bullseye, 3.1-bullseye, 3.0-bullseye, 2-bullseye, 2.7-bullseye, 3-buster, 3.1-buster, 3.0-buster, 2-buster, 2.7-buster +ARG VARIANT="3.1-bullseye" +FROM mcr.microsoft.com/vscode/devcontainers/ruby:0-${VARIANT} + +# [Choice] Node.js version: none, lts/*, 16, 14, 12, 10 +ARG NODE_VERSION="none" +RUN if [ "${NODE_VERSION}" != "none" ]; then su vscode -c "umask 0002 && . /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi + +# [Optional] Uncomment this section to install additional OS packages. +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + && apt-get -y install --no-install-recommends ragel + +# [Optional] Uncomment this line to install additional gems. +# RUN gem install + +# [Optional] Uncomment this line to install global node packages. +# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g " 2>&1 diff --git a/vendor/cache/puma-fba741b91780/.devcontainer/devcontainer.json b/vendor/cache/puma-fba741b91780/.devcontainer/devcontainer.json new file mode 100644 index 000000000..6849d08e4 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.devcontainer/devcontainer.json @@ -0,0 +1,37 @@ +// For format details, see https://aka.ms/devcontainer.json. 
For config options, see the README at: +// https://github.com/microsoft/vscode-dev-containers/tree/v0.245.2/containers/ruby +{ + "name": "Ruby", + "build": { + "dockerfile": "Dockerfile", + "args": { + // Update 'VARIANT' to pick a Ruby version: 3, 3.1, 3.0, 2, 2.7 + // Append -bullseye or -buster to pin to an OS version. + // Use -bullseye variants on local on arm64/Apple Silicon. + "VARIANT": "3.1-bullseye", + // Options + "NODE_VERSION": "none" + } + }, + + // Configure tool-specific properties. + "customizations": { + // Configure properties specific to VS Code. + "vscode": { + // Add the IDs of extensions you want installed when the container is created. + "extensions": [ + "rebornix.Ruby" + ] + } + }, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "bundle install && bundle exec rake compile", + + // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. + "remoteUser": "vscode" + +} diff --git a/vendor/cache/puma-fba741b91780/.gitattributes b/vendor/cache/puma-fba741b91780/.gitattributes new file mode 100644 index 000000000..77f597a01 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.gitattributes @@ -0,0 +1,3 @@ +# Auto detect text files and perform LF normalization +* text eol=lf +*.png binary diff --git a/vendor/cache/puma-fba741b91780/.github/ISSUE_TEMPLATE/bug_report.md b/vendor/cache/puma-fba741b91780/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..040bc8009 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,48 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. 
+ +**Puma config:** + +Please copy-paste your Puma config AND your command line options here. + +**To Reproduce** +Please add reproduction steps here. + +Your issue will be solved very quickly if you can reproduce it with a "hello world" rack application. To do this, copy this into a file called `hello.ru`: + +``` +run lambda { |env| [200, {"Content-Type" => "text/plain"}, ["Hello World"]] } +``` + +Run it with: + +``` +bundle exec puma -C hello.ru +``` + +If you cannot reproduce with a hello world application or other simple application, we will have a lot more difficulty helping you fix your issue, because it may be application-specific and not a bug in Puma at all. + +There is also a Dockerfile available for reproducing Linux-specific issues. To use: + +``` +$ docker build -f tools/docker/Dockerfile -t puma . +$ docker run -p 9292:9292 -it puma +``` + +This will help you to create a container that reproduces your issue. + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Desktop (please complete the following information):** + - OS: [e.g. Mac, Linux] + - Puma Version [e.g. 4.1.1] diff --git a/vendor/cache/puma-fba741b91780/.github/ISSUE_TEMPLATE/feature_request.md b/vendor/cache/puma-fba741b91780/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..bbcbbe7d6 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. 
+ +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/vendor/cache/puma-fba741b91780/.github/dependabot.yml b/vendor/cache/puma-fba741b91780/.github/dependabot.yml new file mode 100644 index 000000000..5ace4600a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.github/dependabot.yml @@ -0,0 +1,6 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" diff --git a/vendor/cache/puma-fba741b91780/.github/pull_request_template.md b/vendor/cache/puma-fba741b91780/.github/pull_request_template.md new file mode 100644 index 000000000..cc5dd721e --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.github/pull_request_template.md @@ -0,0 +1,13 @@ +### Description +Please describe your pull request. Thank you for contributing! You're the best. + +### Your checklist for this pull request + + +- [ ] I have reviewed the [guidelines for contributing](../blob/master/CONTRIBUTING.md) to this repository. +- [ ] I have added (or updated) appropriate tests if this PR fixes a bug or adds a feature. +- [ ] My pull request is 100 lines added/removed or less so that it can be easily reviewed. +- [ ] If this PR doesn't need tests (docs change), I added `[ci skip]` to the title of the PR. +- [ ] If this closes any issues, I have added "Closes `#issue`" to the PR description or my commit messages. +- [ ] I have updated the documentation accordingly. +- [ ] All new and existing tests passed, including Rubocop. 
diff --git a/vendor/cache/puma-fba741b91780/.github/workflows/github_actions_info.rb b/vendor/cache/puma-fba741b91780/.github/workflows/github_actions_info.rb new file mode 100644 index 000000000..891809007 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.github/workflows/github_actions_info.rb @@ -0,0 +1,8 @@ +# logs repo/commit info + +puts "ENV['GITHUB_WORKFLOW_REF'] #{ENV['GITHUB_WORKFLOW_REF']}\n" \ + "ENV['GITHUB_WORKFLOW_SHA'] #{ENV['GITHUB_WORKFLOW_SHA']}\n" \ + "ENV['GITHUB_REPOSITORY'] #{ENV['GITHUB_REPOSITORY']}\n" \ + "ENV['GITHUB_REF_TYPE'] #{ENV['GITHUB_REF_TYPE']}\n" \ + "ENV['GITHUB_REF'] #{ENV['GITHUB_REF']}\n" \ + "ENV['GITHUB_REF_NAME'] #{ENV['GITHUB_REF_NAME']}" diff --git a/vendor/cache/puma-fba741b91780/.github/workflows/rack_conform.yml b/vendor/cache/puma-fba741b91780/.github/workflows/rack_conform.yml new file mode 100644 index 000000000..fa7f55b94 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.github/workflows/rack_conform.yml @@ -0,0 +1,69 @@ +name: rack-conform + +on: [push, pull_request, workflow_dispatch] + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + skip_duplicate_runs: + uses: ./.github/workflows/skip_duplicate_workflow_runs.yml + + rack-conform: + name: >- + ${{ matrix.os }} Ruby ${{ matrix.ruby }} rack-conform + needs: skip_duplicate_runs + runs-on: ${{ matrix.os }} + if: | + !( contains(github.event.pull_request.title, '[ci skip]') + || contains(github.event.pull_request.title, '[skip ci]') + || (needs.skip_duplicate_runs.outputs.should_skip == 'true')) + strategy: + fail-fast: false + matrix: + include: + - { os: ubuntu-20.04 , ruby: '3.1' } + - { os: ubuntu-20.04 , ruby: '3.2' } + - { os: ubuntu-22.04 , ruby: '3.3' } + - { os: ubuntu-22.04 , ruby: head } + + env: + BUNDLE_GEMFILE: gems/puma-head-rack-v3.rb + RACK_CONFORM_SERVER: puma + RACK_CONFORM_ENDPOINT: http://localhost:9292 + + steps: + - name: checkout rack-conform + uses: actions/checkout@v4 + with: + repository: 
socketry/rack-conform + + - name: Update gems/puma-head-rack-v3.rb + run: | + # use Puma from current repo (may be a fork) & sha + SRC="gem ['\"]puma['\"].*" + DST="gem 'puma', git: 'https://github.com/$GITHUB_REPOSITORY.git', ref: '$GITHUB_SHA'" + sed -i "s#$SRC#$DST#" gems/puma-head-rack-v3.rb + + - name: load ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby }} + rubygems: latest + bundler-cache: true + timeout-minutes: 10 + + - name: cat gems/puma-head-rack-v3.rb.lock + run: cat gems/puma-head-rack-v3.rb.lock + + - name: rack-conform test + id: test + timeout-minutes: 10 + run: bundle exec bake test + continue-on-error: true + if: success() + + - name: >- + Test outcome: ${{ steps.test.outcome }} + # every step must define a `uses` or `run` key + run: cat server.log diff --git a/vendor/cache/puma-fba741b91780/.github/workflows/ragel.yml b/vendor/cache/puma-fba741b91780/.github/workflows/ragel.yml new file mode 100644 index 000000000..136604a0d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.github/workflows/ragel.yml @@ -0,0 +1,87 @@ +name: ragel + +on: [push, pull_request, workflow_dispatch] + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + skip_duplicate_runs: + uses: ./.github/workflows/skip_duplicate_workflow_runs.yml + with: + paths: '["ext/**", ".github/workflows/ragel.yml"]' + + ragel: + name: >- + ragel ${{ matrix.os }} ${{ matrix.ruby }} + needs: skip_duplicate_runs + env: + PUMA_NO_RUBOCOP: true + PUMA_TEST_DEBUG: true + + runs-on: ${{ matrix.os }} + if: | + !( contains(github.event.pull_request.title, '[ci skip]') + || contains(github.event.pull_request.title, '[skip ci]')) + strategy: + fail-fast: false + matrix: + include: + - { os: ubuntu-22.04 , ruby: head } + - { os: macos-13 , ruby: head } + # Dec-2023 - incorrect line directives with Windows + # occurs with both MSYS2 and MSFT/vpkg versions of ragel + # - { os: windows-2022 , ruby: ucrt } + + steps: + # windows git will convert \n to \r\n 
+ - name: git config + if: | + startsWith(matrix.os, 'windows') && + (needs.skip_duplicate_runs.outputs.should_skip != 'true') + run: | + git config --global core.autocrlf false + git config --global core.eol lf + + - name: repo checkout + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + uses: actions/checkout@v4 + + - name: load ruby + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + uses: ruby/setup-ruby-pkgs@v1 + with: + ruby-version: ${{ matrix.ruby }} + apt-get: ragel + brew: ragel + bundler-cache: true + timeout-minutes: 10 + + - name: check ragel generation + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + shell: pwsh + run: | + ragel --version + Remove-Item -Path ext/puma_http11/http11_parser.c + Remove-Item -Path ext/puma_http11/org/jruby/puma/Http11Parser.java + bundle exec rake ragel + if ($IsWindows) { + dos2unix ext/puma_http11/http11_parser.c + dos2unix ext/puma_http11/org/jruby/puma/Http11Parser.java + } + $git_out = $(git status --porcelain) + if ($git_out -ne $null) { + echo "** $git_out **`n" + git --no-pager diff + echo "`nbundle exec ragel changes a file" + exit 1 + } + + - name: save ragel generated files on fail + uses: actions/upload-artifact@v4 + if: ${{ failure() }} + with: + name: ${{ matrix.os }}-ragel-generated-files + path: | + ext/puma_http11/http11_parser.c + ext/puma_http11/org/jruby/puma/Http11Parser.java diff --git a/vendor/cache/puma-fba741b91780/.github/workflows/skip_duplicate_workflow_runs.yml b/vendor/cache/puma-fba741b91780/.github/workflows/skip_duplicate_workflow_runs.yml new file mode 100644 index 000000000..3073a8902 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.github/workflows/skip_duplicate_workflow_runs.yml @@ -0,0 +1,32 @@ +name: Skip Duplicate Workflow Runs + +on: + workflow_call: + inputs: + paths: + description: 'A JSON-array with path patterns' + default: '[]' + required: false + type: string + outputs: + should_skip: + description: "The output 
from the skip_duplicate_runs job" + value: ${{ jobs.skip_duplicate_runs.outputs.should_skip }} + +permissions: + contents: read + +jobs: + skip_duplicate_runs: + name: 'Skip Duplicate Runs' + runs-on: ubuntu-latest + outputs: + should_skip: ${{ steps.skip_check.outputs.should_skip }} + steps: + - id: skip_check + uses: fkirc/skip-duplicate-actions@v5.3.1 + with: + paths_ignore: '["**.md"]' + paths: ${{ inputs.paths }} + concurrent_skipping: 'never' # this feature has bugs + skip_after_successful_duplicate: 'true' diff --git a/vendor/cache/puma-fba741b91780/.github/workflows/tests.yml b/vendor/cache/puma-fba741b91780/.github/workflows/tests.yml new file mode 100644 index 000000000..f500688c6 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.github/workflows/tests.yml @@ -0,0 +1,209 @@ +name: Tests + +on: [push, pull_request, workflow_dispatch] + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + skip_duplicate_runs: + uses: ./.github/workflows/skip_duplicate_workflow_runs.yml + + rubocop: + name: RuboCop linting + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: ruby/setup-ruby@v1 + with: + ruby-version: 3.1 + bundler-cache: true # `bundle install` and cache + - name: rubocop + run: bundle exec rake rubocop + + test_mri: + name: >- + ${{ matrix.os }} ${{ matrix.ruby }}${{ matrix.no-ssl }}${{ matrix.yjit }}${{ matrix.rack-v }} + needs: [rubocop, skip_duplicate_runs] + env: + CI: true + PUMA_TEST_DEBUG: true + TESTOPTS: -v + PUMA_NO_RUBOCOP: true + TERM: BugMinitest + + runs-on: ${{ matrix.os }} + if: | + !( contains(github.event.pull_request.title, '[ci skip]') + || contains(github.event.pull_request.title, '[skip ci]')) + strategy: + fail-fast: false + matrix: + os: [ ubuntu-20.04, ubuntu-22.04, ubuntu-24.04, macos-13, macos-14, macos-15, windows-2022 ] + ruby: [ 2.4, 2.5, 2.6, 2.7, '3.0', 3.1, 3.2, 3.3, head ] + no-ssl: [''] + rack-v: [''] + yjit: [''] + include: + - { os: windows-2022 , ruby: ucrt } + - { os: 
windows-2022 , ruby: mswin } + - { os: windows-2022 , ruby: 2.7 , no-ssl: ' no SSL' } + - { os: ubuntu-20.04 , ruby: 2.7 , no-ssl: ' no SSL' } + - { os: ubuntu-22.04 , ruby: head , yjit: ' yjit' } + - { os: ubuntu-22.04 , ruby: 2.4 , rack-v: ' rack2' } + - { os: ubuntu-22.04 , ruby: 3.2 , rack-v: ' rack2' } + - { os: ubuntu-22.04 , ruby: 2.4 , rack-v: ' rack1' } + + exclude: + - { os: ubuntu-22.04 , ruby: 2.4 } + - { os: ubuntu-22.04 , ruby: 2.5 } + - { os: ubuntu-22.04 , ruby: 2.6 } + - { os: ubuntu-22.04 , ruby: 2.7 } + - { os: ubuntu-22.04 , ruby: '3.0' } + - { os: ubuntu-24.04 , ruby: 2.4 } + - { os: ubuntu-24.04 , ruby: 2.5 } + - { os: ubuntu-24.04 , ruby: 2.6 } + - { os: ubuntu-24.04 , ruby: 2.7 } + - { os: ubuntu-24.04 , ruby: '3.0' } + - { os: macos-14 , ruby: 2.4 } + - { os: macos-14 , ruby: 2.5 } + - { os: macos-14 , ruby: 2.6 } + - { os: macos-14 , ruby: 2.7 } + - { os: macos-15 , ruby: 2.4 } + - { os: macos-15 , ruby: 2.5 } + - { os: macos-15 , ruby: 2.6 } + - { os: macos-15 , ruby: 2.7 } + - { os: macos-15 , ruby: '3.0' } + - { os: macos-15 , ruby: 3.2 } + - { os: windows-2022 , ruby: head } + + steps: + - name: repo checkout + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + uses: actions/checkout@v4 + + - name: Compile Puma without SSL support + if: | + (matrix.no-ssl == ' no SSL') && + (needs.skip_duplicate_runs.outputs.should_skip != 'true') + shell: bash + run: echo 'PUMA_DISABLE_SSL=true' >> $GITHUB_ENV + + - name: Set Rack version, see Gemfile + shell: bash + run: echo 'PUMA_CI_RACK=${{ matrix.rack-v }}' >> $GITHUB_ENV + + - name: load ruby + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + uses: ruby/setup-ruby-pkgs@v1 + with: + ruby-version: ${{ matrix.ruby }} + apt-get: ragel + brew: ragel + # below is only needed for Ruby 2.4 + mingw: openssl + rubygems: latest + bundler-cache: true + timeout-minutes: 10 + + - name: Repo & Commit Info + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} 
+ run: ruby .github/workflows/github_actions_info.rb + + - name: set WERRORFLAG + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + shell: bash + run: echo 'PUMA_MAKE_WARNINGS_INTO_ERRORS=true' >> $GITHUB_ENV + + - name: compile + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + run: bundle exec rake compile + + - name: Use yjit + if: | + (matrix.yjit == ' yjit') && + (needs.skip_duplicate_runs.outputs.should_skip != 'true') + shell: bash + run: echo 'RUBYOPT=--yjit' >> $GITHUB_ENV + + - name: test + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + timeout-minutes: 10 + run: test/runner --verbose + + test_non_mri: + name: >- + ${{ matrix.os }} ${{ matrix.ruby }}${{ matrix.no-ssl }} + needs: [rubocop, skip_duplicate_runs] + env: + CI: true + PUMA_TEST_DEBUG: true + TESTOPTS: -v + PUMA_NO_RUBOCOP: true + TERM: BugMinitest + + runs-on: ${{ matrix.os }} + if: | + !( contains(github.event.pull_request.title, '[ci skip]') + || contains(github.event.pull_request.title, '[skip ci]')) + strategy: + fail-fast: false + matrix: + include: + # tto - test timeout + - { tto: 8 , os: ubuntu-22.04 , ruby: jruby } + - { tto: 8 , os: ubuntu-22.04 , ruby: jruby, no-ssl: ' no SSL' } + - { tto: 8 , os: ubuntu-22.04 , ruby: jruby-head, allow-failure: true } + - { tto: 8 , os: ubuntu-20.04 , ruby: truffleruby, allow-failure: true } # Until https://github.com/oracle/truffleruby/issues/2700 is solved + - { tto: 8 , os: ubuntu-20.04 , ruby: truffleruby-head, allow-failure: true } + - { tto: 8 , os: ubuntu-22.04 , ruby: truffleruby, allow-failure: true } # Until https://github.com/oracle/truffleruby/issues/2700 is solved + - { tto: 8 , os: ubuntu-22.04 , ruby: truffleruby-head, allow-failure: true } + - { tto: 8 , os: macos-13 , ruby: jruby } + - { tto: 8 , os: macos-14 , ruby: jruby } + - { tto: 8 , os: macos-13 , ruby: truffleruby, allow-failure: true } + + steps: + - name: repo checkout + if: ${{ 
needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + uses: actions/checkout@v4 + + - name: load ruby, ragel + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + uses: ruby/setup-ruby-pkgs@v1 + with: + ruby-version: ${{ matrix.ruby }} + apt-get: ragel + brew: ragel + bundler: none + bundler-cache: true + timeout-minutes: 10 + + - name: Compile Puma without SSL support + if: | + (matrix.no-ssl == ' no SSL') && + (needs.skip_duplicate_runs.outputs.should_skip != 'true') + shell: bash + run: echo 'PUMA_DISABLE_SSL=true' >> $GITHUB_ENV + + - name: set WERRORFLAG + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + shell: bash + run: echo 'PUMA_MAKE_WARNINGS_INTO_ERRORS=true' >> $GITHUB_ENV + + - name: compile + if: ${{ needs.skip_duplicate_runs.outputs.should_skip != 'true' }} + run: bundle exec rake compile + + - name: test + id: test + timeout-minutes: ${{ matrix.tto }} + continue-on-error: ${{ matrix.allow-failure || false }} + if: | # only run if previous steps have succeeded + success() && + (needs.skip_duplicate_runs.outputs.should_skip != 'true') + run: test/runner --verbose + + - name: >- + Test outcome: ${{ steps.test.outcome }} + # every step must define a `uses` or `run` key + run: echo NOOP diff --git a/vendor/cache/puma-fba741b91780/.github/workflows/turbo-rails.yml b/vendor/cache/puma-fba741b91780/.github/workflows/turbo-rails.yml new file mode 100644 index 000000000..d80486649 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.github/workflows/turbo-rails.yml @@ -0,0 +1,83 @@ +name: turbo-rails + +# Note: turbo-rails often returns an ActionDispatch::Response::RackBody for the +# body. 
Also, Rack::BodyProxy or Sprockets::Asset + +on: [push, pull_request, workflow_dispatch] + +permissions: + contents: read # to fetch code (actions/checkout) + +jobs: + skip_duplicate_runs: + uses: ./.github/workflows/skip_duplicate_workflow_runs.yml + + turbo-rails: + name: >- + ${{ matrix.os }} Ruby ${{ matrix.ruby }} Rails ${{ matrix.rails }} + needs: skip_duplicate_runs + runs-on: ${{ matrix.os }} + if: | + !( contains(github.event.pull_request.title, '[ci skip]') + || contains(github.event.pull_request.title, '[skip ci]') + || (needs.skip_duplicate_runs.outputs.should_skip == 'true')) + strategy: + fail-fast: false + matrix: + include: + - { os: ubuntu-20.04 , ruby: '3.1', rails: '7.0' } + - { os: ubuntu-20.04 , ruby: '3.2', rails: '7.0' } + - { os: ubuntu-22.04 , ruby: '3.3', rails: '7.0' } + - { os: ubuntu-20.04 , ruby: '3.1', rails: '7.1' } + - { os: ubuntu-20.04 , ruby: '3.2', rails: '7.1' } + - { os: ubuntu-22.04 , ruby: '3.3', rails: '7.1' } + - { os: ubuntu-22.04 , ruby: head , rails: '7.1' } + env: + CI: true + FERRUM_PROCESS_TIMEOUT: 60 + FERRUM_DEFAULT_TIMEOUT: 60 + RAILS_VERSION: "${{ matrix.rails }}" + + steps: + - name: checkout hotwired/turbo-rails + uses: actions/checkout@v4 + with: + repository: hotwired/turbo-rails + ref: main + + - name: turbo-rails updates + run: | + # use repo & commit being tested, $GITHUB_REPOSITORY allows forks to work + SRC="gem ['\"]puma['\"].*" + DST="gem 'puma', git: 'https://github.com/$GITHUB_REPOSITORY.git', ref: '$GITHUB_SHA'" + sed -i "s#$SRC#$DST#" Gemfile + SRC="gem ['\"]sqlite3['\"].*" + DST="gem 'sqlite3', '~> 1.4'" + sed -i "s#$SRC#$DST#" Gemfile + # + # allow using capybara from the repo, either a branch or a commit + # comment out if CI works with current release + # SRC="gem ['\"]capybara['\"].*" + # DST="kw =\n if RUBY_VERSION.start_with? 
'3'\n {git: 'https://github.com/teamcapybara/capybara.git', ref: '43e32a8495'}\n else\n {}\n end\n gem 'capybara', **kw" + # sed -i "s#$SRC#$DST#" Gemfile + # + # use `stdio` for log_writer, always have one thread existing + SRC="Silent: true" + DST="Silent: false, Threads: '1:4'" + sed -i "s/$SRC/$DST/" test/application_system_test_case.rb + cat Gemfile + + - name: load ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby }} + rubygems: latest + bundler-cache: true + timeout-minutes: 10 + + - name: turbo-rails Gemfile.lock + run: cat Gemfile.lock + + - name: turbo-rails test + id: test + run: bin/test test/**/*_test.rb -vd diff --git a/vendor/cache/puma-fba741b91780/.gitignore b/vendor/cache/puma-fba741b91780/.gitignore new file mode 100644 index 000000000..e48aa2855 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.gitignore @@ -0,0 +1,29 @@ +scratch/ +*.bundle +*.log +*.o +*.so +*.jar +*.rbc +doc +log +pkg +tmp +t/ +.rbx/ +Gemfile.lock +.idea/ +vendor/ +/test/test_puma.state +/test/test_server.sock +/test/test_control.sock +.DS_Store + +# windows local build artifacts +/win_gem_test/shared/ +/win_gem_test/packages/ +/win_gem_test/test_logs/ +/Rakefile_wintest +*.gem +/lib/puma/puma_http11.rb +/lib/puma/puma_http11.su diff --git a/vendor/cache/puma-fba741b91780/.rubocop.yml b/vendor/cache/puma-fba741b91780/.rubocop.yml new file mode 100644 index 000000000..5aa61d875 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.rubocop.yml @@ -0,0 +1,84 @@ +require: + - rubocop-performance + +AllCops: + DisabledByDefault: true + TargetRubyVersion: 2.4 + DisplayCopNames: true + StyleGuideCopsOnly: false + Exclude: + - 'tmp/**/*' + - '**/vendor/bundle/**/*' + - 'examples/**/*' + - 'pkg/**/*' + - 'Rakefile' + SuggestExtensions: false + NewCops: enable + +# enable all Performance cops +Performance: + Enabled: true + +# ————————————————————————————————————————— disabled cops + +# ————————————————————————————————————————— enabled cops 
+Layout/AccessModifierIndentation: + EnforcedStyle: indent + +Layout/IndentationStyle: + Enabled: true + +Layout/SpaceAfterColon: + Enabled: true + +Layout/SpaceAroundKeyword: + Enabled: true + +Layout/SpaceBeforeBlockBraces: + EnforcedStyleForEmptyBraces: no_space + Enabled: true + +Layout/SpaceBeforeFirstArg: + Enabled: true + +Layout/SpaceInsideParens: + Enabled: true + +Layout/TrailingEmptyLines: + Enabled: true + +Layout/TrailingWhitespace: + Enabled: true + +Lint/Debugger: + Enabled: true + +Metrics/ParameterLists: + Max: 7 + +Naming/ConstantName: + Enabled: true + +Naming/MethodName: + Enabled: true + EnforcedStyle: snake_case + Exclude: + - 'test/**/**' + +Naming/VariableName: + Enabled: true + +Style/MethodDefParentheses: + Enabled: true + +Style/SafeNavigation: + Enabled: true + +Style/TernaryParentheses: + Enabled: true + +Style/TrailingCommaInArguments: + Enabled: true + +Style/WhileUntilModifier: + Enabled: true diff --git a/vendor/cache/puma-fba741b91780/.rubocop_todo.yml b/vendor/cache/puma-fba741b91780/.rubocop_todo.yml new file mode 100644 index 000000000..80ba5ae4c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/.rubocop_todo.yml @@ -0,0 +1,54 @@ +inherit_from: "./.rubocop.yml" + +# 29 offenses +Layout/SpaceAroundOperators: + Enabled: true + +# 21 offenses +Layout/SpaceInsideBlockBraces: + Enabled: true + +# 16 offenses +Layout/SpaceAroundEqualsInParameterDefault: + Enabled: true + EnforcedStyle: no_space + +# 15 offenses +Layout/SpaceInsideHashLiteralBraces: + Enabled: true + EnforcedStyle: no_space + +# 8 offenses +Layout/EmptyLines: + Enabled: true + +# 4 offenses +Layout/EmptyLinesAroundClassBody: + Enabled: true + Exclude: + - 'test/**/*' + +# 6 offenses +Layout/EmptyLinesAroundMethodBody: + Enabled: true + +# 5 offenses +Layout/EmptyLinesAroundModuleBody: + Enabled: true + +# 5 offenses +Layout/IndentationWidth: + Enabled: true + +# >200 offenses for 80 +# 58 offenses for 100 +# 18 offenses for 120 +Metrics/LineLength: + Max: 120 + 
AllowHeredoc: true + AllowURI: true + URISchemes: + - http + - https + IgnoreCopDirectives: false + IgnoredPatterns: [] diff --git a/vendor/cache/puma-fba741b91780/5.0-Upgrade.md b/vendor/cache/puma-fba741b91780/5.0-Upgrade.md new file mode 100644 index 000000000..06041adc1 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/5.0-Upgrade.md @@ -0,0 +1,98 @@ +# Welcome to Puma 5: Spoony Bard. + +![Spoony Bard](https://i1.kym-cdn.com/entries/icons/original/000/006/385/Spoony_Bard.jpg "Spoony Bard") + +>Note: Puma 5 now automatically uses `WEB_CONCURRENCY` env var if set see [this post for an explanation](https://github.com/puma/puma/issues/2393#issuecomment-702352208). If your memory use goes up after upgrading to Puma 5 it indicates you're now running with multiple workers (processes). You can decrease memory use by tuning this number to be lower. + +Puma 5 brings new experimental performance features, a few quality-of-life features and loads of bugfixes. Here's what you should do: + +1. Review the Upgrade section below to see if any of 5.0's breaking changes will affect you. +2. Upgrade to version 5.0 in your Gemfile and deploy. +3. Try the new performance experiments outlined below and report your results back to the Puma issue tracker. + +Puma 5 was named Spoony Bard by our newest supercontributor, [@wjordan](https://github.com/puma/puma/commits?author=wjordan). Will brought you one of our new perf features for this release, as well as [many other fixes and refactors.](https://github.com/puma/puma/commits?author=wjordan) If you'd like to name a Puma release in the future, take a look at [CONTRIBUTING.md](CONTRIBUTING.md) and get started helping us out :) + +Puma 5 also welcomes [@MSP-Greg](https://github.com/puma/puma/commits?author=MSP-Greg) as our newest committer. Greg has been instrumental in improving our CI setup and SSL features. Greg also [named our 4.3.0 release](https://github.com/puma/puma/releases/tag/v4.3.0): Mysterious Traveller. 
+ +## What's New + +Puma 5 contains three new "experimental" performance features for cluster-mode Pumas running on MRI. + +If you try any of these features, please report your results to [our report issue](https://github.com/puma/puma/issues/2258). + +Part of the reason we're calling them _experimental_ is because we're not sure if they'll actually have any benefit. People's workloads in the real world are often not what we anticipate, and synthetic benchmarks are usually not of any help in figuring out if a change will be beneficial or not. + +We do not believe any of the new features will have a negative effect or impact the stability of your application. This is either a "it works" or "it does nothing" experiment. + +If any of the features turn out to be particularly beneficial, we may make them defaults in future versions of Puma. + +### Lower latency, better throughput + +From our friends at GitLab, the new experimental `wait_for_less_busy_worker` config option may reduce latency and improve throughput for high-load Puma apps on MRI. See the [pull request](https://github.com/puma/puma/pull/2079) for more discussion. + +Users of this option should see reduced request queue latency and possibly less overall latency. + +Add the following to your `puma.rb` to try it: + +```ruby +wait_for_less_busy_worker +# or +wait_for_less_busy_worker 0.001 +``` + +Production testing at GitLab suggests values between `0.001` and `0.010` are best. + +### Better memory usage + +5.0 brings two new options to your config which may improve memory usage. + +#### nakayoshi_fork + +`nakayoshi_fork` calls GC a handful of times and compacts the heap on Ruby 2.7+ before forking. This may reduce memory usage of Puma on MRI with preload enabled. It's inspired by [Koichi Sasada's work](https://github.com/ko1/nakayoshi_fork). 
+ +To use it, you can add this to your `puma.rb`: + +```ruby +nakayoshi_fork +``` + +#### fork_worker + +Puma 5 introduces an experimental new cluster-mode configuration option, `fork_worker` (`--fork-worker` from the CLI). This mode causes Puma to fork additional workers from worker 0, instead of directly from the master process: + +``` +10000 \_ puma 4.3.3 (tcp://0.0.0.0:9292) [puma] +10001 \_ puma: cluster worker 0: 10000 [puma] +10002 \_ puma: cluster worker 1: 10000 [puma] +10003 \_ puma: cluster worker 2: 10000 [puma] +10004 \_ puma: cluster worker 3: 10000 [puma] +``` + +It is compatible with phased restarts. It also may improve memory usage because the worker process loads additional code after processing requests. + +To learn more about using `refork` and `fork_worker`, see ['Fork Worker'](docs/fork_worker.md). + +### What else is new? + +* **Loads of bugfixes**. +* Faster phased restarts and worker timeouts. +* pumactl now has a `thread-backtraces` command to print thread backtraces, bringing thread backtrace printing to all platforms, not just *BSD and Mac. (#2053) +* Added incrementing `requests_count` to `Puma.stats`. (#2106) +* Faster phased restart and worker timeout. (#2220) +* Added `state_permission` to config DSL to set state file permissions (#2238) +* Ruby 2.2 support will be dropped in Puma 6. This is the final major release series for Ruby 2.2. + +## Upgrade + +* Setting the `WEB_CONCURRENCY` environment variable will now configure the number of workers (processes) that Puma will boot and enable preloading of the application. +* If you did not explicitly set `environment` before, Puma now checks `RAILS_ENV` and will use that, if available in addition to `RACK_ENV`. +* If you have been using the `--control` CLI option, update your scripts to use `--control-url`. +* If you are using `worker_directory` in your config file, change it to `directory`. +* If you are running MRI, default thread count on Puma is now 5, not 16. 
This may change the amount of threads running in your threadpool. We believe 5 is a better default for most Ruby web applications on MRI. Higher settings increase latency by causing GVL contention. +* If you are using a worker count of more than 1, set using `WEB_CONCURRENCY`, Puma will now preload the application by default (disable with `preload_app! false`). We believe this is a better default, but may cause issues in non-Rails applications if you do not have the proper `before` and `after` fork hooks configured. See documentation for your framework. Rails users do not need to change anything. **Please note that it is not possible to use [the phased restart](docs/restart.md) with preloading.** +* tcp mode and daemonization have been removed without replacement. For daemonization, please use a modern process management solution, such as systemd or monit. +* `connected_port` was renamed to `connected_ports` and now returns an Array, not an Integer. + +Then, update your Gemfile: + +`gem 'puma', '< 6'` diff --git a/vendor/cache/puma-fba741b91780/6.0-Upgrade.md b/vendor/cache/puma-fba741b91780/6.0-Upgrade.md new file mode 100644 index 000000000..7e77f6a34 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/6.0-Upgrade.md @@ -0,0 +1,56 @@ +# Welcome to Puma 6: Sunflower. + +![Image by Todd Trapani, Unsplash](https://user-images.githubusercontent.com/845662/192706685-774d3d0d-f4a9-4b93-b27b-5a3b7f44ff31.jpg) + +Puma 6 brings performance improvements for most applications, experimental Rack 3 support, support for Sidekiq 7 Capsules, and more. + +Here's what you should do: + +1. Review the Upgrade section below to look for breaking changes that could affect you. +2. Upgrade to version 6.0 in your Gemfile and deploy. +3. Open up a new bug issue if you find any problems. +4. Join us in building Puma! We welcome first-timers. See [CONTRIBUTING.md](./CONTRIBUTING.md). + +For a complete list of changes, see [History.md](./History.md). 
+
+## What's New
+
+Puma 6 is mostly about a few nice-to-have performance changes, and then a few breaking API changes we've been putting off for a while.
+
+### Improved Performance
+
+We've improved throughput and latency in Puma 6 in a few areas.
+
+1. **Large chunked response body throughput 3-10x higher** Chunked response bodies >100kb should be 3 to 10 times faster than in Puma 5. String response bodies should be ~10% faster.
+2. **File response throughput is 3x higher.** File responses (e.g. assets) should be about 3x faster.
+3. **wait_for_less_busy_worker is now default, meaning lower latencies for high-utilization servers** `wait_for_less_busy_worker` was an experimental feature in Puma 5 and it's now the default in Puma 6. This feature makes each Puma child worker in cluster mode wait before listening on the socket, and that wait time is proportional to N * `number_of_threads_responding_to_requests`. This means that it's more likely that a request is picked up by the least-loaded Puma child worker listening on the socket. Many users reported back that this option was stable and decreased average latency, particularly in environments with high load and utilization.
+
+### Experimental Rack 3 Support
+
+[Rack 3 is now out](https://github.com/rack/rack/blob/main/UPGRADE-GUIDE.md) and we've started on Rack 3 support. Please open a bug if you find any incompatibilities.
+
+### Sidekiq 7 Capsules
+
+Sidekiq 7 (releasing soon) introduces Capsules, which allows you to run a Sidekiq server inside your Puma server (or any other Ruby process for that matter). We've added support by allowing you to pass data into `run_hooks`, see [issue #2915](https://github.com/puma/puma/issues/2915).
+
+## Upgrade
+
+Check the following list to see if you're depending on any of these behaviors:
+
+1. 
Configuration constants like `DefaultRackup` removed, see [#2928](https://github.com/puma/puma/pull/2928/files#diff-2dc4e3e83be7fd97cebc482ae07d6a8216944003de82458783fb00b5ae9524c8) for the full list. +1. We have changed the names of the following environment variables: `DISABLE_SSL` is now `PUMA_DISABLE_SSL`, `MAKE_WARNINGS_INTO_ERRORS` is now `PUMA_MAKE_WARNINGS_INTO_ERRORS`, and `WAIT_FOR_LESS_BUSY_WORKERS` is now `PUMA_WAIT_FOR_LESS_BUSY_WORKERS`. +1. Nakayoshi GC (`nakayoshi_fork` option in config) has been removed without replacement. +1. `wait_for_less_busy_worker` is now on by default. If you don't want to use this feature, you must add `wait_for_less_busy_worker false` in your config. +1. We've removed the following public methods on Puma::Server: `Puma::Server#min_threads`, `Puma::Server#max_threads`. Instead, you can pass in configuration as an option to Puma::Server#new. This might make certain gems break (`capybara` for example). +1. We've removed the following constants: `Puma::StateFile::FIELDS`, `Puma::CLI::KEYS_NOT_TO_PERSIST_IN_STATE` and `Puma::Launcher::KEYS_NOT_TO_PERSIST_IN_STATE`, and `Puma::ControlCLI::COMMANDS`. +1. We no longer support Ruby 2.2, 2.3, or JRuby on Java 1.7 or below. +1. The behavior of `remote_addr` has changed. When using the set_remote_address header: "header_name" functionality, if the header is not passed, REMOTE_ADDR is now set to the physical peeraddr instead of always being set to 127.0.0.1. When an error occurs preventing the physical peeraddr from being fetched, REMOTE_ADDR is now set to the unspecified source address ('0.0.0.0') instead of to '127.0.0.1' +1. Previously, Puma supported anything as an HTTP method and passed it to the app. We now only accept the following 8 HTTP methods, based on [RFC 9110, section 9.1](https://www.rfc-editor.org/rfc/rfc9110.html#section-9.1). The [IANA HTTP Method Registry](https://www.iana.org/assignments/http-methods/http-methods.xhtml) contains a full list of HTTP methods. 
+ ``` + HEAD GET POST PUT DELETE OPTIONS TRACE PATCH + ``` + As of Puma 6.2, these can be overridden by `supported_http_methods` in your config file, see `Puma::DSL#supported_http_methods`. + +Then, update your Gemfile: + +`gem 'puma', '< 7'` diff --git a/vendor/cache/puma-fba741b91780/CODE_OF_CONDUCT.md b/vendor/cache/puma-fba741b91780/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..7963b260c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/CODE_OF_CONDUCT.md @@ -0,0 +1,77 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at nate.berkopec@speedshop.co, +richard.schneeman+no-recruiters@gmail.com, or evan@phx.io. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/vendor/cache/puma-fba741b91780/CONTRIBUTING.md b/vendor/cache/puma-fba741b91780/CONTRIBUTING.md new file mode 100644 index 000000000..7aaf43562 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/CONTRIBUTING.md @@ -0,0 +1,245 @@ +# Contributing to Puma + +By participating in this project, you agree to follow the [code of conduct]. + +[code of conduct]: https://github.com/puma/puma/blob/master/CODE_OF_CONDUCT.md + +There are lots of ways to contribute to Puma. 
Some examples include: + +* creating a [bug report] or [feature request] +* verifying [existing bug reports] and adding [reproduction steps] +* reviewing [pull requests] and testing the changes locally on your machine +* writing or editing [documentation] +* improving test coverage +* fixing a [reproducing bug] or adding a new feature + +[bug report]: https://github.com/puma/puma/issues/new?template=bug_report.md +[feature request]: https://github.com/puma/puma/issues/new?template=feature_request.md +[existing bug reports]: https://github.com/puma/puma/issues?q=is%3Aopen+is%3Aissue+label%3Aneeds-repro +[pull requests]: https://github.com/puma/puma/pulls +[documentation]: https://github.com/puma/puma/tree/master/docs +[reproduction steps]: https://github.com/puma/puma/blob/CONTRIBUTING.md#reproduction-steps +[reproducing bug]: https://github.com/puma/puma/issues?utf8=%E2%9C%93&q=is%3Aopen+is%3Aissue+label%3Abug + +Newbies welcome! We would be happy to help you make your first contribution to a F/OSS project. + +## Setup + +Any questions about contributing may be asked in our [Discussions](https://github.com/puma/puma/discussions). + +**If you're nervous, get stuck, need help, or want to know where to start and where you can help**, please don't hesitate to [book 30 minutes with maintainer @nateberkopec here](https://calendly.com/nateberkopec/30min). He is happy to help! + +Nate also [gave a 40 minute conference talk in 2022](https://www.youtube.com/watch?v=w4X_oBuPmTM) detailing how Puma works, a brief overview of its internals, and a quick guide on how to contribute. + +#### Clone the repo + +Clone the Puma repository: +```sh +git clone git@github.com:puma/puma.git && cd puma +``` + +#### Ragel + +You need to install [ragel] (use Ragel version 7.0.0.9) to generate Puma's extension code. 
+ +macOS: + +```sh +brew install ragel +``` + +Linux: +```sh +apt-get install ragel +``` + +Windows (Ruby 2.5 and later): +```sh +ridk exec pacman -S mingw-w64-x86_64-openssl mingw-w64-x86_64-ragel +``` + +#### Install Ruby dependencies + +Install the Ruby dependencies: +```sh +bundle install +``` + +#### Compile the native extensions + +To run Puma locally, you must compile the native extension. Running the `test` rake task does this automatically, but you may need to manually run the compile command if you want to run Puma and haven't run the tests yet: + +Ubuntu, macOS, etc: +```sh +bundle exec rake compile +``` + +Windows: +```sh +bundle exec rake -rdevkit compile +``` + +#### Run your local Puma + +Now, you should be able to run Puma locally: + +```sh +bundle exec bin/puma test/rackup/hello.ru +# -or- +bundle exec ruby -Ilib bin/puma test/rackup/hello.ru +``` + +Alternatively, you can reference your local copy in a project's `Gemfile`: + +```ruby +gem "puma", path: "/path/to/local/puma" +``` + +See the [Bundler docs](https://bundler.io/man/gemfile.5.html#PATH) for more details. 
+ +[ragel]: https://www.colm.net/open-source/ragel/ + +## Running tests + +To run rubocop + tests: + +```sh +bundle exec rake +``` + +To run the test suite only: +```sh +bundle exec rake test +``` + +To run a single test file: +```sh +bundle exec ruby test/test_binder.rb +``` + +You can also run tests with [`m`](https://github.com/qrush/m): +```sh +bundle exec m test/test_binder.rb +``` + +To run a single test: +```sh +bundle exec m test/test_binder.rb:37 +``` + +To run a single test with 5 seconds as the test case timeout: +```sh +TEST_CASE_TIMEOUT=5 bundle exec m test/test_binder.rb:37 +``` + +If you would like more information about extension building, SSL versions, your local Ruby version, and more, use the PUMA_TEST_DEBUG env variable: + +```sh +PUMA_TEST_DEBUG=1 bundle exec rake test +``` + +Puma also has a helper file for running tests, see the comments at the top of the `test/runner` file. Example: +``` +test/runner -v test_puma_server.rb +``` + +#### File limits + +Puma's test suite opens up a lot of sockets. This may exceed the default limit of your operating system. If your file limits are low, you may experience "too many open file" errors when running the Puma test suite. + +``` +# check your file limit +ulimit -S -n + +# change file limit for the current session +ulimit -S -n +``` + +We find that values of 4000 or more work well. [Learn more about your file limits and how to change them here.](https://wilsonmar.github.io/maximum-limits/) + +## How to contribute + +Puma could use your help in several areas! + +**Don't worry about "claiming an issue". No issues are "claimed" in Puma.** Just start working on it. The issue tracker is almost always kept updated, so if there is an open issue, it is ready for you to contribute (unless you have questions about how to close issue - then please ask!). Once you have a few lines of code, post a draft PR. We are more than happy to help once you have a draft PR up. + +**New to systems programming? 
That's ok!** Puma deals with concepts you may not have been familiar with before, like sockets, TCP, UDP, SSL, and Threads. That's ok! You can learn by contributing. Also, see the "Bibliography" section at the end of this document. + +**The [contrib-wanted] label indicates that an issue might approachable to first-time contributors.** + +**Reproducing bug reports**: The [needs-repro] label indicates than an issue lacks reproduction steps. You can help by reproducing the issue and sharing the steps you took in the comments. + +**Helping with our native extensions**: If you are interested in writing C or Java, we could really use your help. Check out the issue labels for [c-ext] and [JRuby]. + +**Fixing bugs**: Issues with the [bug] label have working reproduction steps, which you can use to write a test and submit a patch. + +**Writing features**: The [feature] label highlights requests for new functionality. Write tests and code up our new feature! + +**Code review**: Take a look at open pull requests and offer your feedback. Code review is not just for maintainers. We need your help and eyeballs! + +**Write documentation**: Puma needs more docs in many areas, especially where we have open issues with the [docs] label. + +[bug]: https://github.com/puma/puma/issues?q=is%3Aopen+is%3Aissue+label%3Abug +[c-ext]: https://github.com/puma/puma/issues?q=is%3Aopen+is%3Aissue+label%3Ac-ext +[contrib-wanted]: https://github.com/puma/puma/issues?q=is%3Aopen+is%3Aissue+label%3Acontrib-wanted +[docs]: https://github.com/puma/puma/issues?q=is%3Aopen+is%3Aissue+label%3Adocs +[feature]: https://github.com/puma/puma/issues?q=is%3Aopen+is%3Aissue+label%3Afeature +[jruby]: https://github.com/puma/puma/issues?q=is%3Aopen+is%3Aissue+label%3Ajruby +[needs-repro]: https://github.com/puma/puma/issues?q=is%3Aopen+is%3Aissue+label%3Aneeds-repro + +## Reproduction steps + +Reproducing a bug helps identify the root cause of that bug so it can be fixed. 
+ +To get started, create a rackup file and config file and then run your test app +with: +```sh +bundle exec puma -C +``` + +For example, using a test rack app ([`test/rackup/hello.ru`][rackup]) and a +test config file ([`test/config/settings.rb`][config]): +```sh +bundle exec puma -C test/config/settings.rb test/rackup/hello.ru +``` + +There is also a Dockerfile available for reproducing Linux-specific issues: +```sh +docker build -f tools/Dockerfile -t puma . +docker run -p 9292:9292 -it puma +``` + +[rackup]: https://github.com/puma/puma/blob/master/test/rackup/hello.ru +[config]: https://github.com/puma/puma/blob/master/test/config/settings.rb + +## Pull requests + +Please open draft PRs as soon as you are ready for feedback from the community. + +Code contributions should generally include test coverage. If you aren't sure how to +test your changes, please open a pull request and leave a comment asking for +help. + +There's no need to update the changelog ([`History.md`](History.md)); that is done [when a new release is made](Release.md). + +Puma uses [GitHub Actions](https://docs.github.com/en/actions) for CI testing. Please consider running the workflows in your fork before creating a PR. It is possible to enable GitHub Actions on your fork in the repositories' `Actions` tab. + +## Backports + +Puma does not have a backport "policy" - maintainers will not consistently backport bugfixes to previous minor or major versions (we do treat security differently, see [`SECURITY.md`](SECURITY.md). + +As a contributor, you may make pull requests against `-stable` branches to backport fixes, and maintainers will release them once they're merged. For example, if you'd like to make a backport for 4.3.x, you can make a pull request against `4-3-stable`. If there is no appropriate branch for the release you'd like to backport against, please just open an issue and we'll make one for you. 
+ +## Join the community + +If you're looking to contribute to Puma, please join us in [Discussions](https://github.com/puma/puma/discussions). + +## Bibliography/Reading + +Puma can be a bit intimidating for your first contribution because there's a lot of concepts here that you've probably never had to think about before - Rack, sockets, forking, threads etc. Here are some helpful links for learning more about things related to Puma: + +* [Puma's Architecture docs](https://github.com/puma/puma/blob/master/docs/architecture.md) +* [The Rack specification](https://github.com/rack/rack/blob/master/SPEC.rdoc) +* [Working with...](https://workingwithruby.com/) "Working With" is a excellent (and now free) Ruby book series about working with Threads, TCP and Unix Sockets. +* The Ruby docs for IO.pipe, TCPServer/Socket. +* [nio4r documentation](https://github.com/socketry/nio4r/wiki/Getting-Started) diff --git a/vendor/cache/puma-fba741b91780/Gemfile b/vendor/cache/puma-fba741b91780/Gemfile new file mode 100644 index 000000000..0bfa58c14 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/Gemfile @@ -0,0 +1,38 @@ +source "https://rubygems.org" + +gemspec + +gem "rake-compiler" + +gem "json", "~> 2.3" +gem "nio4r", "~> 2.0" +gem "minitest", "~> 5.11" +gem "minitest-retry" +gem "minitest-proveit" +gem "minitest-stub-const" +gem "concurrent-ruby", "~> 1.3" + +case ENV['PUMA_CI_RACK']&.strip +when 'rack2' + gem "rackup", '~> 1.0' + gem "rack" , '~> 2.2' +when 'rack1' + gem "rack" , '~> 1.6' +else + gem "rackup", '>= 2.0' + gem "rack" , '>= 2.2' +end + +gem "jruby-openssl", :platform => "jruby" + +unless ENV['PUMA_NO_RUBOCOP'] || RUBY_PLATFORM.include?('mswin') + gem "rubocop" + gem 'rubocop-performance', require: false +end + +if RUBY_VERSION == '2.4.1' + gem "stopgap_13632", "~> 1.0", :platforms => ["mri", "mingw", "x64_mingw"] +end + +gem 'm' +gem "localhost", require: false diff --git a/vendor/cache/puma-fba741b91780/History.md b/vendor/cache/puma-fba741b91780/History.md 
new file mode 100644 index 000000000..e887f0b7d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/History.md @@ -0,0 +1,2897 @@ +## 6.4.3 / 2024-09-19 + +* Security + * Discards any headers using underscores if the non-underscore version also exists. Without this, an attacker could overwrite values set by intermediate proxies (e.g. X-Forwarded-For). ([CVE-2024-45614](https://github.com/puma/puma/security/advisories/GHSA-9hf4-67fc-4vf4)/GHSA-9hf4-67fc-4vf4) + +## 6.4.2 / 2024-01-08 + +* Security + * Limit the size of chunk extensions. Without this limit, an attacker could cause unbounded resource (CPU, network bandwidth) consumption. ([GHSA-c2f4-cvqm-65w2](https://github.com/puma/puma/security/advisories/GHSA-c2f4-cvqm-65w2)) + +## 6.4.1 / 2024-01-03 + +* Bugfixes + * DSL#warn_if_in_single_mode - fixup when workers set via CLI ([#3256]) + * Fix `idle-timeout` not working in cluster mode ([#3235], [#3228], [#3282], [#3283]) + * Fix worker 0 timing out during phased restart ([#3225], [#2786]) + * context_builder.rb - require openssl if verify_mode != 'none' ([#3179]) + * Make puma cluster process suitable as PID 1 ([#3255]) + * Improve Puma::NullIO consistency with real IO ([#3276]) + * extconf.rb - fixup to detect openssl info in Ruby build ([#3271], [#3266]) + * MiniSSL.java - set serialVersionUID, fix RaiseException deprecation ([#3270]) + * dsl.rb - fix warn_if_in_single_mode when WEB_CONCURRENCY is set ([#3265], [#3264]) + +* Maintenance + * LOTS of test refactoring to make tests more stable and easier to write - thanks to @MSP-Greg! 
+ * Fix bug in tests re: TestPuma::HOST4 ([#3254]) + * Dockerfile for minimal repros: use Ruby 3.2, expect bundler installed ([#3245]) + * fix define_method calls, use Symbol parameter instead of String ([#3293]) + +* Docs + * README.md - add the puma-acme plugin ([#3301]) + * Remove `--keep-file-descriptors` flag from systemd docs ([#3248]) + * Note symlink mechanism in restart documentation for hot restart ([#3298]) + +## 6.4.0 / 2023-09-21 + +* Features + * on_thread_exit hook ([#2920]) + * on_thread_start_hook ([#3195]) + * Shutdown on idle ([#3209], [#2580]) + * New error message when control server port taken ([#3204]) + +* Refactor + * Remove `Forwardable` dependency ([#3191], #3190) + * Update URLMap Regexp usage for Ruby v3.3 ([#3165]) + +* Bugfixes + * Bring the cert_pem: parameter into parity with the cert: parameter to ssl_bind. ([#3174]) + * Fix using control server with IPv6 host ([#3181]) + * control_cli.rb - add require_relative 'log_writer' ([#3187]) + * Fix cases where fallback Rack response wasn't sent to the client ([#3094]) + +## 6.3.1 / 2023-08-18 + +* Security + * Address HTTP request smuggling vulnerabilities with zero-length Content Length header and trailer fields ([GHSA-68xg-gqqm-vgj8](https://github.com/puma/puma/security/advisories/GHSA-68xg-gqqm-vgj8)) + +## 6.3.0 / 2023-05-31 + +* Features + * Add dsl method `supported_http_methods` ([#3106], [#3014]) + * Puma error responses no longer have any fingerprints to indicate Puma ([#3161], [#3037]) + * Support decryption of SSL key ([#3133], [#3132]) + +* Bugfixes + * Don't send 103 early hints response when only invalid headers are used ([#3163]) + * Handle malformed request path ([#3155], [#3148]) + * Misc lib file fixes - trapping additional errors, CI helper ([#3129]) + * Fixup req form data file upload with "r\n" line endings ([#3137]) + * Restore rack 1.6 compatibility ([#3156]) + +* Refactor + * const.rb - Update Puma::HTTP_STATUS_CODES ([#3162]) + * Clarify Reactor#initialize 
([#3151]) + +## 6.2.2 / 2023-04-17 + +* Bugfixes + * Fix Rack-related NameError by adding :: operator ([#3118], [#3117]) + +## 6.2.1 / 2023-03-31 + +* Bugfixes + * Fix java 8 compatibility ([#3109], [#3108]) + * Always write io_buffer when in "enum bodies" branch. ([#3113], [#3112]) + * Fix warn_if_in_single_mode incorrect message ([#3111]) + +## 6.2.0 / 2023-03-29 + +* Features + * Ability to supply a custom logger ([#2770], [#2511]) + * Warn when clustered-only hooks are defined in single mode ([#3089]) + * Adds the on_booted event ([#2709]) + +* Bugfixes + * Loggers - internal_write - catch Errno::EINVAL ([#3091]) + * commonlogger.rb - fix HIJACK time format, use constants, not strings ([#3074]) + * Fixed some edge cases regarding request hijacking ([#3072]) + +## 6.1.1 / 2023-02-28 + +* Bugfixes + * We no longer try to use the systemd plugin for JRuby ([#3079]) + * Allow ::Rack::Handler::Puma.run to work regardless of whether Rack/Rackup are loaded ([#3080]) + +## 6.1.0 / 2023-02-12 + +* Features + * WebSocket support via partial hijack ([#3058], [#3007]) + * Add built-in systemd notify support ([#3011]) + * Periodically send status to systemd ([#3006], [#2604]) + * Introduce the ability to return 413: payload too large for requests ([#3040]) + * Log loaded extensions when `PUMA_DEBUG` is set ([#3036], [#3020]) + +* Bugfixes + * Fix issue with rack 3 compatibility re: rackup ([#3061], [#3057]) + * Allow setting TCP low_latency with SSL listener ([#3065]) + +* Performance + * Reduce memory usage for large file uploads ([#3062]) + +## 6.0.2 / 2023-01-01 + +* Refactor + * Remove use of etc and time gems in Puma ([#3035], [#3033]) + * Refactor const.rb - freeze ([#3016]) + +## 6.0.1 / 2022-12-20 + +* Bugfixes + * Handle waking up a closed selector in Reactor#add ([#3005]) + * Fixup response processing, enumerable bodies ([#3004], [#3000]) + * Correctly close app body for all code paths ([#3002], [#2999]) +* Refactor + * Add IOBuffer to Client, remove from 
ThreadPool thread instances ([#3013]) + +## 6.0.0 / 2022-10-14 + +* Breaking Changes + * Dropping Ruby 2.2 and 2.3 support (now 2.4+) ([#2919]) + * Remote_addr functionality has changed ([#2652], [#2653]) + * No longer supporting Java 1.7 or below (JRuby 9.1 was the last release to support this) ([#2849]) + * Remove nakayoshi GC ([#2933], [#2925]) + * wait_for_less_busy_worker is now default on ([#2940]) + * Prefix all environment variables with `PUMA_` ([#2924], [#2853]) + * Removed some constants ([#2957], [#2958], [#2959], [#2960]) + * The following classes are now part of Puma's private API: `Client`, `Cluster::Worker`, `Cluster::Worker`, `HandleRequest`. ([#2988]) + * Configuration constants like `DefaultRackup` removed ([#2928]) + * Extracted `LogWriter` from `Events` ([#2798]) + * Only accept the standard 8 HTTP methods, others rejected with 501. ([#2932]) + +* Features + * Increase throughput on large (100kb+) response bodies by 3-10x ([#2896], [#2892]) + * Increase throughput on file responses ([#2923]) + * Add support for streaming bodies in Rack. 
([#2740]) + * Allow OpenSSL session reuse via a 'reuse' ssl_bind method or bind string query parameter ([#2845]) + * Allow `run_hooks` to pass a hash to blocks for use later ([#2917], [#2915]) + * Allow using `preload_app!` with `fork_worker` ([#2907]) + * Support request_body_wait metric with higher precision ([#2953]) + * Allow header values to be arrays (Rack 3) ([#2936], [#2931]) + * Export Puma/Ruby versions in /stats ([#2875]) + * Allow configuring request uri max length & request path max length ([#2840]) + * Add a couple of public accessors ([#2774]) + * Log entire backtrace when worker start fails ([#2891]) + * [jruby] Enable TLSv1.3 support ([#2886]) + * [jruby] support setting TLS protocols + rename ssl_cipher_list ([#2899]) + * [jruby] Support a truststore option ([#2849], [#2904], [#2884]) + +* Bugfixes + * Load the configuration before passing it to the binder ([#2897]) + * Do not raise error raised on HTTP methods we don't recognize or support, like CONNECT ([#2932], [#1441]) + * Fixed a memory leak when creating a new SSL listener ([#2956]) + +* Refactor + * log_writer.rb - add internal_write method ([#2888]) + * Extract prune_bundler code into it's own class. ([#2797]) + * Refactor Launcher#run to increase readability (no logic change) ([#2795]) + * Ruby 3.2 will have native IO#wait_* methods, don't require io/wait ([#2903]) + * Various internal API refactorings ([#2942], [#2921], [#2922], [#2955]) + +## 5.6.9 / 2024-09-19 + +* Security + * Discards any headers using underscores if the non-underscore version also exists. Without this, an attacker could overwrite values set by intermediate proxies (e.g. X-Forwarded-For). ([CVE-2024-45614](https://github.com/puma/puma/security/advisories/GHSA-9hf4-67fc-4vf4)/GHSA-9hf4-67fc-4vf4) +* JRuby + * Must use at least Java >= 9 to compile. You can no longer build from source on Java 8. + + +## 5.6.8 / 2024-01-08 + +* Security + * Limit the size of chunk extensions. 
Without this limit, an attacker could cause unbounded resource (CPU, network bandwidth) consumption. ([GHSA-c2f4-cvqm-65w2](https://github.com/puma/puma/security/advisories/GHSA-c2f4-cvqm-65w2)) + +## 5.6.7 / 2023-08-18 + +* Security + * Address HTTP request smuggling vulnerabilities with zero-length Content Length header and trailer fields ([GHSA-68xg-gqqm-vgj8](https://github.com/puma/puma/security/advisories/GHSA-68xg-gqqm-vgj8)) + +## 5.6.6 / 2023-06-21 + +* Bugfix + * Prevent loading with rack 3 ([#3166]) + +## 5.6.5 / 2022-08-23 + +* Feature + * Puma::ControlCLI - allow refork command to be sent as a request ([#2868], [#2866]) + +* Bugfixes + * NullIO#closed should return false ([#2883]) + * [jruby] Fix TLS verification hang ([#2890], [#2729]) + * extconf.rb - don't use pkg_config('openssl') if '--with-openssl-dir' is used ([#2885], [#2839]) + * MiniSSL - detect SSL_CTX_set_dh_auto ([#2864], [#2863]) + * Fix rack.after_reply exceptions breaking connections ([#2861], [#2856]) + * Escape SSL cert and filenames ([#2855]) + * Fail hard if SSL certs or keys are invalid ([#2848]) + * Fail hard if SSL certs or keys cannot be read by user ([#2847]) + * Fix build with Opaque DH in LibreSSL 3.5. ([#2838]) + * Pre-existing socket file removed when TERM is issued after USR2 (if puma is running in cluster mode) ([#2817]) + * Fix Puma::StateFile#load incompatibility ([#2810]) + +## 5.6.4 / 2022-03-30 + +* Security + * Close several HTTP Request Smuggling exploits (CVE-2022-24790) + +## 5.6.2 / 2022-02-11 + +* Bugfix/Security + * Response body will always be `close`d. 
(GHSA-rmj8-8hhh-gv5h, related to [#2809]) + +## 5.6.1 / 2022-01-26 + +* Bugfixes + * Reverted a commit which appeared to be causing occasional blank header values ([#2809]) + +## 5.6.0 / 2022-01-25 + +* Features + * Support `localhost` integration in `ssl_bind` ([#2764], [#2708]) + * Allow backlog parameter to be set with ssl_bind DSL ([#2780]) + * Remove yaml (psych) requirement in StateFile ([#2784]) + * Allow culling of oldest workers, previously was only youngest ([#2773], [#2794]) + * Add worker_check_interval configuration option ([#2759]) + * Always send lowlevel_error response to client ([#2731], [#2341]) + * Support for cert_pem and key_pem with ssl_bind DSL ([#2728]) + +* Bugfixes + * Keep thread names under 15 characters, prevents breakage on some OSes ([#2733]) + * Fix two 'old-style-definition' compile warning ([#2807], [#2806]) + * Log environment correctly using option value ([#2799]) + * Fix warning from Ruby master (will be 3.2.0) ([#2785]) + * extconf.rb - fix openssl with old Windows builds ([#2757]) + * server.rb - rescue handling (`Errno::EBADF`) for `@notify.close` ([#2745]) + +* Refactor + * server.rb - refactor code using @options[:remote_address] ([#2742]) + * [jruby] a couple refactorings - avoid copy-ing bytes ([#2730]) + +## 5.5.2 / 2021-10-12 + +* Bugfixes + * Allow UTF-8 in HTTP header values + +## 5.5.1 / 2021-10-12 + +* Feature (added as mistake - we don't normally do this on bugfix releases, sorry!) + * Allow setting APP_ENV in preference to RACK_ENV or RAILS_ENV ([#2702]) + +* Security + * Do not allow LF as a line ending in a header (CVE-2021-41136) + +## 5.5.0 / 2021-09-19 + +* Features + * Automatic SSL certificate provisioning for localhost, via localhost gem ([#2610], [#2257]) + * add support for the PROXY protocol (v1 only) ([#2654], [#2651]) + * Add a semantic CLI option for no config file ([#2689]) + +* Bugfixes + * More elaborate exception handling - lets some dead pumas die. 
([#2700], [#2699]) + * allow multiple after_worker_fork hooks ([#2690]) + * Preserve BUNDLE_APP_CONFIG on worker fork ([#2688], [#2687]) + +* Performance + * Fix performance of server-side SSL connection close. ([#2675]) + +## 5.4.0 / 2021-07-28 + +* Features + * Better/expanded names for threadpool threads ([#2657]) + * Allow pkg_config for OpenSSL ([#2648], [#1412]) + * Add `rack_url_scheme` to Puma::DSL, allows setting of `rack.url_scheme` header ([#2586], [#2569]) + +* Bugfixes + * `Binder#parse` - allow for symlinked unix path, add create_activated_fds debug ENV ([#2643], [#2638]) + * Fix deprecation warning: minissl.c - Use Random.bytes if available ([#2642]) + * Client certificates: set session id context while creating SSLContext ([#2633]) + * Fix deadlock issue in thread pool ([#2656]) + +* Refactor + * Replace `IO.select` with `IO#wait_*` when checking a single IO ([#2666]) + +## 5.3.2 / 2021-05-21 + +* Bugfixes + * Gracefully handle Rack not accepting CLI options ([#2630], [#2626]) + * Fix sigterm misbehavior ([#2629]) + * Improvements to keepalive-connection shedding ([#2628]) + +## 5.3.1 / 2021-05-11 + +* Security + * Close keepalive connections after the maximum number of fast inlined requests (CVE-2021-29509) ([#2625]) + +## 5.3.0 / 2021-05-07 + +* Features + * Add support for Linux's abstract sockets ([#2564], [#2526]) + * Add debug to worker timeout and startup ([#2559], [#2528]) + * Print warning when running one-worker cluster ([#2565], [#2534]) + * Don't close systemd activated socket on pumactl restart ([#2563], [#2504]) + +* Bugfixes + * systemd - fix event firing ([#2591], [#2572]) + * Immediately unlink temporary files ([#2613]) + * Improve parsing of HTTP_HOST header ([#2605], [#2584]) + * Handle fatal error that has no backtrace ([#2607], [#2552]) + * Fix timing out requests too early ([#2606], [#2574]) + * Handle segfault in Ruby 2.6.6 on thread-locals ([#2567], [#2566]) + * Server#closed_socket? 
- parameter may be a MiniSSL::Socket ([#2596]) + * Define UNPACK_TCP_STATE_FROM_TCP_INFO in the right place ([#2588], [#2556]) + * request.rb - fix chunked assembly for ascii incompatible encodings, add test ([#2585], [#2583]) + +* Performance + * Reset peerip only if remote_addr_header is set ([#2609]) + * Reduce puma_parser struct size ([#2590]) + +* Refactor + * Refactor drain on shutdown ([#2600]) + * Micro optimisations in `wait_for_less_busy_worker` feature ([#2579]) + * Lots of test fixes + +## 5.2.2 / 2021-02-22 + +* Bugfixes + * Add `#flush` and `#sync` methods to `Puma::NullIO` ([#2553]) + * Restore `sync=true` on `STDOUT` and `STDERR` streams ([#2557]) + +## 5.2.1 / 2021-02-05 + +* Bugfixes + * Fix TCP cork/uncork operations to work with ssl clients ([#2550]) + * Require rack/common_logger explicitly if :verbose is true ([#2547]) + * MiniSSL::Socket#write - use data.byteslice(wrote..-1) ([#2543]) + * Set `@env[CONTENT_LENGTH]` value as string. ([#2549]) + +## 5.2.0 / 2021-01-27 + +* Features + * 10x latency improvement for MRI on ssl connections by reducing overhead ([#2519]) + * Add option to specify the desired IO selector backend for libev ([#2522]) + * Add ability to set OpenSSL verification flags (MRI only) ([#2490]) + * Uses `flush` after writing messages to avoid mutating $stdout and $stderr using `sync=true` ([#2486]) + +* Bugfixes + * MiniSSL - Update dhparam to 2048 bit for use with SSL_CTX_set_tmp_dh ([#2535]) + * Change 'Goodbye!' 
message to be output after listeners are closed ([#2529]) + * Fix ssl bind logging with 0.0.0.0 and localhost ([#2533]) + * Fix compiler warnings, but skipped warnings related to ragel state machine generated code ([#1953]) + * Fix phased restart errors related to nio4r gem when using the Puma control server ([#2516]) + * Add `#string` method to `Puma::NullIO` ([#2520]) + * Fix binding via Rack handler to IPv6 addresses ([#2521]) + +* Refactor + * Refactor MiniSSL::Context on MRI, fix MiniSSL::Socket#write ([#2519]) + * Remove `Server#read_body` ([#2531]) + * Fail build if compiling extensions raises warnings on GH Actions, configurable via `MAKE_WARNINGS_INTO_ERRORS` ([#1953]) + +## 5.1.1 / 2020-12-10 + +* Bugfixes + * Fix over eager matching against banned header names ([#2510]) + +## 5.1.0 / 2020-11-30 + +* Features + * Phased restart availability is now always logged, even if it is not available. + * Prints the loaded configuration if the environment variable `PUMA_LOG_CONFIG` is present ([#2472]) + * Integrate with systemd's watchdog and notification features ([#2438]) + * Adds max_fast_inline as a configuration option for the Server object ([#2406]) + * You can now fork workers from worker 0 using SIGURG w/o fork_worker enabled [#2449] + * Add option to bind to systemd activated sockets ([#2362]) + * Add compile option to change the `QUERY_STRING` max length ([#2485]) + +* Bugfixes + * Fix JRuby handling in Puma::DSL#ssl_bind ([#2489]) + * control_cli.rb - all normal output should be to @stdout ([#2487]) + * Catch 'Error in reactor loop escaped: mode not supported for this object: r' ([#2477]) + * Ignore Rails' reaper thread (and any thread marked forksafe) for warning ([#2475]) + * Ignore illegal (by Rack spec) response header ([#2439]) + * Close idle connections immediately on shutdown ([#2460]) + * Fix some instances of phased restart errors related to the `json` gem ([#2473]) + * Remove use of `json` gem to fix phased restart errors ([#2479]) + * Fix 
grouping regexp of ILLEGAL_HEADER_KEY_REGEX ([#2495]) + +## 5.0.4 / 2020-10-27 + +* Bugfixes + * Pass preloaded application into new workers if available when using `preload_app` ([#2461], [#2454]) + +## 5.0.3 / 2020-10-26 + +* Bugfixes + * Add Client#io_ok?, check before Reactor#register ([#2432]) + * Fix hang on shutdown in refork ([#2442]) + * Fix `Bundler::GemNotFound` errors for `nio4r` gem during phased restarts ([#2427], [#2018]) + * Server run thread safety fix ([#2435]) + * Fire `on_booted` after server starts ([#2431], [#2212]) + * Cleanup daemonization in rc.d script ([#2409]) + +* Refactor + * Remove accept_nonblock.rb, add test_integration_ssl.rb ([#2448]) + * Refactor status.rb - dry it up a bit ([#2450]) + * Extract req/resp methods to new request.rb from server.rb ([#2419]) + * Refactor Reactor and Client request buffering ([#2279]) + * client.rb - remove JRuby specific 'finish' code ([#2412]) + * Consolidate fast_write calls in Server, extract early_hints assembly ([#2405]) + * Remove upstart from docs ([#2408]) + * Extract worker process into separate class ([#2374]) + * Consolidate option handling in Server, Server small refactors, doc changes ([#2389]) + +## 5.0.2 / 2020-09-28 + +* Bugfixes + * Reverted API changes to Server. 
+ +## 5.0.1 / 2020-09-28 + +* Bugfixes + * Fix LoadError in CentOS 8 ([#2381]) + * Better error handling during force shutdown ([#2271]) + * Prevent connections from entering Reactor after shutdown begins ([#2377]) + * Fix error backtrace debug logging && Do not log request dump if it is not parsed ([#2376]) + * Split TCP_CORK and TCP_INFO ([#2372]) + * Do not log EOFError when a client connection is closed without write ([#2384]) + +* Refactor + * Change Events#ssl_error signature from (error, peeraddr, peercert) to (error, ssl_socket) ([#2375]) + * Consolidate option handling in Server, Server small refactors, doc changes ([#2373]) + +## 5.0.0 / 2020-09-17 + +* Features + * Allow compiling without OpenSSL and dynamically load files needed for SSL, add 'no ssl' CI ([#2305]) + * EXPERIMENTAL: Add `fork_worker` option and `refork` command for reduced memory usage by forking from a worker process instead of the master process. ([#2099]) + * EXPERIMENTAL: Added `wait_for_less_busy_worker` config. This may reduce latency on MRI through inserting a small delay before re-listening on the socket if worker is busy ([#2079]). + * EXPERIMENTAL: Added `nakayoshi_fork` option. Reduce memory usage in preloaded cluster-mode apps by GCing before fork and compacting, where available. ([#2093], [#2256]) + * Added pumactl `thread-backtraces` command to print thread backtraces ([#2054]) + * Added incrementing `requests_count` to `Puma.stats`. 
([#2106]) + * Increased maximum URI path length from 2048 to 8192 bytes ([#2167], [#2344]) + * `lowlevel_error_handler` is now called during a forced threadpool shutdown, and if a callable with 3 arguments is set, we now also pass the status code ([#2203]) + * Faster phased restart and worker timeout ([#2220]) + * Added `state_permission` to config DSL to set state file permissions ([#2238]) + * Added `Puma.stats_hash`, which returns a stats in Hash instead of a JSON string ([#2086], [#2253]) + * `rack.multithread` and `rack.multiprocess` now dynamically resolved by `max_thread` and `workers` respectively ([#2288]) + +* Deprecations, Removals and Breaking API Changes + * `--control` has been removed. Use `--control-url` ([#1487]) + * `worker_directory` has been removed. Use `directory`. + * min_threads now set by environment variables PUMA_MIN_THREADS and MIN_THREADS. ([#2143]) + * max_threads now set by environment variables PUMA_MAX_THREADS and MAX_THREADS. ([#2143]) + * max_threads default to 5 in MRI or 16 for all other interpreters. ([#2143]) + * `preload_app!` is on by default if number of workers > 1 and set via `WEB_CONCURRENCY` ([#2143]) + * Puma::Plugin.workers_supported? has been removed. Use Puma.forkable? instead. ([#2143]) + * `tcp_mode` has been removed without replacement. ([#2169]) + * Daemonization has been removed without replacement. 
([#2170]) + * Changed #connected_port to #connected_ports ([#2076]) + * Configuration: `environment` is read from `RAILS_ENV`, if `RACK_ENV` can't be found ([#2022]) + * Log binding on http:// for TCP bindings to make it clickable ([#2300]) + +* Bugfixes + * Fix JSON loading issues on phased-restarts ([#2269]) + * Improve shutdown reliability ([#2312], [#2338]) + * Close client http connections made to an ssl server with TLSv1.3 ([#2116]) + * Do not set user_config to quiet by default to allow for file config ([#2074]) + * Always close SSL connection in Puma::ControlCLI ([#2211]) + * Windows update extconf.rb for use with ssp and varied Ruby/MSYS2 combinations ([#2069]) + * Ensure control server Unix socket is closed on shutdown ([#2112]) + * Preserve `BUNDLE_GEMFILE` env var when using `prune_bundler` ([#1893]) + * Send 408 request timeout even when queue requests is disabled ([#2119]) + * Rescue IO::WaitReadable instead of EAGAIN for blocking read ([#2121]) + * Ensure `BUNDLE_GEMFILE` is unspecified in workers if unspecified in master when using `prune_bundler` ([#2154]) + * Rescue and log exceptions in hooks defined by users (on_worker_boot, after_worker_fork etc) ([#1551]) + * Read directly from the socket in #read_and_drop to avoid raising further SSL errors ([#2198]) + * Set `Connection: closed` header when queue requests is disabled ([#2216]) + * Pass queued requests to thread pool on server shutdown ([#2122]) + * Fixed a few minor concurrency bugs in ThreadPool that may have affected non-GVL Rubies ([#2220]) + * Fix `out_of_band` hook never executed if the number of worker threads is > 1 ([#2177]) + * Fix ThreadPool#shutdown timeout accuracy ([#2221]) + * Fix `UserFileDefaultOptions#fetch` to properly use `default` ([#2233]) + * Improvements to `out_of_band` hook ([#2234]) + * Prefer the rackup file specified by the CLI ([#2225]) + * Fix for spawning subprocesses with fork_worker option ([#2267]) + * Set `CONTENT_LENGTH` for chunked requests ([#2287]) + * 
JRuby - Add Puma::MiniSSL::Engine#init? and #teardown methods, run all SSL tests ([#2317]) + * Improve shutdown reliability ([#2312]) + * Resolve issue with threadpool waiting counter decrement when thread is killed + * Constrain rake-compiler version to 0.9.4 to fix `ClassNotFound` exception when using MiniSSL with Java8. + * Fix recursive `prune_bundler` ([#2319]). + * Ensure that TCP_CORK is usable + * Fix corner case when request body is chunked ([#2326]) + * Fix filehandle leak in MiniSSL ([#2299]) + +* Refactor + * Remove unused loader argument from Plugin initializer ([#2095]) + * Simplify `Configuration.random_token` and remove insecure fallback ([#2102]) + * Simplify `Runner#start_control` URL parsing ([#2111]) + * Removed the IOBuffer extension and replaced with Ruby ([#1980]) + * Update `Rack::Handler::Puma.run` to use `**options` ([#2189]) + * ThreadPool concurrency refactoring ([#2220]) + * JSON parse cluster worker stats instead of regex ([#2124]) + * Support parallel tests in verbose progress reporting ([#2223]) + * Refactor error handling in server accept loop ([#2239]) + +## 4.3.12 / 2022-03-30 + +* Security + * Close several HTTP Request Smuggling exploits (CVE-2022-24790) + +## 4.3.11 / 2022-02-11 + +* Security + * Always close the response body (GHSA-rmj8-8hhh-gv5h) + +## 4.3.10 / 2021-10-12 + +* Bugfixes + * Allow UTF-8 in HTTP header values + +## 4.3.9 / 2021-10-12 + +* Security + * Do not allow LF as a line ending in a header (CVE-2021-41136) + +## 4.3.8 / 2021-05-11 + +* Security + * Close keepalive connections after the maximum number of fast inlined requests (CVE-2021-29509) ([#2625]) + +## 4.3.7 / 2020-11-30 + +* Bugfixes + * Backport set CONTENT_LENGTH for chunked requests (Originally: [#2287], backport: [#2496]) + +## 4.3.6 / 2020-09-05 + +* Bugfixes + * Explicitly include ctype.h to fix compilation warning and build error on macOS with Xcode 12 ([#2304]) + * Don't require json at boot ([#2269]) + +## 4.3.4/4.3.5 and 3.12.5/3.12.6 / 
2020-05-22 + +Each patchlevel release contains a separate security fix. We recommend simply upgrading to 4.3.5/3.12.6. + +* Security + * Fix: Fixed two separate HTTP smuggling vulnerabilities that used the Transfer-Encoding header. CVE-2020-11076 and CVE-2020-11077. + +## 4.3.3 and 3.12.4 / 2020-02-28 + +* Bugfixes + * Fix: Fixes a problem where we weren't splitting headers correctly on newlines ([#2132]) +* Security + * Fix: Prevent HTTP Response splitting via CR in early hints. CVE-2020-5249. + +## 4.3.2 and 3.12.3 / 2020-02-27 (YANKED) + +* Security + * Fix: Prevent HTTP Response splitting via CR/LF in header values. CVE-2020-5247. + +## 4.3.1 and 3.12.2 / 2019-12-05 + +* Security + * Fix: a poorly-behaved client could use keepalive requests to monopolize Puma's reactor and create a denial of service attack. CVE-2019-16770. + +## 4.3.0 / 2019-11-07 + +* Features + * Strip whitespace at end of HTTP headers ([#2010]) + * Optimize HTTP parser for JRuby ([#2012]) + * Add SSL support for the control app and cli ([#2046], [#2052]) + +* Bugfixes + * Fix Errno::EINVAL when SSL is enabled and browser rejects cert ([#1564]) + * Fix pumactl defaulting puma to development if an environment was not specified ([#2035]) + * Fix closing file stream when reading pid from pidfile ([#2048]) + * Fix a typo in configuration option `--extra_runtime_dependencies` ([#2050]) + +## 4.2.1 / 2019-10-07 + +* 3 bugfixes + * Fix socket activation of systemd (pre-existing) unix binder files ([#1842], [#1988]) + * Deal with multiple calls to bind correctly ([#1986], [#1994], [#2006]) + * Accepts symbols for `verify_mode` ([#1222]) + +## 4.2.0 / 2019-09-23 + +* 6 features + * Pumactl has a new -e environment option and reads `config/puma/.rb` config files ([#1885]) + * Semicolons are now allowed in URL paths (MRI only), useful for Angular or Redmine ([#1934]) + * Allow extra dependencies to be defined when using prune_bundler ([#1105]) + * Puma now reports the correct port when binding to port 
0, also reports other listeners when binding to localhost ([#1786]) + * Sending SIGINFO to any Puma worker now prints currently active threads and their backtraces ([#1320]) + * Puma threads all now have their name set on Ruby 2.3+ ([#1968]) +* 4 bugfixes + * Fix some misbehavior with phased restart and externally SIGTERMed workers ([#1908], [#1952]) + * Fix socket closing on error ([#1941]) + * Removed unnecessary SIGINT trap for JRuby that caused some race conditions ([#1961]) + * Fix socket files being left around after process stopped ([#1970]) +* Absolutely thousands of lines of test improvements and fixes thanks to @MSP-Greg + +## 4.1.1 / 2019-09-05 + +* 3 bugfixes + * Revert our attempt to not dup STDOUT/STDERR ([#1946]) + * Fix socket close on error ([#1941]) + * Fix workers not shutting down correctly ([#1908]) + +## 4.1.0 / 2019-08-08 + +* 4 features + * Add REQUEST_PATH on parse error message ([#1831]) + * You can now easily add custom log formatters with the `log_formatter` config option ([#1816]) + * Puma.stats now provides process start times ([#1844]) + * Add support for disabling TLSv1.1 ([#1836]) + +* 7 bugfixes + * Fix issue where Puma was creating zombie process entries ([#1887]) + * Fix bugs with line-endings and chunked encoding ([#1812]) + * RACK_URL_SCHEME is now set correctly in all conditions ([#1491]) + * We no longer mutate global STDOUT/STDERR, particularly the sync setting ([#1837]) + * SSL read_nonblock no longer blocks ([#1857]) + * Swallow connection errors when sending early hints ([#1822]) + * Backtrace no longer dumped when invalid pumactl commands are run ([#1863]) + +* 5 other + * Avoid casting worker_timeout twice ([#1838]) + * Removed a call to private that wasn't doing anything ([#1882]) + * README, Rakefile, docs and test cleanups ([#1848], [#1847], [#1846], [#1853], #1859, [#1850], [#1866], [#1870], [#1872], [#1833], [#1888]) + * Puma.io has proper documentation now (https://puma.io/puma/) + * Added the Contributor Covenant 
CoC + +* 1 known issue + * Some users are still experiencing issues surrounding socket activation and Unix sockets ([#1842]) + +## 4.0.1 / 2019-07-11 + +* 2 bugfixes + * Fix socket removed after reload - should fix problems with systemd socket activation. ([#1829]) + * Add extconf tests for DTLS_method & TLS_server_method, use in minissl.rb. Should fix "undefined symbol: DTLS_method" when compiling against old OpenSSL versions. ([#1832]) +* 1 other + * Removed unnecessary RUBY_VERSION checks. ([#1827]) + +## 4.0.0 / 2019-06-25 + +* 9 features + * Add support for disabling TLSv1.0 ([#1562]) + * Request body read time metric ([#1569]) + * Add out_of_band hook ([#1648]) + * Re-implement (native) IOBuffer for JRuby ([#1691]) + * Min worker timeout ([#1716]) + * Add option to suppress SignalException on SIGTERM ([#1690]) + * Allow mutual TLS CA to be set using `ssl_bind` DSL ([#1689]) + * Reactor now uses nio4r instead of `select` ([#1728]) + * Add status to pumactl with pidfile ([#1824]) + +* 10 bugfixes + * Do not accept new requests on shutdown ([#1685], [#1808]) + * Fix 3 corner cases when request body is chunked ([#1508]) + * Change pid existence check's condition branches ([#1650]) + * Don't call .stop on a server that doesn't exist ([#1655]) + * Implemented NID_X9_62_prime256v1 (P-256) curve over P-521 ([#1671]) + * Fix @notify.close can't modify frozen IOError (RuntimeError) ([#1583]) + * Fix Java 8 support ([#1773]) + * Fix error `uninitialized constant Puma::Cluster` ([#1731]) + * Fix `not_token` being able to be set to true ([#1803]) + * Fix "Hang on SIGTERM with ruby 2.6 in clustered mode" (PR [#1741], [#1674], [#1720], [#1730], [#1755]) + +## 3.12.1 / 2019-03-19 + +* 1 feature + * Internal strings are frozen ([#1649]) +* 3 bugfixes + * Fix chunked ending check ([#1607]) + * Rack handler should use provided default host ([#1700]) + * Better support for detecting runtimes that support `fork` ([#1630]) + +## 3.12.0 / 2018-07-13 + +* 5 features: + * You can 
now specify which SSL ciphers the server should support, default is unchanged ([#1478]) + * The setting for Puma's `max_threads` is now in `Puma.stats` ([#1604]) + * Pool capacity is now in `Puma.stats` ([#1579]) + * Installs restricted to Ruby 2.2+ ([#1506]) + * `--control` is now deprecated in favor of `--control-url` ([#1487]) + +* 2 bugfixes: + * Workers will no longer accept more web requests than they have capacity to process. This prevents an issue where one worker would accept lots of requests while starving other workers ([#1563]) + * In a test env puma now emits the stack on an exception ([#1557]) + +## 3.11.4 / 2018-04-12 + +* 2 features: + * Manage puma as a service using rc.d ([#1529]) + * Server stats are now available from a top level method ([#1532]) +* 5 bugfixes: + * Fix parsing CLI options ([#1482]) + * Order of stderr and stdout is made before redirecting to a log file ([#1511]) + * Init.d fix of `ps -p` to check if pid exists ([#1545]) + * Early hints bugfix ([#1550]) + * Purge interrupt queue when closing socket fails ([#1553]) + +## 3.11.3 / 2018-03-05 + +* 3 bugfixes: + * Add closed? 
to MiniSSL::Socket for use in reactor ([#1510]) + * Handle EOFError at the toplevel of the server threads ([#1524]) ([#1507]) + * Deal with zero sized bodies when using SSL ([#1483]) + +## 3.11.2 / 2018-01-19 + +* 1 bugfix: + * Deal with read\_nonblock returning nil early + +## 3.11.1 / 2018-01-18 + +* 1 bugfix: + * Handle read\_nonblock returning nil when the socket close ([#1502]) + +## 3.11.0 / 2017-11-20 + +* 2 features: + * HTTP 103 Early Hints ([#1403]) + * 421/451 status codes now have correct status messages attached ([#1435]) + +* 9 bugfixes: + * Environment config files (/config/puma/.rb) load correctly ([#1340]) + * Specify windows dependencies correctly ([#1434], [#1436]) + * puma/events required in test helper ([#1418]) + * Correct control CLI's option help text ([#1416]) + * Remove a warning for unused variable in mini_ssl ([#1409]) + * Correct pumactl docs argument ordering ([#1427]) + * Fix an uninitialized variable warning in server.rb ([#1430]) + * Fix docs typo/error in Launcher init ([#1429]) + * Deal with leading spaces in RUBYOPT ([#1455]) + +* 2 other: + * Add docs about internals ([#1425], [#1452]) + * Tons of test fixes from @MSP-Greg ([#1439], [#1442], [#1464]) + +## 3.10.0 / 2017-08-17 + +* 3 features: + * The status server has a new /gc and /gc-status command. ([#1384]) + * The persistent and first data timeouts are now configurable ([#1111]) + * Implemented RFC 2324 ([#1392]) + +* 12 bugfixes: + * Not really a Puma bug, but @NickolasVashchenko created a gem to workaround a Ruby bug that some users of Puma may be experiencing. See README for more. ([#1347]) + * Fix hangups with SSL and persistent connections. ([#1334]) + * Fix Rails double-binding to a port ([#1383]) + * Fix incorrect thread names ([#1368]) + * Fix issues with /etc/hosts and JRuby where localhost addresses were not correct. 
([#1318]) + * Fix compatibility with RUBYOPT="--enable-frozen-string-literal" ([#1376]) + * Fixed some compiler warnings ([#1388]) + * We actually run the integration tests in CI now ([#1390]) + * No longer shipping unnecessary directories in the gemfile ([#1391]) + * If RUBYOPT is nil, we no longer blow up on restart. ([#1385]) + * Correct response to SIGINT ([#1377]) + * Proper exit code returned when we receive a TERM signal ([#1337]) + +* 3 refactors: + * Various test improvements from @grosser + * Rubocop ([#1325]) + * Hoe has been removed ([#1395]) + +* 1 known issue: + * Socket activation doesn't work in JRuby. Their fault, not ours. ([#1367]) + +## 3.9.1 / 2017-06-03 + +* 2 bugfixes: + * Fixed compatibility with older Bundler versions ([#1314]) + * Some internal test/development cleanup ([#1311], [#1313]) + +## 3.9.0 / 2017-06-01 + +* 2 features: + * The ENV is now reset to its original values when Puma restarts via USR1/USR2 ([#1260]) (MRI only, no JRuby support) + * Puma will no longer accept more clients than the maximum number of threads. ([#1278]) + +* 9 bugfixes: + * Reduce information leakage by preventing HTTP parse errors from writing environment hashes to STDERR ([#1306]) + * Fix SSL/WebSocket compatibility ([#1274]) + * HTTP headers with empty values are no longer omitted from responses. ([#1261]) + * Fix a Rack env key which was set to nil. ([#1259]) + * peercert has been implemented for JRuby ([#1248]) + * Fix port settings when using rails s ([#1277], [#1290]) + * Fix compat w/LibreSSL ([#1285]) + * Fix restarting Puma w/symlinks and a new Gemfile ([#1282]) + * Replace Dir.exists? with Dir.exist? ([#1294]) + +* 1 known issue: + * A bug in MRI 2.2+ can result in IOError: stream closed. See [#1206]. This issue has existed since at least Puma 3.6, and probably further back. + +* 1 refactor: + * Lots of test fixups from @grosser. 
+ +## 3.8.2 / 2017-03-14 + +* 1 bugfix: + * Deal with getsockopt with TCP\_INFO failing for sockets that say they're TCP but aren't really. ([#1241]) + +## 3.8.1 / 2017-03-10 + +* 1 bugfix: + * Remove method call to method that no longer exists ([#1239]) + +## 3.8.0 / 2017-03-09 + +* 2 bugfixes: + * Port from rack handler does not take precedence over config file in Rails 5.1.0.beta2+ and 5.0.1.rc3+ ([#1234]) + * The `tmp/restart.txt` plugin no longer restricts the user from running more than one server from the same folder at a time ([#1226]) + +* 1 feature: + * Closed clients are aborted to save capacity ([#1227]) + +* 1 refactor: + * Bundler is no longer a dependency from tests ([#1213]) + +## 3.7.1 / 2017-02-20 + +* 2 bugfixes: + * Fix typo which blew up MiniSSL ([#1182]) + * Stop overriding command-line options with the config file ([#1203]) + +## 3.7.0 / 2017-01-04 + +* 6 minor features: + * Allow rack handler to accept ssl host. ([#1129]) + * Refactor TTOU processing. TTOU now handles multiple signals at once. ([#1165]) + * Pickup any remaining chunk data as the next request. + * Prevent short term thread churn - increased auto trim default to 30 seconds. + * Raise error when `stdout` or `stderr` is not writable. ([#1175]) + * Add Rack 2.0 support to gemspec. ([#1068]) + +* 5 refactors: + * Compare host and server name only once per call. ([#1091]) + * Minor refactor on Thread pool ([#1088]) + * Removed a ton of unused constants, variables and files. + * Use MRI macros when allocating heap memory + * Use hooks for on\_booted event. ([#1160]) + +* 14 bugfixes: + * Add eof? method to NullIO? ([#1169]) + * Fix Puma startup in provided init.d script ([#1061]) + * Fix default SSL mode back to none. ([#1036]) + * Fixed the issue of @listeners getting nil io ([#1120]) + * Make `get_dh1024` compatible with OpenSSL v1.1.0 ([#1178]) + * More gracefully deal with SSL sessions. Fixes [#1002] + * Move puma.rb to just autoloads. 
Fixes [#1063] + * MiniSSL: Provide write as <<. Fixes [#1089] + * Prune bundler should inherit fds ([#1114]) + * Replace use of Process.getpgid which does not behave as intended on all platforms ([#1110]) + * Transfer encoding header should be downcased before comparison ([#1135]) + * Use same write log logic for hijacked requests. ([#1081]) + * Fix `uninitialized constant Puma::StateFile` ([#1138]) + * Fix access priorities of each level in LeveledOptions ([#1118]) + +* 3 others: + + * Lots of tests added/fixed/improved. Switched to Minitest from Test::Unit. Big thanks to @frodsan. + * Lots of documentation added/improved. + * Add license indicators to the HTTP extension. ([#1075]) + +## 3.6.2 / 2016-11-22 + +* 1 bug fix: + + * Revert [#1118]/Fix access priorities of each level in LeveledOptions. This + had an unintentional side effect of changing the importance of command line + options, such as -p. + +## 3.6.1 / 2016-11-21 + +* 8 bug fixes: + + * Fix Puma start in init.d script. + * Fix default SSL mode back to none. Fixes [#1036] + * Fixed the issue of @listeners getting nil io, fix rails restart ([#1120]) + * More gracefully deal with SSL sessions. Fixes [#1002] + * Prevent short term thread churn. + * Provide write as <<. Fixes [#1089] + * Fix access priorities of each level in LeveledOptions - fixes TTIN. + * Stub description files updated for init.d. + +* 2 new project committers: + + * Nate Berkopec (@nateberkopec) + * Richard Schneeman (@schneems) + +## 3.6.0 / 2016-07-24 + +* 12 bug fixes: + * Add ability to detect a shutting down server. Fixes [#932] + * Add support for Expect: 100-continue. Fixes [#519] + * Check SSLContext better. Fixes [#828] + * Clarify behavior of '-t num'. Fixes [#984] + * Don't default to VERIFY_PEER. Fixes [#1028] + * Don't use ENV['PWD'] on windows. Fixes [#1023] + * Enlarge the scope of catching app exceptions. Fixes [#1027] + * Execute background hooks after daemonizing. 
Fixes [#925] + * Handle HUP as a stop unless there is IO redirection. Fixes [#911] + * Implement chunked request handling. Fixes [#620] + * Just rescue exception to return a 500. Fixes [#1027] + * Redirect IO in the jruby daemon mode. Fixes [#778] + +## 3.5.2 / 2016-07-20 + +* 1 bug fix: + * Don't let persistent_timeout be nil + +* 1 PR merged: + * Merge pull request [#1021] from benzrf/patch-1 + +## 3.5.1 / 2016-07-20 + +* 1 bug fix: + * Be sure to only listen on host:port combos once. Fixes [#1022] + +## 3.5.0 / 2016-07-18 + +* 1 minor features: + * Allow persistent_timeout to be configured via the dsl. + +* 9 bug fixes: + * Allow a bare % in a query string. Fixes [#958] + * Explicitly listen on all localhost addresses. Fixes [#782] + * Fix `TCPLogger` log error in tcp cluster mode. + * Fix puma/puma[#968] Cannot bind SSL port due to missing verify_mode option + * Fix puma/puma[#968] Default verify_mode to peer + * Log any exceptions in ThreadPool. Fixes [#1010] + * Silence connection errors in the reactor. 
Fixes [#959] + * Tiny fixes in hook documentation for [#840] + * It should not log requests if we want it to be quiet + +* 5 doc fixes: + * Add How to stop Puma on Heroku using plugins to the example directory + * Provide both hot and phased restart in jungle script + * Update reference to the instances management script + * Update default number of threads + * Fix typo in example config + +* 14 PRs merged: + * Merge pull request [#1007] from willnet/patch-1 + * Merge pull request [#1014] from jeznet/patch-1 + * Merge pull request [#1015] from bf4/patch-1 + * Merge pull request [#1017] from jorihardman/configurable_persistent_timeout + * Merge pull request [#954] from jf/master + * Merge pull request [#955] from jf/add-request-info-to-standard-error-rescue + * Merge pull request [#956] from maxkwallace/master + * Merge pull request [#960] from kmayer/kmayer-plugins-heroku-restart + * Merge pull request [#969] from frankwong15/master + * Merge pull request [#970] from willnet/delete-blank-document + * Merge pull request [#974] from rocketjob/feature/name_threads + * Merge pull request [#977] from snow/master + * Merge pull request [#981] from zach-chai/patch-1 + * Merge pull request [#993] from scorix/master + +## 3.4.0 / 2016-04-07 + +* 2 minor features: + * Add ability to force threads to stop on shutdown. Fixes [#938] + * Detect and commit seppuku when fork(2) fails. Fixes [#529] + +* 3 unknowns: + * Ignore errors trying to update the backport tables. Fixes [#788] + * Invoke the lowlevel_error in more places to allow for exception tracking. Fixes [#894] + * Update the query string when an absolute URI is used. Fixes [#937] + +* 5 doc fixes: + * Add Process Monitors section to top-level README + * Better document the hooks. Fixes [#840] + * docs/system.md sample config refinements and elaborations + * Fix typos at couple of places. 
+ * Cleanup warnings + +* 3 PRs merged: + * Merge pull request [#945] from dekellum/systemd-docs-refined + * Merge pull request [#946] from vipulnsward/rm-pid + * Merge pull request [#947] from vipulnsward/housekeeping-typos + +## 3.3.0 / 2016-04-05 + +* 2 minor features: + * Allow overriding options of Configuration object + * Rename to inherit_ssl_listener like inherit_tcp|unix + +* 2 doc fixes: + * Add docs/systemd.md (with socket activation sub-section) + * Document UNIX signals with cluster on README.md + +* 3 PRs merged: + * Merge pull request [#936] from prathamesh-sonpatki/allow-overriding-config-options + * Merge pull request [#940] from kyledrake/signalsdoc + * Merge pull request [#942] from dekellum/socket-activate-improve + +## 3.2.0 / 2016-03-20 + +* 1 deprecation removal: + * Delete capistrano.rb + +* 3 bug fixes: + * Detect gems.rb as well as Gemfile + * Simplify and fix logic for directory to use when restarting for all phases + * Speed up phased-restart start + +* 2 PRs merged: + * Merge pull request [#927] from jlecour/gemfile_variants + * Merge pull request [#931] from joneslee85/patch-10 + +## 3.1.1 / 2016-03-17 + +* 4 bug fixes: + * Disable USR1 usage on JRuby + * Fixes [#922] - Correctly define file encoding as UTF-8 + * Set a more explicit SERVER_SOFTWARE Rack variable + * Show RUBY_ENGINE_VERSION if available. Fixes [#923] + +* 3 PRs merged: + * Merge pull request [#912] from tricknotes/fix-allow-failures-in-travis-yml + * Merge pull request [#921] from swrobel/patch-1 + * Merge pull request [#924] from tbrisker/patch-1 + +## 3.1.0 / 2016-03-05 + +* 1 minor feature: + * Add 'import' directive to config file. Fixes [#916] + +* 5 bug fixes: + * Add 'fetch' to options. Fixes [#913] + * Fix jruby daemonization. Fixes [#918] + * Recreate the proper args manually. Fixes [#910] + * Require 'time' to get iso8601. 
Fixes [#914] + +## 3.0.2 / 2016-02-26 + +* 5 bug fixes: + + * Fix 'undefined local variable or method `pid` for #' when execute pumactl with `--pid` option. + * Fix 'undefined method `windows?` for Puma:Module' when execute pumactl. + * Harden tmp_restart against errors related to the restart file + * Make `plugin :tmp_restart` behavior correct in Windows. + * fix uninitialized constant Puma::ControlCLI::StateFile + +* 3 PRs merged: + + * Merge pull request [#901] from mitto/fix-pumactl-uninitialized-constant-statefile + * Merge pull request [#902] from corrupt952/fix_undefined_method_and_variable_when_execute_pumactl + * Merge pull request [#905] from Eric-Guo/master + +## 3.0.1 / 2016-02-25 + +* 1 bug fix: + + * Removed the experimental support for async.callback as it broke + websockets entirely. Seems no server has both hijack and async.callback + and thus faye is totally confused what to do and doesn't work. + +## 3.0.0 / 2016-02-25 + +* 2 major changes: + + * Ruby pre-2.0 is no longer supported. We'll do our best to not add + features that break those rubies but will no longer be testing + with them. + * Don't log requests by default. Fixes [#852] + +* 2 major features: + + * Plugin support! Plugins can interact with configuration as well + as provide augment server functionality! + * Experimental env['async.callback'] support + +* 4 minor features: + + * Listen to unix socket with provided backlog if any + * Improves the clustered stats to report worker stats + * Pass the env to the lowlevel_error handler. Fixes [#854] + * Treat path-like hosts as unix sockets. Fixes [#824] + +* 5 bug fixes: + + * Clean thread locals when using keepalive. Fixes [#823] + * Cleanup compiler warnings. Fixes [#815] + * Expose closed? for use by the reactor. Fixes [#835] + * Move signal handlers to separate method to prevent space leak. 
Fixes [#798] + * Signal not full on worker exit [#876] + +* 5 doc fixes: + + * Update README.md with various grammar fixes + * Use newest version of Minitest + * Add directory configuration docs, fix typo [ci skip] + * Remove old COPYING notice. Fixes [#849] + +* 10 merged PRs: + + * Merge pull request [#871] from deepj/travis + * Merge pull request [#874] from wallclockbuilder/master + * Merge pull request [#883] from dadah89/igor/trim_only_worker + * Merge pull request [#884] from uistudio/async-callback + * Merge pull request [#888] from mlarraz/tick_minitest + * Merge pull request [#890] from todd/directory_docs + * Merge pull request [#891] from ctaintor/improve_clustered_status + * Merge pull request [#893] from spastorino/add_missing_require + * Merge pull request [#897] from zendesk/master + * Merge pull request [#899] from kch/kch-readme-fixes + +## 2.16.0 / 2016-01-27 + +* 7 minor features: + + * Add 'set_remote_address' config option + * Allow to run puma in silent mode + * Expose cli options in DSL + * Support passing JRuby keystore info in ssl_bind DSL + * Allow umask for unix:/// style control urls + * Expose `old_worker_count` in stats url + * Support TLS client auth (verify_mode) in jruby + +* 7 bug fixes: + + * Don't persist before_fork hook in state file + * Reload bundler before pulling in rack. 
Fixes [#859] + * Remove NEWRELIC_DISPATCHER env variable + * Cleanup C code + * Use Timeout.timeout instead of Object.timeout + * Make phased restarts faster + * Ignore the case of certain headers, because HTTP + +* 1 doc changes: + + * Test against the latest Ruby 2.1, 2.2, 2.3, head and JRuby 9.0.4.0 on Travis + +* 12 merged PRs + * Merge pull request [#822] from kwugirl/remove_NEWRELIC_DISPATCHER + * Merge pull request [#833] from joemiller/jruby-client-tls-auth + * Merge pull request [#837] from YuriSolovyov/ssl-keystore-jruby + * Merge pull request [#839] from mezuka/master + * Merge pull request [#845] from deepj/timeout-deprecation + * Merge pull request [#846] from sriedel/strip_before_fork + * Merge pull request [#850] from deepj/travis + * Merge pull request [#853] from Jeffrey6052/patch-1 + * Merge pull request [#857] from zendesk/faster_phased_restarts + * Merge pull request [#858] from mlarraz/fix_some_warnings + * Merge pull request [#860] from zendesk/expose_old_worker_count + * Merge pull request [#861] from zendesk/allow_control_url_umask + +## 2.15.3 / 2015-11-07 + +* 1 bug fix: + + * Fix JRuby parser + +## 2.15.2 / 2015-11-06 + +* 2 bug fixes: + * ext/puma_http11: handle duplicate headers as per RFC + * Only set ctx.ca iff there is a params['ca'] to set with. 
+ +* 2 PRs merged: + * Merge pull request [#818] from unleashed/support-duplicate-headers + * Merge pull request [#819] from VictorLowther/fix-ca-and-verify_null-exception + +## 2.15.1 / 2015-11-06 + +* 1 bug fix: + + * Allow older openssl versions + +## 2.15.0 / 2015-11-06 + +* 6 minor features: + * Allow setting ca without setting a verify mode + * Make jungle for init.d support rbenv + * Use SSL_CTX_use_certificate_chain_file for full chain + * cluster: add worker_boot_timeout option + * configuration: allow empty tags to mean no tag desired + * puma/cli: support specifying STD{OUT,ERR} redirections and append mode + +* 5 bug fixes: + * Disable SSL Compression + * Fix bug setting worker_directory when using a symlink directory + * Fix error message in DSL that was slightly inaccurate + * Pumactl: set correct process name. Fixes [#563] + * thread_pool: fix race condition when shutting down workers + +* 10 doc fixes: + * Add before_fork explanation in Readme.md + * Correct spelling in DEPLOYMENT.md + * Correct spelling in docs/nginx.md + * Fix spelling errors. 
+ * Fix typo in deployment description + * Fix typos (it's -> its) in events.rb and server.rb + * fixing for typo mentioned in [#803] + * Spelling correction for README + * thread_pool: fix typos in comment + * More explicit docs for worker_timeout + +* 18 PRs merged: + * Merge pull request [#768] from nathansamson/patch-1 + * Merge pull request [#773] from rossta/spelling_corrections + * Merge pull request [#774] from snow/master + * Merge pull request [#781] from sunsations/fix-typo + * Merge pull request [#791] from unleashed/allow_empty_tags + * Merge pull request [#793] from robdimarco/fix-working-directory-symlink-bug + * Merge pull request [#794] from peterkeen/patch-1 + * Merge pull request [#795] from unleashed/redirects-from-cmdline + * Merge pull request [#796] from cschneid/fix_dsl_message + * Merge pull request [#799] from annafw/master + * Merge pull request [#800] from liamseanbrady/fix_typo + * Merge pull request [#801] from scottjg/ssl-chain-file + * Merge pull request [#802] from scottjg/ssl-crimes + * Merge pull request [#804] from burningTyger/patch-2 + * Merge pull request [#809] from unleashed/threadpool-fix-race-in-shutdown + * Merge pull request [#810] from vlmonk/fix-pumactl-restart-bug + * Merge pull request [#814] from schneems/schneems/worker_timeout-docs + * Merge pull request [#817] from unleashed/worker-boot-timeout + +## 2.14.0 / 2015-09-18 + +* 1 minor feature: + * Make building with SSL support optional + +* 1 bug fix: + * Use Rack::Builder if available. Fixes [#735] + +## 2.13.4 / 2015-08-16 + +* 1 bug fix: + * Use the environment possible set by the config early and from + the config file later (if set). + +## 2.13.3 / 2015-08-15 + +Seriously, I need to revamp config with tests. + +* 1 bug fix: + * Fix preserving options before cleaning for state. Fixes [#769] + +## 2.13.2 / 2015-08-15 + +The "clearly I don't have enough tests for the config" release. + +* 1 bug fix: + * Fix another place binds wasn't initialized. 
Fixes [#767] + +## 2.13.1 / 2015-08-15 + +* 2 bug fixes: + * Fix binds being masked in config files. Fixes [#765] + * Use options from the config file properly in pumactl. Fixes [#764] + +## 2.13.0 / 2015-08-14 + +* 1 minor feature: + * Add before_fork hooks option. + +* 3 bug fixes: + * Check for OPENSSL_NO_ECDH before using ECDH + * Eliminate logging overhead from JRuby SSL + * Prefer cli options over config file ones. Fixes [#669] + +* 1 deprecation: + * Add deprecation warning to capistrano.rb. Fixes [#673] + +* 4 PRs merged: + * Merge pull request [#668] from kcollignon/patch-1 + * Merge pull request [#754] from nathansamson/before_boot + * Merge pull request [#759] from BenV/fix-centos6-build + * Merge pull request [#761] from looker/no-log + +## 2.12.3 / 2015-08-03 + +* 8 minor bugs fixed: + * Fix Capistrano 'uninitialized constant Puma' error. + * Fix some ancient and incorrect error handling code + * Fix uninitialized constant error + * Remove toplevel rack interspection, require rack on load instead + * Skip empty parts when chunking + * Switch from inject to each in config_ru_binds iteration + * Wrap SSLv3 spec in version guard. + * ruby 1.8.7 compatibility patches + +* 4 PRs merged: + * Merge pull request [#742] from deivid-rodriguez/fix_missing_require + * Merge pull request [#743] from matthewd/skip-empty-chunks + * Merge pull request [#749] from huacnlee/fix-cap-uninitialized-puma-error + * Merge pull request [#751] from costi/compat_1_8_7 + +* 1 test fix: + * Add 1.8.7, rbx-1 (allow failures) to Travis. + +## 2.12.2 / 2015-07-17 + +* 2 bug fix: + * Pull over and use Rack::URLMap. Fixes [#741] + * Stub out peercert on JRuby for now. Fixes [#739] + +## 2.12.1 / 2015-07-16 + +* 2 bug fixes: + * Use a constant format. Fixes [#737] + * Use strerror for Windows sake. 
Fixes [#733] + +* 1 doc change: + * typo fix: occured -> occurred + +* 1 PR merged: + * Merge pull request [#736] from paulanunda/paulanunda/typo-fix + +## 2.12.0 / 2015-07-14 + +* 13 bug fixes: + * Add thread reaping to thread pool + * Do not automatically use chunked responses when hijacked + * Do not suppress Content-Length on partial hijack + * Don't allow any exceptions to terminate a thread + * Handle ENOTCONN client disconnects when setting REMOTE_ADDR + * Handle very early exit of cluster mode. Fixes [#722] + * Install rack when running tests on travis to use rack/lint + * Make puma -v and -h return success exit code + * Make pumactl load config/puma.rb by default + * Pass options from pumactl properly when pruning. Fixes [#694] + * Remove rack dependency. Fixes [#705] + * Remove the default Content-Type: text/plain + * Add Client Side Certificate Auth + +* 8 doc/test changes: + * Added example sourcing of environment vars + * Added tests for bind configuration on rackup file + * Fix example config text + * Update DEPLOYMENT.md + * Update Readme with example of custom error handler + * ci: Improve Travis settings + * ci: Start running tests against JRuby 9k on Travis + * ci: Convert to container infrastructure for travisci + +* 2 ops changes: + * Check for system-wide rbenv + * capistrano: Add additional env when start rails + +* 16 PRs merged: + * Merge pull request [#686] from jjb/patch-2 + * Merge pull request [#693] from rob-murray/update-example-config + * Merge pull request [#697] from spk/tests-bind-on-rackup-file + * Merge pull request [#699] from deees/fix/require_rack_builder + * Merge pull request [#701] from deepj/master + * Merge pull request [#702] from Jimdo/thread-reaping + * Merge pull request [#703] from deepj/travis + * Merge pull request [#704] from grega/master + * Merge pull request [#709] from lian/master + * Merge pull request [#711] from julik/master + * Merge pull request [#712] from yakara-ltd/pumactl-default-config + * Merge pull 
request [#715] from RobotJiang/master + * Merge pull request [#725] from rwz/master + * Merge pull request [#726] from strenuus/handle-client-disconnect + * Merge pull request [#729] from allaire/patch-1 + * Merge pull request [#730] from iamjarvo/container-infrastructure + +## 2.11.3 / 2015-05-18 + +* 5 bug fixes: + * Be sure to unlink tempfiles after a request. Fixes [#690] + * Coerce the key to a string before checking. (thar be symbols). Fixes [#684] + * Fix hang on bad SSL handshake + * Remove `enable_SSLv3` support from JRuby + +* 1 PR merged: + * Merge pull request [#698] from looker/hang-handshake + +## 2.11.2 / 2015-04-11 + +* 2 minor features: + * Add `on_worker_fork` hook, which allows to mimic Unicorn's behavior + * Add shutdown_debug config option + +* 4 bug fixes: + * Fix the Config constants not being available in the DSL. Fixes [#683] + * Ignore multiple port declarations + * Proper 'Connection' header handling compatible with HTTP 1.[01] protocols + * Use "Puma" instead of "puma" to reporting to New Relic + +* 1 doc fixes: + * Add Gitter badge. + +* 6 PRs merged: + * Merge pull request [#657] from schneems/schneems/puma-once-port + * Merge pull request [#658] from Tomohiro/newrelic-dispatcher-default-update + * Merge pull request [#662] from basecrm/connection-compatibility + * Merge pull request [#664] from fxposter/on-worker-fork + * Merge pull request [#667] from JuanitoFatas/doc/gemspec + * Merge pull request [#672] from chulkilee/refactor + +## 2.11.1 / 2015-02-11 + +* 2 bug fixes: + * Avoid crash in strange restart conditions + * Inject the GEM_HOME that bundler into puma-wild's env. Fixes [#653] + +* 2 PRs merged: + * Merge pull request [#644] from bpaquet/master + * Merge pull request [#646] from mkonecny/master + +## 2.11.0 / 2015-01-20 + +* 9 bug fixes: + * Add mode as an additional bind option to unix sockets. 
Fixes [#630] + * Advertise HTTPS properly after a hot restart + * Don't write lowlevel_error_handler to state + * Fix phased restart with stuck requests + * Handle spaces in the path properly. Fixes [#622] + * Set a default REMOTE_ADDR to avoid using peeraddr on unix sockets. Fixes [#583] + * Skip device number checking on jruby. Fixes [#586] + * Update extconf.rb to compile correctly on OS X + * redirect io right after daemonizing so startup errors are shown. Fixes [#359] + +* 6 minor features: + * Add a configuration option that prevents puma from queueing requests. + * Add reload_worker_directory + * Add the ability to pass environment variables to the init script (for Jungle). + * Add the proctitle tag to the worker. Fixes [#633] + * Infer a proctitle tag based on the directory + * Update lowlevel error message to be more meaningful. + +* 10 PRs merged: + * Merge pull request [#478] from rubencaro/master + * Merge pull request [#610] from kwilczynski/master + * Merge pull request [#611] from jasonl/better-lowlevel-message + * Merge pull request [#616] from jc00ke/master + * Merge pull request [#623] from raldred/patch-1 + * Merge pull request [#628] from rdpoor/master + * Merge pull request [#634] from deepj/master + * Merge pull request [#637] from raskhadafi/patch-1 + * Merge pull request [#639] from ebeigarts/fix-phased-restarts + * Merge pull request [#640] from codehotter/issue-612-dependent-requests-deadlock + +## 2.10.2 / 2014-11-26 + +* 1 bug fix: + * Conditionalize thread local cleaning, fixes perf degradation fix + The code to clean out all Thread locals adds pretty significant + overhead to each request, so it has to be turned on explicitly + if a user needs it. + +## 2.10.1 / 2014-11-24 + +* 1 bug fix: + * Load the app after daemonizing because the app might start threads. + + This change means errors loading the app are now reported only in the redirected + stdout/stderr. 
+ + If your app has problems starting up, start it without daemon mode initially + to test. + +## 2.10.0 / 2014-11-23 + +* 3 minor features: + * Added on_worker_shutdown hook mechanism + * Allow binding to ipv6 addresses for ssl URIs + * Warn about any threads started during app preload + +* 5 bug fixes: + * Clean out a thread's local data before doing work + * Disable SSLv3. Fixes [#591] + * First change the directory to use the correct Gemfile. + * Only use config.ru binds if specified. Fixes [#606] + * Strongish cipher suite with FS support for some browsers + +* 2 doc changes: + * Change umask examples to more permissive values + * fix typo in README.md + +* 9 Merged PRs: + * Merge pull request [#560] from raskhadafi/prune_bundler-bug + * Merge pull request [#566] from sheltond/master + * Merge pull request [#593] from andruby/patch-1 + * Merge pull request [#594] from hassox/thread-cleanliness + * Merge pull request [#596] from burningTyger/patch-1 + * Merge pull request [#601] from sorentwo/friendly-umask + * Merge pull request [#602] from 1334/patch-1 + * Merge pull request [#608] from Gu1/master + * Merge pull request [#538] from memiux/? + +## 2.9.2 / 2014-10-25 + +* 8 bug fixes: + * Fix puma-wild handling a restart properly. Fixes [#550] + * JRuby SSL POODLE update + * Keep deprecated features warnings + * Log the current time when Puma shuts down. + * Fix cross-platform extension library detection + * Use the correct Windows names for OpenSSL. 
+ * Better error logging during startup + * Fixing sexist error messages + +* 6 PRs merged: + * Merge pull request [#549] from bsnape/log-shutdown-time + * Merge pull request [#553] from lowjoel/master + * Merge pull request [#568] from mariuz/patch-1 + * Merge pull request [#578] from danielbuechele/patch-1 + * Merge pull request [#581] from alexch/slightly-better-logging + * Merge pull request [#590] from looker/jruby_disable_sslv3 + +## 2.9.1 / 2014-09-05 + +* 4 bug fixes: + * Cleanup the SSL related structures properly, fixes memory leak + * Fix thread spawning edge case. + * Force a worker check after a worker boots, don't wait 5sec. Fixes [#574] + * Implement SIGHUP for logs reopening + +* 2 PRs merged: + * Merge pull request [#561] from theoldreader/sighup + * Merge pull request [#570] from havenwood/spawn-thread-edge-case + +## 2.9.0 / 2014-07-12 + +* 1 minor feature: + * Add SSL support for JRuby + +* 3 bug fixes: + * Typo BUNDLER_GEMFILE -> BUNDLE_GEMFILE + * Use fast_write because we can't trust syswrite + * pumactl - do not modify original ARGV + +* 4 doc fixes: + * BSD-3-Clause over BSD to avoid confusion + * Deploy doc: clarification of the GIL + * Fix typo in DEPLOYMENT.md + * Update README.md + +* 6 PRs merged: + * Merge pull request [#520] from misfo/patch-2 + * Merge pull request [#530] from looker/jruby-ssl + * Merge pull request [#537] from vlmonk/patch-1 + * Merge pull request [#540] from allaire/patch-1 + * Merge pull request [#544] from chulkilee/bsd-3-clause + * Merge pull request [#551] from jcxplorer/patch-1 + +## 2.8.2 / 2014-04-12 + +* 4 bug fixes: + * During upgrade, change directory in main process instead of workers. 
+ * Close the client properly on error + * Capistrano: fallback from phased restart to start when not started + * Allow tag option in conf file + +* 4 doc fixes: + * Fix Puma daemon service README typo + * `preload_app!` instead of `preload_app` + * add preload_app and prune_bundler to example config + * allow changing of worker_timeout in config file + +* 11 PRs merged: + * Merge pull request [#487] from ckuttruff/master + * Merge pull request [#492] from ckuttruff/master + * Merge pull request [#493] from alepore/config_tag + * Merge pull request [#503] from mariuz/patch-1 + * Merge pull request [#505] from sammcj/patch-1 + * Merge pull request [#506] from FlavourSys/config_worker_timeout + * Merge pull request [#510] from momer/rescue-block-handle-servers-fix + * Merge pull request [#511] from macool/patch-1 + * Merge pull request [#514] from edogawaconan/refactor_env + * Merge pull request [#517] from misfo/patch-1 + * Merge pull request [#518] from LongMan/master + +## 2.8.1 / 2014-03-06 + +* 1 bug fixes: + * Run puma-wild with proper deps for prune_bundler + +* 2 doc changes: + * Described the configuration file finding behavior added in 2.8.0 and how to disable it. + * Start the deployment doc + +* 6 PRs merged: + * Merge pull request [#471] from arthurnn/fix_test + * Merge pull request [#485] from joneslee85/patch-9 + * Merge pull request [#486] from joshwlewis/patch-1 + * Merge pull request [#490] from tobinibot/patch-1 + * Merge pull request [#491] from brianknight10/clarify-no-config + +## 2.8.0 / 2014-02-28 + +* 8 minor features: + * Add ability to autoload a config file. Fixes [#438] + * Add ability to detect and terminate hung workers. Fixes [#333] + * Add booted_workers to stats response + * Add config to customize the default error message + * Add prune_bundler option + * Add worker indexes, expose them via on_worker_boot. Fixes [#440] + * Add pretty process name + * Show the ruby version in use + +* 7 bug fixes: + * Added 408 status on timeout. 
+ * Be more hostile with sockets that write block. Fixes [#449] + * Expect at_exit to exclusively remove the pidfile. Fixes [#444] + * Expose latency and listen backlog via bind query. Fixes [#370] + * JRuby raises IOError if the socket is there. Fixes [#377] + * Process requests fairly. Fixes [#406] + * Rescue SystemCallError as well. Fixes [#425] + +* 4 doc changes: + * Add 2.1.0 to the matrix + * Add Code Climate badge to README + * Create signals.md + * Set the license to BSD. Fixes [#432] + +* 14 PRs merged: + * Merge pull request [#428] from alexeyfrank/capistrano_default_hooks + * Merge pull request [#429] from namusyaka/revert-const_defined + * Merge pull request [#431] from mrb/master + * Merge pull request [#433] from alepore/process-name + * Merge pull request [#437] from ibrahima/master + * Merge pull request [#446] from sudara/master + * Merge pull request [#451] from pwiebe/status_408 + * Merge pull request [#453] from joevandyk/patch-1 + * Merge pull request [#470] from arthurnn/fix_458 + * Merge pull request [#472] from rubencaro/master + * Merge pull request [#480] from jjb/docs-on-running-test-suite + * Merge pull request [#481] from schneems/master + * Merge pull request [#482] from prathamesh-sonpatki/signals-doc-cleanup + * Merge pull request [#483] from YotpoLtd/master + +## 2.7.1 / 2013-12-05 + +* 1 bug fix: + * Keep STDOUT/STDERR the right mode. Fixes [#422] + +## 2.7.0 / 2013-12-03 + +* 1 minor feature: + * Adding TTIN and TTOU to increment/decrement workers + +* N bug fixes: + * Always use our Process.daemon because it's not busted + * Add capistrano restart failback to start. + * Change position of `cd` so that rvm gemset is loaded + * Clarify some platform specifics + * Do not close the pipe sockets when retrying + * Fix String#byteslice for Ruby 1.9.1, 1.9.2 + * Fix compatibility with 1.8.7. 
+ * Handle IOError closed stream in IO.select + * Increase the max URI path length to 2048 chars from 1024 chars + * Upstart jungle use config/puma.rb instead + +## 2.6.0 / 2013-09-13 + +* 2 minor features: + * Add support for event hooks + ** Add a hook for state transitions + * Add phased restart to capistrano recipe. + +* 4 bug fixes: + * Convince workers to stop by SIGKILL after timeout + * Define RSTRING_NOT_MODIFIED for Rubinius performance + * Handle BrokenPipe, StandardError and IOError in fat_wrote and break out + * Return success status to the invoking environment + +## 2.5.1 / 2013-08-13 + +* 2 bug fixes: + * Keep jruby daemon mode from retrying on a hot restart + * Extract version from const.rb in gemspec + +## 2.5.0 / 2013-08-08 + +* 2 minor features: + * Allow configuring pumactl with config.rb + * make `pumactl restart` start puma if not running + +* 6 bug fixes: + * Autodetect ruby managers and home directory in upstart script + * Convert header values to string before sending. + * Correctly report phased-restart availability + * Fix pidfile creation/deletion race on jruby daemonization + * Use integers when comparing thread counts + * Fix typo in using lopez express (raw tcp) mode + +* 6 misc changes: + * Fix typo in phased-restart response + * Uncomment setuid/setgid by default in upstart + * Use Puma::Const::PUMA_VERSION in gemspec + * Update upstart comments to reflect new commandline + * Remove obsolete pumactl instructions; refer to pumactl for details + * Make Bundler used puma.gemspec version agnostic + +## 2.4.1 / 2013-08-07 + +* 1 experimental feature: + * Support raw tcp servers (aka Lopez Express mode) + +## 2.4.0 / 2013-07-22 + +* 5 minor features: + * Add PUMA_JRUBY_DAEMON_OPTS to get around agent starting twice + * Add ability to drain accept socket on shutdown + * Add port to DSL + * Adds support for using puma config file in capistrano deploys. 
+ * Make phased_restart fallback to restart if not available + +* 10 bug fixes: + + * Be sure to only delete the pid in the master. Fixes [#334] + * Call out -C/--config flags + * Change parser symbol names to avoid clash. Fixes [#179] + * Convert thread pool sizes to integers + * Detect when the jruby daemon child doesn't start properly + * Fix typo in CLI help + * Improve the logging output when hijack is used. Fixes [#332] + * Remove unnecessary thread pool size conversions + * Setup :worker_boot as an Array. Fixes [#317] + * Use 127.0.0.1 as REMOTE_ADDR of unix client. Fixes [#309] + + +## 2.3.2 / 2013-07-08 + +* 1 bug fix: + * Move starting control server to after daemonization. + +## 2.3.1 / 2013-07-06 + +* 2 bug fixes: + * Include the right files in the Manifest. + * Disable inheriting connections on restart on windows. Fixes [#166] + +* 1 doc change: + * Better document some platform constraints + +## 2.3.0 / 2013-07-05 + +* 1 major bug fix: + * Stabilize control server, add support in cluster mode + +* 5 minor bug fixes: + * Add ability to cleanup stale unix sockets + * Check status data better. Fixes [#292] + * Convert raw IO errors to ConnectionError. Fixes [#274] + * Fix sending Content-Type and Content-Length for no body status. Fixes [#304] + * Pass state path through to `pumactl start`. Fixes [#287] + +* 2 internal changes: + * Refactored modes into separate classes that CLI uses + * Changed CLI to take an Events object instead of stdout/stderr (API change) + +## 2.2.2 / 2013-07-02 + +* 1 bug fix: + * Fix restart_command in the config + +## 2.2.1 / 2013-07-02 + +* 1 minor feature: + * Introduce preload flag + +* 1 bug fix: + * Pass custom restart command in JRuby + +## 2.2.0 / 2013-07-01 + +* 1 major feature: + * Add ability to preload rack app + +* 2 minor bugfixes: + * Don't leak info when not in development. 
Fixes [#256] + * Load the app, then bind the ports + +## 2.1.1 / 2013-06-20 + +* 2 minor bug fixes: + + * Fix daemonization on jruby + * Load the application before daemonizing. Fixes [#285] + +## 2.1.0 / 2013-06-18 + +* 3 minor features: + * Allow listening socket to be configured via Capistrano variable + * Output results from 'stat's command when using pumactl + * Support systemd socket activation + +* 15 bug fixes: + * Deal with pipes closing while stopping. Fixes [#270] + * Error out early if there is no app configured + * Handle ConnectionError rather than the lowlevel exceptions + * tune with `-C` config file and `on_worker_boot` + * use `-w` + * Fixed some typos in upstart scripts + * Make sure to use bytesize instead of size (MiniSSL write) + * Fix an error in puma-manager.conf + * fix: stop leaking sockets on restart (affects ruby 1.9.3 or before) + * Ignore errors on the cross-thread pipe. Fixes [#246] + * Ignore errors while uncorking the socket (it might already be closed) + * Ignore the body on a HEAD request. Fixes [#278] + * Handle all engine data when possible. Fixes [#251]. + * Handle all read exceptions properly. Fixes [#252] + * Handle errors from the server better + +* 3 doc changes: + * Add note about on_worker_boot hook + * Add some documentation for Clustered mode + * Added quotes to /etc/puma.conf + +## 2.0.1 / 2013-04-30 + +* 1 bug fix: + * Fix not starting on JRuby properly + +## 2.0.0 / 2013-04-29 + +RailsConf 2013 edition! + +* 2 doc changes: + * Start with rackup -s Puma, NOT rackup -s puma. + * Minor doc fixes in the README.md, Capistrano section + +* 2 bug fixes: + * Fix reading RACK_ENV properly. 
Fixes [#234] + * Make cap recipe handle tmp/sockets; fixes [#228] + +* 3 minor changes: + * Fix capistrano recipe + * Fix stdout/stderr logs to sync outputs + * allow binding to IPv6 addresses + +## 2.0.0.b7 / 2013-03-18 + +* 5 minor enhancements: + * Add -q option for :start + * Add -V, --version + * Add default Rack handler helper + * Upstart support + * Set worker directory from configuration file + +* 12 bug fixes: + * Close the binder in the right place. Fixes [#192] + * Handle early term in workers. Fixes [#206] + * Make sure that the default port is 80 when the request doesn't include HTTP_X_FORWARDED_PROTO. + * Prevent Errno::EBADF errors on restart when running ruby 2.0 + * Record the proper @master_pid + * Respect the header HTTP_X_FORWARDED_PROTO when the host doesn't include a port number. + * Retry EAGAIN/EWOULDBLOCK during syswrite + * Run exec properly to restart. Fixes [#154] + * Set Rack run_once to false + * Synchronize all access to @timeouts. Fixes [#208] + * Write out the state post-daemonize. Fixes [#189] + * Prevent crash when all workers are gone + +## 2.0.0.b6 / 2013-02-06 + +* 2 minor enhancements: + * Add hook for running when a worker boots + * Advertise the Configuration object for apps to use. + +* 1 bug fix: + * Change directory in working during upgrade. Fixes [#185] + +## 2.0.0.b5 / 2013-02-05 + +* 2 major features: + * Add phased worker upgrade + * Add support for the rack hijack protocol + +* 2 minor features: + * Add -R to specify the restart command + * Add config file option to specify the restart command + +* 5 bug fixes: + * Cleanup pipes properly. Fixes [#182] + * Daemonize earlier so that we don't lose app threads. Fixes [#183] + * Drain the notification pipe. Fixes [#176], thanks @cryo28 + * Move write_pid to after we daemonize. Fixes [#180] + * Redirect IO properly and emit message for checkpointing + +## 2.0.0.b4 / 2012-12-12 + +* 4 bug fixes: + * Properly check #syswrite's value for variable sized buffers. 
Fixes [#170] + * Shutdown status server properly + * Handle char vs byte and mixing syswrite with write properly + * made MiniSSL validate key/cert file existence + +## 2.0.0.b3 / 2012-11-22 + +* 1 bug fix: + * Package right files in gem + +## 2.0.0.b2 / 2012-11-18 +* 5 minor features: + * Now Puma is bundled with a capistrano recipe. Just require + 'puma/capistrano' in your deploy.rb + * Only inject CommonLogger in development mode + * Add -p option to pumactl + * Add ability to use pumactl to start a server + * Add options to daemonize puma + +* 7 bug fixes: + * Reset the IOBuffer properly. Fixes [#148] + * Shutdown gracefully on JRuby with Ctrl-C + * Various methods to get newrelic to start. Fixes [#128] + * fixing syntax error at capistrano recipe + * Force ECONNRESET when read returns nil + * Be sure to drain the todo before shutting down. Fixes [#155] + * allow for alternate locations for status app + +## 2.0.0.b1 / 2012-09-11 + +* 1 major feature: + * Optional worker process mode (-w) to allow for process scaling in + addition to thread scaling + +* 1 bug fix: + * Introduce Puma::MiniSSL to be able to properly control doing + nonblocking SSL + +NOTE: SSL support in JRuby is not supported at present. Support will +be added back in a future date when a java Puma::MiniSSL is added. + +## 1.6.3 / 2012-09-04 + +* 1 bug fix: + * Close sockets waiting in the reactor when a hot restart is performed + so that browsers reconnect on the next request + +## 1.6.2 / 2012-08-27 + +* 1 bug fix: + * Rescue StandardError instead of IOError to handle SystemCallErrors + as well as other application exceptions inside the reactor. + +## 1.6.1 / 2012-07-23 + +* 1 packaging bug fixed: + * Include missing files + +## 1.6.0 / 2012-07-23 + +* 1 major bug fix: + * Prevent slow clients from starving the server by introducing a + dedicated IO reactor thread. Credit for reporting goes to @meh. 
+ +## 1.5.0 / 2012-07-19 + +* 7 contributors to this release: + * Christian Mayer + * Darío Javier Cravero + * Dirkjan Bussink + * Gianluca Padovani + * Santiago Pastorino + * Thibault Jouan + * tomykaira + +* 6 bug fixes: + * Define RSTRING_NOT_MODIFIED for Rubinius + * Convert status to integer. Fixes [#123] + * Delete pidfile when stopping the server + * Allow compilation with -Werror=format-security option + * Fix wrong HTTP version for a HTTP/1.0 request + * Use String#bytesize instead of String#length + +* 3 minor features: + * Added support for setting RACK_ENV via the CLI, config file, and rack app + * Allow Server#run to run sync. Fixes [#111] + * Puma can now run on windows + +## 1.4.0 / 2012-06-04 + +* 1 bug fix: + * SCRIPT_NAME should be passed from env to allow mounting apps + +* 1 experimental feature: + * Add puma.socket key for direct socket access + +## 1.3.1 / 2012-05-15 + +* 2 bug fixes: + * use #bytesize instead of #length for Content-Length header + * Use StringIO properly. Fixes [#98] + +## 1.3.0 / 2012-05-08 + +* 2 minor features: + * Return valid Rack responses (passes Lint) from status server + * Add -I option to specify $LOAD_PATH directories + +* 4 bug fixes: + * Don't join the server thread inside the signal handle. Fixes [#94] + * Make NullIO#read mimic IO#read + * Only stop the status server if it's started. Fixes [#84] + * Set RACK_ENV early in cli also. Fixes [#78] + +* 1 new contributor: + * Jesse Cooke + +## 1.2.2 / 2012-04-28 + +* 4 bug fixes: + * Report a lowlevel error to stderr + * Set a fallback SERVER_NAME and SERVER_PORT + * Keep the encoding of the body correct. Fixes [#79] + * show error.to_s along with backtrace for low-level error + +## 1.2.1 / 2012-04-11 + +* 1 bug fix: + * Fix rack.url_scheme for SSL servers. Fixes [#65] + +## 1.2.0 / 2012-04-11 + +* 1 major feature: + * When possible, the internal restart does a "hot restart" meaning + the server sockets remains open, so no connections are lost. 
+ +* 1 minor feature: + * More helpful fallback error message + +* 6 bug fixes: + * Pass the proper args to unknown_error. Fixes [#54], [#58] + * Stop the control server before restarting. Fixes [#61] + * Fix reporting https only on a true SSL connection + * Set the default content type to 'text/plain'. Fixes [#63] + * Use REUSEADDR. Fixes [#60] + * Shutdown gracefully on SIGTERM. Fixes [#53] + +* 2 new contributors: + * Seamus Abshere + * Steve Richert + +## 1.1.1 / 2012-03-30 + +* 1 bugfix: + * Include puma/compat.rb in the gem (oops!) + +## 1.1.0 / 2012-03-30 + +* 1 bugfix: + * Make sure that the unix socket has the perms 0777 by default + +* 1 minor feature: + * Add umask param to the unix:// bind to set the umask + +## 1.0.0 / 2012-03-29 + +* Released! + +## Ignore - this is for maintainers to copy-paste during release +## Master + +* Features + * Your feature goes here (#Github Number) + +* Bugfixes + * Your bugfix goes here (#Github Number) + +[#3256]:https://github.com/puma/puma/pull/3256 "PR by @MSP-Greg, merged 2023-10-16" +[#3235]:https://github.com/puma/puma/pull/3235 "PR by @joshuay03, merged 2023-10-03" +[#3228]:https://github.com/puma/puma/issues/3228 "Issue by @davidalejandroaguilar, closed 2023-10-03" +[#3282]:https://github.com/puma/puma/issues/3282 "Issue by @bensheldon, closed 2024-01-02" +[#3283]:https://github.com/puma/puma/pull/3283 "PR by @joshuay03, merged 2024-01-02" +[#3225]:https://github.com/puma/puma/pull/3225 "PR by @joshuay03, merged 2023-09-27" +[#2786]:https://github.com/puma/puma/issues/2786 "Issue by @vitiokss, closed 2023-09-27" +[#3179]:https://github.com/puma/puma/pull/3179 "PR by @MSP-Greg, merged 2023-09-26" +[#3255]:https://github.com/puma/puma/pull/3255 "PR by @casperisfine, merged 2023-10-19" +[#3276]:https://github.com/puma/puma/pull/3276 "PR by @casperisfine, merged 2023-11-16" +[#3271]:https://github.com/puma/puma/pull/3271 "PR by @MSP-Greg, merged 2023-10-30" +[#3266]:https://github.com/puma/puma/issues/3266 "Issue by 
@Dragonicity, closed 2023-10-30" +[#3270]:https://github.com/puma/puma/pull/3270 "PR by @MSP-Greg, merged 2023-10-30" +[#3265]:https://github.com/puma/puma/pull/3265 "PR by @MSP-Greg, merged 2023-10-25" +[#3264]:https://github.com/puma/puma/issues/3264 "Issue by @dentarg, closed 2023-10-25" +[#3254]:https://github.com/puma/puma/pull/3254 "PR by @casperisfine, merged 2023-10-11" +[#3245]:https://github.com/puma/puma/pull/3245 "PR by @olleolleolle, merged 2023-10-02" +[#3293]:https://github.com/puma/puma/pull/3293 "PR by @MSP-Greg, merged 2023-12-21" +[#3301]:https://github.com/puma/puma/pull/3301 "PR by @benburkert, merged 2023-12-29" +[#3248]:https://github.com/puma/puma/pull/3248 "PR by @dentarg, merged 2023-10-04" +[#3298]:https://github.com/puma/puma/pull/3298 "PR by @til, merged 2023-12-26" +[#2920]:https://github.com/puma/puma/pull/2920 "PR by @biinari, merged 2023-07-11" +[#3195]:https://github.com/puma/puma/pull/3195 "PR by @binarygit, merged 2023-08-15" +[#3209]:https://github.com/puma/puma/pull/3209 "PR by @joshuay03, merged 2023-09-04" +[#2580]:https://github.com/puma/puma/issues/2580 "Issue by @schuetzm, closed 2023-09-04" +[#3204]:https://github.com/puma/puma/pull/3204 "PR by @dhavalsingh, merged 2023-08-25" +[#3191]:https://github.com/puma/puma/pull/3191 "PR by @MSP-Greg, merged 2023-08-31" +[#3165]:https://github.com/puma/puma/pull/3165 "PR by @fallwith, merged 2023-06-06" +[#3174]:https://github.com/puma/puma/pull/3174 "PR by @copiousfreetime, merged 2023-06-11" +[#3181]:https://github.com/puma/puma/pull/3181 "PR by @MSP-Greg, merged 2023-06-23" +[#3187]:https://github.com/puma/puma/pull/3187 "PR by @MSP-Greg, merged 2023-06-30" +[#3094]:https://github.com/puma/puma/pull/3094 "PR by @Vuta, merged 2023-07-23" +[#3106]:https://github.com/puma/puma/pull/3106 "PR by @MSP-Greg, merged 2023-05-29" +[#3014]:https://github.com/puma/puma/issues/3014 "Issue by @kyledrake, closed 2023-05-29" +[#3161]:https://github.com/puma/puma/pull/3161 "PR by @MSP-Greg, 
merged 2023-05-27" +[#3037]:https://github.com/puma/puma/issues/3037 "Issue by @daisy1754, closed 2023-05-27" +[#3133]:https://github.com/puma/puma/pull/3133 "PR by @stanhu, merged 2023-04-30" +[#3132]:https://github.com/puma/puma/issues/3132 "Issue by @stanhu, closed 2023-04-30" +[#3163]:https://github.com/puma/puma/pull/3163 "PR by @MSP-Greg, merged 2023-05-27" +[#3155]:https://github.com/puma/puma/pull/3155 "PR by @dentarg, merged 2023-05-14" +[#3148]:https://github.com/puma/puma/issues/3148 "Issue by @dentarg, closed 2023-05-14" +[#3129]:https://github.com/puma/puma/pull/3129 "PR by @MSP-Greg, merged 2023-05-02" +[#3137]:https://github.com/puma/puma/pull/3137 "PR by @MSP-Greg, merged 2023-04-30" +[#3156]:https://github.com/puma/puma/pull/3156 "PR by @severin, merged 2023-05-16" +[#3162]:https://github.com/puma/puma/pull/3162 "PR by @MSP-Greg, merged 2023-05-23" +[#3151]:https://github.com/puma/puma/pull/3151 "PR by @nateberkopec, merged 2023-05-12" +[#3118]:https://github.com/puma/puma/pull/3118 "PR by @ninoseki, merged 2023-04-01" +[#3117]:https://github.com/puma/puma/issues/3117 "Issue by @ninoseki, closed 2023-04-01" +[#3109]:https://github.com/puma/puma/pull/3109 "PR by @ahorek, merged 2023-03-31" +[#3108]:https://github.com/puma/puma/issues/3108 "Issue by @treviateo, closed 2023-03-31" +[#3113]:https://github.com/puma/puma/pull/3113 "PR by @collinsauve, merged 2023-03-31" +[#3112]:https://github.com/puma/puma/issues/3112 "Issue by @dmke, closed 2023-03-31" +[#3111]:https://github.com/puma/puma/pull/3111 "PR by @adzap, merged 2023-03-30" +[#2770]:https://github.com/puma/puma/pull/2770 "PR by @vzajkov, merged 2023-03-29" +[#2511]:https://github.com/puma/puma/issues/2511 "Issue by @jchristie55332, closed 2021-12-12" +[#3089]:https://github.com/puma/puma/pull/3089 "PR by @Vuta, merged 2023-03-06" +[#2709]:https://github.com/puma/puma/pull/2709 "PR by @rodzyn, merged 2023-02-20" +[#3091]:https://github.com/puma/puma/pull/3091 "PR by @MSP-Greg, merged 
2023-03-28" +[#3074]:https://github.com/puma/puma/pull/3074 "PR by @MSP-Greg, merged 2023-03-14" +[#3072]:https://github.com/puma/puma/pull/3072 "PR by @MSP-Greg, merged 2023-02-17" +[#3079]:https://github.com/puma/puma/pull/3079 "PR by @mohamedhafez, merged 2023-02-24" +[#3080]:https://github.com/puma/puma/pull/3080 "PR by @MSP-Greg, merged 2023-02-16" +[#3058]:https://github.com/puma/puma/pull/3058 "PR by @dentarg, merged 2023-01-29" +[#3007]:https://github.com/puma/puma/issues/3007 "Issue by @MSP-Greg, closed 2023-01-29" +[#3011]:https://github.com/puma/puma/pull/3011 "PR by @joaomarcos96, merged 2023-01-03" +[#3006]:https://github.com/puma/puma/pull/3006 "PR by @QWYNG, merged 2023-02-09" +[#2604]:https://github.com/puma/puma/issues/2604 "Issue by @dgoetz, closed 2023-02-09" +[#3040]:https://github.com/puma/puma/pull/3040 "PR by @shayonj, merged 2023-01-02" +[#3036]:https://github.com/puma/puma/pull/3036 "PR by @MSP-Greg, merged 2023-01-13" +[#3020]:https://github.com/puma/puma/issues/3020 "Issue by @dentarg, closed 2023-01-13" +[#3061]:https://github.com/puma/puma/pull/3061 "PR by @MSP-Greg, merged 2023-02-12" +[#3057]:https://github.com/puma/puma/issues/3057 "Issue by @mmarvb8h, closed 2023-02-12" +[#3065]:https://github.com/puma/puma/pull/3065 "PR by @MSP-Greg, merged 2023-02-11" +[#3062]:https://github.com/puma/puma/pull/3062 "PR by @willkoehler, merged 2023-01-29" +[#3035]:https://github.com/puma/puma/pull/3035 "PR by @MSP-Greg, merged 2022-12-24" +[#3033]:https://github.com/puma/puma/issues/3033 "Issue by @jules-w2, closed 2022-12-24" +[#3016]:https://github.com/puma/puma/pull/3016 "PR by @MSP-Greg, merged 2022-12-24" +[#3005]:https://github.com/puma/puma/pull/3005 "PR by @JuanitoFatas, merged 2022-11-04" +[#3004]:https://github.com/puma/puma/pull/3004 "PR by @MSP-Greg, merged 2022-11-24" +[#3000]:https://github.com/puma/puma/issues/3000 "Issue by @dentarg, closed 2022-11-24" +[#3002]:https://github.com/puma/puma/pull/3002 "PR by @MSP-Greg, merged 
2022-11-03" +[#2999]:https://github.com/puma/puma/issues/2999 "Issue by @aymeric-ledorze, closed 2022-11-03" +[#3013]:https://github.com/puma/puma/pull/3013 "PR by @MSP-Greg, merged 2022-11-13" +[#2919]:https://github.com/puma/puma/pull/2919 "PR by @MSP-Greg, merged 2022-08-30" +[#2652]:https://github.com/puma/puma/issues/2652 "Issue by @Roguelazer, closed 2022-09-04" +[#2653]:https://github.com/puma/puma/pull/2653 "PR by @Roguelazer, closed 2022-03-07" +[#2849]:https://github.com/puma/puma/pull/2849 "PR by @kares, merged 2022-04-09" +[#2933]:https://github.com/puma/puma/pull/2933 "PR by @cafedomancer, merged 2022-09-09" +[#2925]:https://github.com/puma/puma/issues/2925 "Issue by @nateberkopec, closed 2022-09-09" +[#2940]:https://github.com/puma/puma/pull/2940 "PR by @cafedomancer, merged 2022-09-10" +[#2924]:https://github.com/puma/puma/pull/2924 "PR by @cafedomancer, merged 2022-09-07" +[#2853]:https://github.com/puma/puma/issues/2853 "Issue by @nateberkopec, closed 2022-09-07" +[#2957]:https://github.com/puma/puma/pull/2957 "PR by @JuanitoFatas, merged 2022-09-16" +[#2958]:https://github.com/puma/puma/pull/2958 "PR by @JuanitoFatas, merged 2022-09-16" +[#2959]:https://github.com/puma/puma/pull/2959 "PR by @JuanitoFatas, merged 2022-09-16" +[#2960]:https://github.com/puma/puma/pull/2960 "PR by @JuanitoFatas, merged 2022-09-16" +[#2988]:https://github.com/puma/puma/pull/2988 "PR by @MSP-Greg, merged 2022-10-12" +[#2928]:https://github.com/puma/puma/pull/2928 "PR by @nateberkopec, merged 2022-09-10" +[#2798]:https://github.com/puma/puma/pull/2798 "PR by @johnnyshields, merged 2022-02-05" +[#2932]:https://github.com/puma/puma/pull/2932 "PR by @mrzasa, merged 2022-09-12" +[#2896]:https://github.com/puma/puma/pull/2896 "PR by @MSP-Greg, merged 2022-09-13" +[#2892]:https://github.com/puma/puma/pull/2892 "PR by @guilleiguaran, closed 2022-09-13" +[#2923]:https://github.com/puma/puma/pull/2923 "PR by @nateberkopec, merged 2022-09-09" 
+[#2740]:https://github.com/puma/puma/pull/2740 "PR by @ioquatix, merged 2022-01-29" +[#2845]:https://github.com/puma/puma/issues/2845 "Issue by @donv, closed 2022-03-22" +[#2917]:https://github.com/puma/puma/pull/2917 "PR by @MSP-Greg, merged 2022-09-19" +[#2915]:https://github.com/puma/puma/issues/2915 "Issue by @mperham, closed 2022-09-19" +[#2907]:https://github.com/puma/puma/pull/2907 "PR by @casperisfine, merged 2022-09-15" +[#2953]:https://github.com/puma/puma/pull/2953 "PR by @JuanitoFatas, merged 2022-09-14" +[#2936]:https://github.com/puma/puma/pull/2936 "PR by @MSP-Greg, merged 2022-09-09" +[#2931]:https://github.com/puma/puma/issues/2931 "Issue by @dentarg, closed 2022-09-09" +[#2875]:https://github.com/puma/puma/pull/2875 "PR by @ylecuyer, merged 2022-05-19" +[#2840]:https://github.com/puma/puma/pull/2840 "PR by @LukaszMaslej, merged 2022-04-13" +[#2774]:https://github.com/puma/puma/pull/2774 "PR by @ob-stripe, merged 2022-01-31" +[#2891]:https://github.com/puma/puma/pull/2891 "PR by @gingerlime, merged 2022-06-02" +[#2886]:https://github.com/puma/puma/pull/2886 "PR by @kares, merged 2022-05-30" +[#2899]:https://github.com/puma/puma/pull/2899 "PR by @kares, merged 2022-07-04" +[#2904]:https://github.com/puma/puma/pull/2904 "PR by @kares, merged 2022-08-27" +[#2884]:https://github.com/puma/puma/pull/2884 "PR by @kares, merged 2022-05-30" +[#2897]:https://github.com/puma/puma/pull/2897 "PR by @Edouard-chin, merged 2022-08-27" +[#1441]:https://github.com/puma/puma/issues/1441 "Issue by @nirvdrum, closed 2022-09-12" +[#2956]:https://github.com/puma/puma/pull/2956 "PR by @MSP-Greg, merged 2022-09-15" +[#2888]:https://github.com/puma/puma/pull/2888 "PR by @MSP-Greg, merged 2022-06-01" +[#2797]:https://github.com/puma/puma/pull/2797 "PR by @johnnyshields, merged 2022-02-01" +[#2795]:https://github.com/puma/puma/pull/2795 "PR by @johnnyshields, merged 2022-01-31" +[#2903]:https://github.com/puma/puma/pull/2903 "PR by @MSP-Greg, merged 2022-08-27" 
+[#2942]:https://github.com/puma/puma/pull/2942 "PR by @nateberkopec, merged 2022-09-15" +[#2921]:https://github.com/puma/puma/issues/2921 "Issue by @MSP-Greg, closed 2022-09-15" +[#2922]:https://github.com/puma/puma/issues/2922 "Issue by @MSP-Greg, closed 2022-09-10" +[#2955]:https://github.com/puma/puma/pull/2955 "PR by @cafedomancer, merged 2022-09-15" +[#3166]:https://github.com/puma/puma/pull/3166 "PR by @JoeDupuis, merged 2023-06-08" +[#2868]:https://github.com/puma/puma/pull/2868 "PR by @MSP-Greg, merged 2022-06-02" +[#2866]:https://github.com/puma/puma/issues/2866 "Issue by @slondr, closed 2022-06-02" +[#2883]:https://github.com/puma/puma/pull/2883 "PR by @MSP-Greg, merged 2022-06-02" +[#2890]:https://github.com/puma/puma/pull/2890 "PR by @kares, merged 2022-06-01" +[#2729]:https://github.com/puma/puma/issues/2729 "Issue by @kares, closed 2022-06-01" +[#2885]:https://github.com/puma/puma/pull/2885 "PR by @MSP-Greg, merged 2022-05-30" +[#2839]:https://github.com/puma/puma/issues/2839 "Issue by @wlipa, closed 2022-05-30" +[#2864]:https://github.com/puma/puma/pull/2864 "PR by @MSP-Greg, merged 2022-04-26" +[#2863]:https://github.com/puma/puma/issues/2863 "Issue by @eradman, closed 2022-04-26" +[#2861]:https://github.com/puma/puma/pull/2861 "PR by @BlakeWilliams, merged 2022-04-17" +[#2856]:https://github.com/puma/puma/issues/2856 "Issue by @nateberkopec, closed 2022-04-17" +[#2855]:https://github.com/puma/puma/pull/2855 "PR by @stanhu, merged 2022-04-09" +[#2848]:https://github.com/puma/puma/pull/2848 "PR by @stanhu, merged 2022-04-02" +[#2847]:https://github.com/puma/puma/pull/2847 "PR by @stanhu, merged 2022-04-02" +[#2838]:https://github.com/puma/puma/pull/2838 "PR by @epsilon-0, merged 2022-03-03" +[#2817]:https://github.com/puma/puma/pull/2817 "PR by @khustochka, merged 2022-02-20" +[#2810]:https://github.com/puma/puma/pull/2810 "PR by @kzkn, merged 2022-01-27" +[#2809]:https://github.com/puma/puma/pull/2809 "PR by @dentarg, merged 2022-01-26" 
+[#2764]:https://github.com/puma/puma/pull/2764 "PR by @dentarg, merged 2022-01-18" +[#2708]:https://github.com/puma/puma/issues/2708 "Issue by @erikaxel, closed 2022-01-18" +[#2780]:https://github.com/puma/puma/pull/2780 "PR by @dalibor, merged 2022-01-01" +[#2784]:https://github.com/puma/puma/pull/2784 "PR by @MSP-Greg, merged 2022-01-01" +[#2773]:https://github.com/puma/puma/pull/2773 "PR by @ob-stripe, merged 2022-01-01" +[#2794]:https://github.com/puma/puma/pull/2794 "PR by @johnnyshields, merged 2022-01-10" +[#2759]:https://github.com/puma/puma/pull/2759 "PR by @ob-stripe, merged 2021-12-11" +[#2731]:https://github.com/puma/puma/pull/2731 "PR by @baelter, merged 2021-11-02" +[#2341]:https://github.com/puma/puma/issues/2341 "Issue by @cjlarose, closed 2023-07-23" +[#2728]:https://github.com/puma/puma/pull/2728 "PR by @dalibor, merged 2021-10-31" +[#2733]:https://github.com/puma/puma/pull/2733 "PR by @ob-stripe, merged 2021-12-12" +[#2807]:https://github.com/puma/puma/pull/2807 "PR by @MSP-Greg, merged 2022-01-25" +[#2806]:https://github.com/puma/puma/issues/2806 "Issue by @olleolleolle, closed 2022-01-25" +[#2799]:https://github.com/puma/puma/pull/2799 "PR by @ags, merged 2022-01-22" +[#2785]:https://github.com/puma/puma/pull/2785 "PR by @MSP-Greg, merged 2022-01-02" +[#2757]:https://github.com/puma/puma/pull/2757 "PR by @MSP-Greg, merged 2021-11-24" +[#2745]:https://github.com/puma/puma/pull/2745 "PR by @MSP-Greg, merged 2021-11-03" +[#2742]:https://github.com/puma/puma/pull/2742 "PR by @MSP-Greg, merged 2021-12-12" +[#2730]:https://github.com/puma/puma/pull/2730 "PR by @kares, merged 2021-11-01" +[#2702]:https://github.com/puma/puma/pull/2702 "PR by @jacobherrington, merged 2021-09-21" +[#2610]:https://github.com/puma/puma/pull/2610 "PR by @ye-lin-aung, merged 2021-08-18" +[#2257]:https://github.com/puma/puma/issues/2257 "Issue by @nateberkopec, closed 2021-08-18" +[#2654]:https://github.com/puma/puma/pull/2654 "PR by @Roguelazer, merged 2021-09-07" 
+[#2651]:https://github.com/puma/puma/issues/2651 "Issue by @Roguelazer, closed 2021-09-07" +[#2689]:https://github.com/puma/puma/pull/2689 "PR by @jacobherrington, merged 2021-09-05" +[#2700]:https://github.com/puma/puma/pull/2700 "PR by @ioquatix, merged 2021-09-16" +[#2699]:https://github.com/puma/puma/issues/2699 "Issue by @ioquatix, closed 2021-09-16" +[#2690]:https://github.com/puma/puma/pull/2690 "PR by @doits, merged 2021-09-06" +[#2688]:https://github.com/puma/puma/pull/2688 "PR by @jdelStrother, merged 2021-09-03" +[#2687]:https://github.com/puma/puma/issues/2687 "Issue by @jdelStrother, closed 2021-09-03" +[#2675]:https://github.com/puma/puma/pull/2675 "PR by @devwout, merged 2021-09-08" +[#2657]:https://github.com/puma/puma/pull/2657 "PR by @olivierbellone, merged 2021-07-13" +[#2648]:https://github.com/puma/puma/pull/2648 "PR by @MSP-Greg, merged 2021-06-27" +[#1412]:https://github.com/puma/puma/issues/1412 "Issue by @x-yuri, closed 2021-06-27" +[#2586]:https://github.com/puma/puma/pull/2586 "PR by @MSP-Greg, merged 2021-05-26" +[#2569]:https://github.com/puma/puma/issues/2569 "Issue by @tarragon, closed 2021-05-26" +[#2643]:https://github.com/puma/puma/pull/2643 "PR by @MSP-Greg, merged 2021-06-27" +[#2638]:https://github.com/puma/puma/issues/2638 "Issue by @gingerlime, closed 2021-06-27" +[#2642]:https://github.com/puma/puma/pull/2642 "PR by @MSP-Greg, merged 2021-06-16" +[#2633]:https://github.com/puma/puma/pull/2633 "PR by @onlined, merged 2021-06-04" +[#2656]:https://github.com/puma/puma/pull/2656 "PR by @olivierbellone, merged 2021-07-07" +[#2666]:https://github.com/puma/puma/pull/2666 "PR by @MSP-Greg, merged 2021-07-25" +[#2630]:https://github.com/puma/puma/pull/2630 "PR by @seangoedecke, merged 2021-05-20" +[#2626]:https://github.com/puma/puma/issues/2626 "Issue by @rorymckinley, closed 2021-05-20" +[#2629]:https://github.com/puma/puma/pull/2629 "PR by @ye-lin-aung, merged 2021-05-20" +[#2628]:https://github.com/puma/puma/pull/2628 "PR by 
@wjordan, merged 2021-05-20" +[#2625]:https://github.com/puma/puma/issues/2625 "Issue by @jarthod, closed 2021-05-11" +[#2564]:https://github.com/puma/puma/pull/2564 "PR by @MSP-Greg, merged 2021-04-24" +[#2526]:https://github.com/puma/puma/issues/2526 "Issue by @nerdrew, closed 2021-04-24" +[#2559]:https://github.com/puma/puma/pull/2559 "PR by @ylecuyer, merged 2021-03-11" +[#2528]:https://github.com/puma/puma/issues/2528 "Issue by @cjlarose, closed 2021-03-11" +[#2565]:https://github.com/puma/puma/pull/2565 "PR by @CGA1123, merged 2021-03-09" +[#2534]:https://github.com/puma/puma/issues/2534 "Issue by @nateberkopec, closed 2021-03-09" +[#2563]:https://github.com/puma/puma/pull/2563 "PR by @MSP-Greg, merged 2021-03-06" +[#2504]:https://github.com/puma/puma/issues/2504 "Issue by @fsateler, closed 2021-03-06" +[#2591]:https://github.com/puma/puma/pull/2591 "PR by @MSP-Greg, merged 2021-05-05" +[#2572]:https://github.com/puma/puma/issues/2572 "Issue by @josef-krabath, closed 2021-05-05" +[#2613]:https://github.com/puma/puma/pull/2613 "PR by @smcgivern, merged 2021-04-27" +[#2605]:https://github.com/puma/puma/pull/2605 "PR by @pascalbetz, merged 2021-04-26" +[#2584]:https://github.com/puma/puma/issues/2584 "Issue by @kaorihinata, closed 2021-04-26" +[#2607]:https://github.com/puma/puma/pull/2607 "PR by @calvinxiao, merged 2021-04-23" +[#2552]:https://github.com/puma/puma/issues/2552 "Issue by @feliperaul, closed 2021-05-24" +[#2606]:https://github.com/puma/puma/pull/2606 "PR by @wjordan, merged 2021-04-20" +[#2574]:https://github.com/puma/puma/issues/2574 "Issue by @darkhelmet, closed 2021-04-20" +[#2567]:https://github.com/puma/puma/pull/2567 "PR by @kddnewton, merged 2021-04-19" +[#2566]:https://github.com/puma/puma/issues/2566 "Issue by @kddnewton, closed 2021-04-19" +[#2596]:https://github.com/puma/puma/pull/2596 "PR by @MSP-Greg, merged 2021-04-18" +[#2588]:https://github.com/puma/puma/pull/2588 "PR by @dentarg, merged 2021-04-02" 
+[#2556]:https://github.com/puma/puma/issues/2556 "Issue by @gamecreature, closed 2021-04-02" +[#2585]:https://github.com/puma/puma/pull/2585 "PR by @MSP-Greg, merged 2021-03-26" +[#2583]:https://github.com/puma/puma/issues/2583 "Issue by @jboler, closed 2021-03-26" +[#2609]:https://github.com/puma/puma/pull/2609 "PR by @calvinxiao, merged 2021-04-26" +[#2590]:https://github.com/puma/puma/pull/2590 "PR by @calvinxiao, merged 2021-04-05" +[#2600]:https://github.com/puma/puma/pull/2600 "PR by @wjordan, merged 2021-04-30" +[#2579]:https://github.com/puma/puma/pull/2579 "PR by @ghiculescu, merged 2021-03-17" +[#2553]:https://github.com/puma/puma/pull/2553 "PR by @olivierbellone, merged 2021-02-10" +[#2557]:https://github.com/puma/puma/pull/2557 "PR by @cjlarose, merged 2021-02-22" +[#2550]:https://github.com/puma/puma/pull/2550 "PR by @MSP-Greg, merged 2021-02-05" +[#2547]:https://github.com/puma/puma/pull/2547 "PR by @wildmaples, merged 2021-02-03" +[#2543]:https://github.com/puma/puma/pull/2543 "PR by @MSP-Greg, merged 2021-02-01" +[#2549]:https://github.com/puma/puma/pull/2549 "PR by @nmb, merged 2021-02-04" +[#2519]:https://github.com/puma/puma/pull/2519 "PR by @MSP-Greg, merged 2021-01-26" +[#2522]:https://github.com/puma/puma/pull/2522 "PR by @jcmfernandes, merged 2021-01-12" +[#2490]:https://github.com/puma/puma/pull/2490 "PR by @Bonias, merged 2020-12-07" +[#2486]:https://github.com/puma/puma/pull/2486 "PR by @karloscodes, merged 2020-12-02" +[#2535]:https://github.com/puma/puma/pull/2535 "PR by @MSP-Greg, merged 2021-01-27" +[#2529]:https://github.com/puma/puma/pull/2529 "PR by @MSP-Greg, merged 2021-01-24" +[#2533]:https://github.com/puma/puma/pull/2533 "PR by @MSP-Greg, merged 2021-01-24" +[#1953]:https://github.com/puma/puma/issues/1953 "Issue by @nateberkopec, closed 2020-12-01" +[#2516]:https://github.com/puma/puma/pull/2516 "PR by @cjlarose, merged 2020-12-17" +[#2520]:https://github.com/puma/puma/pull/2520 "PR by @dentarg, merged 2021-01-04" 
+[#2521]:https://github.com/puma/puma/pull/2521 "PR by @ojab, merged 2021-01-04" +[#2531]:https://github.com/puma/puma/pull/2531 "PR by @wjordan, merged 2021-01-19" +[#2510]:https://github.com/puma/puma/pull/2510 "PR by @micke, merged 2020-12-10" +[#2472]:https://github.com/puma/puma/pull/2472 "PR by @karloscodes, merged 2020-11-02" +[#2438]:https://github.com/puma/puma/pull/2438 "PR by @ekohl, merged 2020-10-26" +[#2406]:https://github.com/puma/puma/pull/2406 "PR by @fdel15, merged 2020-10-19" +[#2449]:https://github.com/puma/puma/pull/2449 "PR by @MSP-Greg, merged 2020-10-28" +[#2362]:https://github.com/puma/puma/pull/2362 "PR by @ekohl, merged 2020-11-10" +[#2485]:https://github.com/puma/puma/pull/2485 "PR by @elct9620, merged 2020-11-18" +[#2489]:https://github.com/puma/puma/pull/2489 "PR by @MSP-Greg, merged 2020-11-27" +[#2487]:https://github.com/puma/puma/pull/2487 "PR by @MSP-Greg, merged 2020-11-17" +[#2477]:https://github.com/puma/puma/pull/2477 "PR by @MSP-Greg, merged 2020-11-16" +[#2475]:https://github.com/puma/puma/pull/2475 "PR by @nateberkopec, merged 2020-11-02" +[#2439]:https://github.com/puma/puma/pull/2439 "PR by @kuei0221, merged 2020-10-26" +[#2460]:https://github.com/puma/puma/pull/2460 "PR by @cjlarose, merged 2020-10-27" +[#2473]:https://github.com/puma/puma/pull/2473 "PR by @cjlarose, merged 2020-11-01" +[#2479]:https://github.com/puma/puma/pull/2479 "PR by @cjlarose, merged 2020-11-10" +[#2495]:https://github.com/puma/puma/pull/2495 "PR by @JuanitoFatas, merged 2020-11-27" +[#2461]:https://github.com/puma/puma/pull/2461 "PR by @cjlarose, merged 2020-10-27" +[#2454]:https://github.com/puma/puma/issues/2454 "Issue by @majksner, closed 2020-10-27" +[#2432]:https://github.com/puma/puma/pull/2432 "PR by @MSP-Greg, merged 2020-10-25" +[#2442]:https://github.com/puma/puma/pull/2442 "PR by @wjordan, merged 2020-10-22" +[#2427]:https://github.com/puma/puma/pull/2427 "PR by @cjlarose, merged 2020-10-20" 
+[#2018]:https://github.com/puma/puma/issues/2018 "Issue by @gingerlime, closed 2020-10-20" +[#2435]:https://github.com/puma/puma/pull/2435 "PR by @wjordan, merged 2020-10-20" +[#2431]:https://github.com/puma/puma/pull/2431 "PR by @wjordan, merged 2020-10-16" +[#2212]:https://github.com/puma/puma/issues/2212 "Issue by @junaruga, closed 2020-10-16" +[#2409]:https://github.com/puma/puma/pull/2409 "PR by @fliiiix, merged 2020-10-03" +[#2448]:https://github.com/puma/puma/pull/2448 "PR by @MSP-Greg, merged 2020-10-25" +[#2450]:https://github.com/puma/puma/pull/2450 "PR by @MSP-Greg, merged 2020-10-25" +[#2419]:https://github.com/puma/puma/pull/2419 "PR by @MSP-Greg, merged 2020-10-09" +[#2279]:https://github.com/puma/puma/pull/2279 "PR by @wjordan, merged 2020-10-06" +[#2412]:https://github.com/puma/puma/pull/2412 "PR by @MSP-Greg, merged 2020-10-06" +[#2405]:https://github.com/puma/puma/pull/2405 "PR by @MSP-Greg, merged 2020-10-05" +[#2408]:https://github.com/puma/puma/pull/2408 "PR by @fliiiix, merged 2020-10-03" +[#2374]:https://github.com/puma/puma/pull/2374 "PR by @cjlarose, merged 2020-09-29" +[#2389]:https://github.com/puma/puma/pull/2389 "PR by @MSP-Greg, merged 2020-09-29" +[#2381]:https://github.com/puma/puma/pull/2381 "PR by @joergschray, merged 2020-09-24" +[#2271]:https://github.com/puma/puma/pull/2271 "PR by @wjordan, merged 2020-09-24" +[#2377]:https://github.com/puma/puma/pull/2377 "PR by @cjlarose, merged 2020-09-23" +[#2376]:https://github.com/puma/puma/pull/2376 "PR by @alexeevit, merged 2020-09-22" +[#2372]:https://github.com/puma/puma/pull/2372 "PR by @ahorek, merged 2020-09-22" +[#2384]:https://github.com/puma/puma/pull/2384 "PR by @schneems, merged 2020-09-27" +[#2375]:https://github.com/puma/puma/pull/2375 "PR by @MSP-Greg, merged 2020-09-23" +[#2373]:https://github.com/puma/puma/pull/2373 "PR by @MSP-Greg, merged 2020-09-23" +[#2305]:https://github.com/puma/puma/pull/2305 "PR by @MSP-Greg, merged 2020-09-14" 
+[#2099]:https://github.com/puma/puma/pull/2099 "PR by @wjordan, merged 2020-05-11" +[#2079]:https://github.com/puma/puma/pull/2079 "PR by @ayufan, merged 2020-05-11" +[#2093]:https://github.com/puma/puma/pull/2093 "PR by @schneems, merged 2019-12-18" +[#2256]:https://github.com/puma/puma/pull/2256 "PR by @nateberkopec, merged 2020-05-11" +[#2054]:https://github.com/puma/puma/pull/2054 "PR by @composerinteralia, merged 2019-11-11" +[#2106]:https://github.com/puma/puma/pull/2106 "PR by @ylecuyer, merged 2020-02-11" +[#2167]:https://github.com/puma/puma/pull/2167 "PR by @ChrisBr, closed 2020-07-06" +[#2344]:https://github.com/puma/puma/pull/2344 "PR by @dentarg, merged 2020-08-26" +[#2203]:https://github.com/puma/puma/pull/2203 "PR by @zanker-stripe, merged 2020-03-31" +[#2220]:https://github.com/puma/puma/pull/2220 "PR by @wjordan, merged 2020-04-14" +[#2238]:https://github.com/puma/puma/pull/2238 "PR by @sthirugn, merged 2020-05-07" +[#2086]:https://github.com/puma/puma/pull/2086 "PR by @bdewater, merged 2019-12-17" +[#2253]:https://github.com/puma/puma/pull/2253 "PR by @schneems, merged 2020-05-11" +[#2288]:https://github.com/puma/puma/pull/2288 "PR by @FTLam11, merged 2020-06-02" +[#1487]:https://github.com/puma/puma/pull/1487 "PR by @jxa, merged 2018-05-09" +[#2143]:https://github.com/puma/puma/pull/2143 "PR by @jalevin, merged 2020-04-21" +[#2169]:https://github.com/puma/puma/pull/2169 "PR by @nateberkopec, merged 2020-03-10" +[#2170]:https://github.com/puma/puma/pull/2170 "PR by @nateberkopec, merged 2020-03-10" +[#2076]:https://github.com/puma/puma/pull/2076 "PR by @drews256, merged 2020-02-27" +[#2022]:https://github.com/puma/puma/pull/2022 "PR by @olleolleolle, merged 2019-11-11" +[#2300]:https://github.com/puma/puma/pull/2300 "PR by @alexeevit, merged 2020-07-06" +[#2269]:https://github.com/puma/puma/pull/2269 "PR by @MSP-Greg, merged 2020-08-31" +[#2312]:https://github.com/puma/puma/pull/2312 "PR by @MSP-Greg, merged 2020-07-20" 
+[#2338]:https://github.com/puma/puma/issues/2338 "Issue by @micahhainlinestitchfix, closed 2020-08-18" +[#2116]:https://github.com/puma/puma/pull/2116 "PR by @MSP-Greg, merged 2020-05-15" +[#2074]:https://github.com/puma/puma/issues/2074 "Issue by @jchristie55332, closed 2020-02-19" +[#2211]:https://github.com/puma/puma/pull/2211 "PR by @MSP-Greg, merged 2020-03-30" +[#2069]:https://github.com/puma/puma/pull/2069 "PR by @MSP-Greg, merged 2019-11-09" +[#2112]:https://github.com/puma/puma/pull/2112 "PR by @wjordan, merged 2020-03-03" +[#1893]:https://github.com/puma/puma/pull/1893 "PR by @seven1m, merged 2020-02-18" +[#2119]:https://github.com/puma/puma/pull/2119 "PR by @wjordan, merged 2020-02-20" +[#2121]:https://github.com/puma/puma/pull/2121 "PR by @wjordan, merged 2020-02-21" +[#2154]:https://github.com/puma/puma/pull/2154 "PR by @cjlarose, merged 2020-03-10" +[#1551]:https://github.com/puma/puma/issues/1551 "Issue by @austinthecoder, closed 2020-03-10" +[#2198]:https://github.com/puma/puma/pull/2198 "PR by @eregon, merged 2020-03-24" +[#2216]:https://github.com/puma/puma/pull/2216 "PR by @praboud-stripe, merged 2020-04-06" +[#2122]:https://github.com/puma/puma/pull/2122 "PR by @wjordan, merged 2020-04-10" +[#2177]:https://github.com/puma/puma/issues/2177 "Issue by @GuiTeK, closed 2020-04-08" +[#2221]:https://github.com/puma/puma/pull/2221 "PR by @wjordan, merged 2020-04-17" +[#2233]:https://github.com/puma/puma/pull/2233 "PR by @ayufan, merged 2020-04-25" +[#2234]:https://github.com/puma/puma/pull/2234 "PR by @wjordan, merged 2020-04-30" +[#2225]:https://github.com/puma/puma/issues/2225 "Issue by @nateberkopec, closed 2020-04-27" +[#2267]:https://github.com/puma/puma/pull/2267 "PR by @wjordan, merged 2020-05-20" +[#2287]:https://github.com/puma/puma/pull/2287 "PR by @eugeneius, merged 2020-05-31" +[#2317]:https://github.com/puma/puma/pull/2317 "PR by @MSP-Greg, merged 2020-09-01" +[#2319]:https://github.com/puma/puma/issues/2319 "Issue by @AlexWayfer, closed 
2020-09-03" +[#2326]:https://github.com/puma/puma/pull/2326 "PR by @rkistner, closed 2020-09-04" +[#2299]:https://github.com/puma/puma/issues/2299 "Issue by @JohnPhillips31416, closed 2020-09-17" +[#2095]:https://github.com/puma/puma/pull/2095 "PR by @bdewater, merged 2019-12-25" +[#2102]:https://github.com/puma/puma/pull/2102 "PR by @bdewater, merged 2020-02-07" +[#2111]:https://github.com/puma/puma/pull/2111 "PR by @wjordan, merged 2020-02-20" +[#1980]:https://github.com/puma/puma/pull/1980 "PR by @nateberkopec, merged 2020-02-27" +[#2189]:https://github.com/puma/puma/pull/2189 "PR by @jkowens, merged 2020-03-19" +[#2124]:https://github.com/puma/puma/pull/2124 "PR by @wjordan, merged 2020-04-14" +[#2223]:https://github.com/puma/puma/pull/2223 "PR by @wjordan, merged 2020-04-20" +[#2239]:https://github.com/puma/puma/pull/2239 "PR by @wjordan, merged 2020-05-15" +[#2496]:https://github.com/puma/puma/pull/2496 "PR by @TheRusskiy, merged 2020-11-30" +[#2304]:https://github.com/puma/puma/issues/2304 "Issue by @mpeltomaa, closed 2020-09-05" +[#2132]:https://github.com/puma/puma/issues/2132 "Issue by @bmclean, closed 2020-02-28" +[#2010]:https://github.com/puma/puma/pull/2010 "PR by @nateberkopec, merged 2019-10-07" +[#2012]:https://github.com/puma/puma/pull/2012 "PR by @headius, merged 2019-10-07" +[#2046]:https://github.com/puma/puma/pull/2046 "PR by @composerinteralia, merged 2019-10-21" +[#2052]:https://github.com/puma/puma/pull/2052 "PR by @composerinteralia, merged 2019-11-02" +[#1564]:https://github.com/puma/puma/issues/1564 "Issue by @perlun, closed 2019-10-07" +[#2035]:https://github.com/puma/puma/pull/2035 "PR by @AndrewSpeed, merged 2019-10-18" +[#2048]:https://github.com/puma/puma/pull/2048 "PR by @hahmed, merged 2019-10-21" +[#2050]:https://github.com/puma/puma/pull/2050 "PR by @olleolleolle, merged 2019-10-25" +[#1842]:https://github.com/puma/puma/issues/1842 "Issue by @nateberkopec, closed 2019-09-18" +[#1988]:https://github.com/puma/puma/issues/1988 
"Issue by @mcg, closed 2019-10-01" +[#1986]:https://github.com/puma/puma/issues/1986 "Issue by @flaminestone, closed 2019-10-01" +[#1994]:https://github.com/puma/puma/issues/1994 "Issue by @LimeBlast, closed 2019-10-01" +[#2006]:https://github.com/puma/puma/pull/2006 "PR by @nateberkopec, merged 2019-10-01" +[#1222]:https://github.com/puma/puma/issues/1222 "Issue by @seanmckinley, closed 2019-10-04" +[#1885]:https://github.com/puma/puma/pull/1885 "PR by @spk, merged 2019-08-10" +[#1934]:https://github.com/puma/puma/pull/1934 "PR by @zarelit, merged 2019-08-28" +[#1105]:https://github.com/puma/puma/pull/1105 "PR by @daveallie, merged 2019-09-02" +[#1786]:https://github.com/puma/puma/pull/1786 "PR by @evanphx, merged 2019-09-11" +[#1320]:https://github.com/puma/puma/pull/1320 "PR by @nateberkopec, merged 2019-09-12" +[#1968]:https://github.com/puma/puma/pull/1968 "PR by @nateberkopec, merged 2019-09-15" +[#1908]:https://github.com/puma/puma/pull/1908 "PR by @MSP-Greg, merged 2019-08-23" +[#1952]:https://github.com/puma/puma/pull/1952 "PR by @MSP-Greg, merged 2019-09-19" +[#1941]:https://github.com/puma/puma/pull/1941 "PR by @MSP-Greg, merged 2019-09-02" +[#1961]:https://github.com/puma/puma/pull/1961 "PR by @nateberkopec, merged 2019-09-11" +[#1970]:https://github.com/puma/puma/pull/1970 "PR by @MSP-Greg, merged 2019-09-18" +[#1946]:https://github.com/puma/puma/pull/1946 "PR by @nateberkopec, merged 2019-09-02" +[#1831]:https://github.com/puma/puma/pull/1831 "PR by @spk, merged 2019-07-27" +[#1816]:https://github.com/puma/puma/pull/1816 "PR by @ylecuyer, merged 2019-08-01" +[#1844]:https://github.com/puma/puma/pull/1844 "PR by @ylecuyer, merged 2019-08-01" +[#1836]:https://github.com/puma/puma/pull/1836 "PR by @MSP-Greg, merged 2019-08-06" +[#1887]:https://github.com/puma/puma/pull/1887 "PR by @MSP-Greg, merged 2019-08-06" +[#1812]:https://github.com/puma/puma/pull/1812 "PR by @kou, merged 2019-08-03" +[#1491]:https://github.com/puma/puma/pull/1491 "PR by 
@olleolleolle, merged 2019-07-17" +[#1837]:https://github.com/puma/puma/pull/1837 "PR by @montanalow, merged 2019-07-25" +[#1857]:https://github.com/puma/puma/pull/1857 "PR by @Jesus, merged 2019-08-03" +[#1822]:https://github.com/puma/puma/pull/1822 "PR by @Jesus, merged 2019-08-01" +[#1863]:https://github.com/puma/puma/pull/1863 "PR by @dzunk, merged 2019-08-04" +[#1838]:https://github.com/puma/puma/pull/1838 "PR by @bogn83, merged 2019-07-14" +[#1882]:https://github.com/puma/puma/pull/1882 "PR by @okuramasafumi, merged 2019-08-06" +[#1848]:https://github.com/puma/puma/pull/1848 "PR by @nateberkopec, merged 2019-07-16" +[#1847]:https://github.com/puma/puma/pull/1847 "PR by @nateberkopec, merged 2019-07-16" +[#1846]:https://github.com/puma/puma/pull/1846 "PR by @nateberkopec, merged 2019-07-16" +[#1853]:https://github.com/puma/puma/pull/1853 "PR by @Jesus, merged 2019-07-18" +[#1850]:https://github.com/puma/puma/pull/1850 "PR by @nateberkopec, merged 2019-07-27" +[#1866]:https://github.com/puma/puma/pull/1866 "PR by @josacar, merged 2019-07-28" +[#1870]:https://github.com/puma/puma/pull/1870 "PR by @MSP-Greg, merged 2019-07-30" +[#1872]:https://github.com/puma/puma/pull/1872 "PR by @MSP-Greg, merged 2019-07-30" +[#1833]:https://github.com/puma/puma/issues/1833 "Issue by @julik, closed 2019-07-09" +[#1888]:https://github.com/puma/puma/pull/1888 "PR by @ClikeX, merged 2019-08-06" +[#1829]:https://github.com/puma/puma/pull/1829 "PR by @Fudoshiki, merged 2019-07-09" +[#1832]:https://github.com/puma/puma/pull/1832 "PR by @MSP-Greg, merged 2019-07-08" +[#1827]:https://github.com/puma/puma/pull/1827 "PR by @amrrbakry, merged 2019-06-27" +[#1562]:https://github.com/puma/puma/pull/1562 "PR by @skrobul, merged 2019-02-20" +[#1569]:https://github.com/puma/puma/pull/1569 "PR by @rianmcguire, merged 2019-02-20" +[#1648]:https://github.com/puma/puma/pull/1648 "PR by @wjordan, merged 2019-02-20" +[#1691]:https://github.com/puma/puma/pull/1691 "PR by @kares, merged 2019-02-20" 
+[#1716]:https://github.com/puma/puma/pull/1716 "PR by @mdkent, merged 2019-02-20" +[#1690]:https://github.com/puma/puma/pull/1690 "PR by @mic-kul, merged 2019-03-11" +[#1689]:https://github.com/puma/puma/pull/1689 "PR by @michaelherold, merged 2019-03-11" +[#1728]:https://github.com/puma/puma/pull/1728 "PR by @evanphx, merged 2019-03-20" +[#1824]:https://github.com/puma/puma/pull/1824 "PR by @spk, merged 2019-06-24" +[#1685]:https://github.com/puma/puma/pull/1685 "PR by @mainameiz, merged 2019-02-20" +[#1808]:https://github.com/puma/puma/pull/1808 "PR by @schneems, merged 2019-06-10" +[#1508]:https://github.com/puma/puma/pull/1508 "PR by @florin555, merged 2019-02-20" +[#1650]:https://github.com/puma/puma/pull/1650 "PR by @adam101, merged 2019-02-20" +[#1655]:https://github.com/puma/puma/pull/1655 "PR by @mipearson, merged 2019-02-20" +[#1671]:https://github.com/puma/puma/pull/1671 "PR by @eric-norcross, merged 2019-02-20" +[#1583]:https://github.com/puma/puma/pull/1583 "PR by @chwevans, merged 2019-02-20" +[#1773]:https://github.com/puma/puma/pull/1773 "PR by @enebo, merged 2019-04-14" +[#1731]:https://github.com/puma/puma/issues/1731 "Issue by @Fudoshiki, closed 2019-03-20" +[#1803]:https://github.com/puma/puma/pull/1803 "PR by @Jesus, merged 2019-05-28" +[#1741]:https://github.com/puma/puma/pull/1741 "PR by @MSP-Greg, merged 2019-03-19" +[#1674]:https://github.com/puma/puma/issues/1674 "Issue by @atitan, closed 2019-06-12" +[#1720]:https://github.com/puma/puma/issues/1720 "Issue by @voxik, closed 2019-03-20" +[#1730]:https://github.com/puma/puma/issues/1730 "Issue by @nearapogee, closed 2019-07-16" +[#1755]:https://github.com/puma/puma/issues/1755 "Issue by @vbalazs, closed 2019-07-26" +[#1649]:https://github.com/puma/puma/pull/1649 "PR by @schneems, merged 2018-10-17" +[#1607]:https://github.com/puma/puma/pull/1607 "PR by @harmdewit, merged 2018-08-15" +[#1700]:https://github.com/puma/puma/pull/1700 "PR by @schneems, merged 2019-01-05" 
+[#1630]:https://github.com/puma/puma/pull/1630 "PR by @eregon, merged 2018-09-11" +[#1478]:https://github.com/puma/puma/pull/1478 "PR by @eallison91, merged 2018-05-09" +[#1604]:https://github.com/puma/puma/pull/1604 "PR by @schneems, merged 2018-07-02" +[#1579]:https://github.com/puma/puma/pull/1579 "PR by @schneems, merged 2018-06-14" +[#1506]:https://github.com/puma/puma/pull/1506 "PR by @dekellum, merged 2018-05-09" +[#1563]:https://github.com/puma/puma/pull/1563 "PR by @dannyfallon, merged 2018-05-01" +[#1557]:https://github.com/puma/puma/pull/1557 "PR by @swrobel, merged 2018-05-09" +[#1529]:https://github.com/puma/puma/pull/1529 "PR by @desnudopenguino, merged 2018-03-20" +[#1532]:https://github.com/puma/puma/pull/1532 "PR by @schneems, merged 2018-03-21" +[#1482]:https://github.com/puma/puma/pull/1482 "PR by @shayonj, merged 2018-03-19" +[#1511]:https://github.com/puma/puma/pull/1511 "PR by @jemiam, merged 2018-03-19" +[#1545]:https://github.com/puma/puma/pull/1545 "PR by @hoshinotsuyoshi, merged 2018-03-28" +[#1550]:https://github.com/puma/puma/pull/1550 "PR by @eileencodes, merged 2018-03-29" +[#1553]:https://github.com/puma/puma/pull/1553 "PR by @eugeneius, merged 2018-04-02" +[#1510]:https://github.com/puma/puma/issues/1510 "Issue by @vincentwoo, closed 2018-03-06" +[#1524]:https://github.com/puma/puma/pull/1524 "PR by @tuwukee, closed 2018-03-06" +[#1507]:https://github.com/puma/puma/issues/1507 "Issue by @vincentwoo, closed 2018-03-19" +[#1483]:https://github.com/puma/puma/issues/1483 "Issue by @igravious, closed 2018-03-06" +[#1502]:https://github.com/puma/puma/issues/1502 "Issue by @vincentwoo, closed 2020-03-09" +[#1403]:https://github.com/puma/puma/pull/1403 "PR by @eileencodes, merged 2017-10-04" +[#1435]:https://github.com/puma/puma/pull/1435 "PR by @juliancheal, merged 2017-10-11" +[#1340]:https://github.com/puma/puma/pull/1340 "PR by @ViliusLuneckas, merged 2017-10-16" +[#1434]:https://github.com/puma/puma/pull/1434 "PR by @jumbosushi, merged 
2017-10-10" +[#1436]:https://github.com/puma/puma/pull/1436 "PR by @luislavena, merged 2017-10-11" +[#1418]:https://github.com/puma/puma/pull/1418 "PR by @eileencodes, merged 2017-09-22" +[#1416]:https://github.com/puma/puma/pull/1416 "PR by @hiimtaylorjones, merged 2017-09-22" +[#1409]:https://github.com/puma/puma/pull/1409 "PR by @olleolleolle, merged 2017-09-13" +[#1427]:https://github.com/puma/puma/issues/1427 "Issue by @garybernhardt, closed 2017-10-04" +[#1430]:https://github.com/puma/puma/pull/1430 "PR by @MSP-Greg, merged 2017-10-09" +[#1429]:https://github.com/puma/puma/pull/1429 "PR by @perlun, merged 2017-10-09" +[#1455]:https://github.com/puma/puma/pull/1455 "PR by @perlun, merged 2017-11-16" +[#1425]:https://github.com/puma/puma/pull/1425 "PR by @vizcay, merged 2017-10-01" +[#1452]:https://github.com/puma/puma/pull/1452 "PR by @eprothro, merged 2017-11-16" +[#1439]:https://github.com/puma/puma/pull/1439 "PR by @MSP-Greg, merged 2017-10-16" +[#1442]:https://github.com/puma/puma/pull/1442 "PR by @MSP-Greg, merged 2017-10-19" +[#1464]:https://github.com/puma/puma/pull/1464 "PR by @MSP-Greg, merged 2017-11-20" +[#1384]:https://github.com/puma/puma/pull/1384 "PR by @noahgibbs, merged 2017-08-03" +[#1111]:https://github.com/puma/puma/pull/1111 "PR by @alexlance, merged 2017-06-04" +[#1392]:https://github.com/puma/puma/pull/1392 "PR by @hoffm, merged 2017-08-11" +[#1347]:https://github.com/puma/puma/pull/1347 "PR by @NikolayRys, merged 2017-06-28" +[#1334]:https://github.com/puma/puma/pull/1334 "PR by @respire, merged 2017-06-13" +[#1383]:https://github.com/puma/puma/pull/1383 "PR by @schneems, merged 2017-08-02" +[#1368]:https://github.com/puma/puma/pull/1368 "PR by @bongole, merged 2017-08-03" +[#1318]:https://github.com/puma/puma/pull/1318 "PR by @nateberkopec, merged 2017-08-03" +[#1376]:https://github.com/puma/puma/pull/1376 "PR by @pat, merged 2017-08-03" +[#1388]:https://github.com/puma/puma/pull/1388 "PR by @nateberkopec, merged 2017-08-08" 
+[#1390]:https://github.com/puma/puma/pull/1390 "PR by @junaruga, merged 2017-08-16" +[#1391]:https://github.com/puma/puma/pull/1391 "PR by @junaruga, merged 2017-08-16" +[#1385]:https://github.com/puma/puma/pull/1385 "PR by @grosser, merged 2017-08-16" +[#1377]:https://github.com/puma/puma/pull/1377 "PR by @shayonj, merged 2017-08-16" +[#1337]:https://github.com/puma/puma/pull/1337 "PR by @shayonj, merged 2017-08-16" +[#1325]:https://github.com/puma/puma/pull/1325 "PR by @palkan, merged 2017-06-04" +[#1395]:https://github.com/puma/puma/pull/1395 "PR by @junaruga, merged 2017-08-16" +[#1367]:https://github.com/puma/puma/issues/1367 "Issue by @dekellum, closed 2017-08-17" +[#1314]:https://github.com/puma/puma/pull/1314 "PR by @grosser, merged 2017-06-02" +[#1311]:https://github.com/puma/puma/pull/1311 "PR by @grosser, merged 2017-06-02" +[#1313]:https://github.com/puma/puma/pull/1313 "PR by @grosser, merged 2017-06-03" +[#1260]:https://github.com/puma/puma/pull/1260 "PR by @grosser, merged 2017-04-11" +[#1278]:https://github.com/puma/puma/pull/1278 "PR by @evanphx, merged 2017-04-28" +[#1306]:https://github.com/puma/puma/pull/1306 "PR by @jules2689, merged 2017-05-31" +[#1274]:https://github.com/puma/puma/pull/1274 "PR by @evanphx, merged 2017-05-01" +[#1261]:https://github.com/puma/puma/pull/1261 "PR by @jacksonrayhamilton, merged 2017-04-07" +[#1259]:https://github.com/puma/puma/pull/1259 "PR by @jacksonrayhamilton, merged 2017-04-07" +[#1248]:https://github.com/puma/puma/pull/1248 "PR by @davidarnold, merged 2017-04-18" +[#1277]:https://github.com/puma/puma/pull/1277 "PR by @schneems, merged 2017-05-01" +[#1290]:https://github.com/puma/puma/pull/1290 "PR by @schneems, merged 2017-05-12" +[#1285]:https://github.com/puma/puma/pull/1285 "PR by @fmauNeko, merged 2017-05-12" +[#1282]:https://github.com/puma/puma/pull/1282 "PR by @grosser, merged 2017-05-09" +[#1294]:https://github.com/puma/puma/pull/1294 "PR by @masry707, merged 2017-05-15" 
+[#1206]:https://github.com/puma/puma/pull/1206 "PR by @NikolayRys, closed 2017-06-27" +[#1241]:https://github.com/puma/puma/issues/1241 "Issue by @renchap, closed 2017-03-14" +[#1239]:https://github.com/puma/puma/pull/1239 "PR by @schneems, merged 2017-03-10" +[#1234]:https://github.com/puma/puma/pull/1234 "PR by @schneems, merged 2017-03-09" +[#1226]:https://github.com/puma/puma/pull/1226 "PR by @eileencodes, merged 2017-03-09" +[#1227]:https://github.com/puma/puma/pull/1227 "PR by @sirupsen, merged 2017-02-27" +[#1213]:https://github.com/puma/puma/pull/1213 "PR by @junaruga, merged 2017-02-28" +[#1182]:https://github.com/puma/puma/issues/1182 "Issue by @brunowego, closed 2017-02-09" +[#1203]:https://github.com/puma/puma/pull/1203 "PR by @twalpole, merged 2017-02-09" +[#1129]:https://github.com/puma/puma/pull/1129 "PR by @chtitux, merged 2016-12-12" +[#1165]:https://github.com/puma/puma/pull/1165 "PR by @sriedel, merged 2016-12-21" +[#1175]:https://github.com/puma/puma/pull/1175 "PR by @jemiam, merged 2016-12-21" +[#1068]:https://github.com/puma/puma/pull/1068 "PR by @junaruga, merged 2016-09-05" +[#1091]:https://github.com/puma/puma/pull/1091 "PR by @frodsan, merged 2016-09-17" +[#1088]:https://github.com/puma/puma/pull/1088 "PR by @frodsan, merged 2016-11-20" +[#1160]:https://github.com/puma/puma/pull/1160 "PR by @frodsan, merged 2016-11-24" +[#1169]:https://github.com/puma/puma/pull/1169 "PR by @scbrubaker02, merged 2016-12-12" +[#1061]:https://github.com/puma/puma/pull/1061 "PR by @michaelsauter, merged 2016-09-05" +[#1036]:https://github.com/puma/puma/issues/1036 "Issue by @matobinder, closed 2016-08-03" +[#1120]:https://github.com/puma/puma/pull/1120 "PR by @prathamesh-sonpatki, merged 2016-11-21" +[#1178]:https://github.com/puma/puma/pull/1178 "PR by @Koronen, merged 2016-12-21" +[#1002]:https://github.com/puma/puma/issues/1002 "Issue by @mattyb, closed 2016-07-26" +[#1063]:https://github.com/puma/puma/issues/1063 "Issue by @mperham, closed 2016-09-05" 
+[#1089]:https://github.com/puma/puma/issues/1089 "Issue by @AdamBialas, closed 2016-09-17" +[#1114]:https://github.com/puma/puma/pull/1114 "PR by @sj26, merged 2016-12-13" +[#1110]:https://github.com/puma/puma/pull/1110 "PR by @montdidier, merged 2016-12-12" +[#1135]:https://github.com/puma/puma/pull/1135 "PR by @jkraemer, merged 2016-11-19" +[#1081]:https://github.com/puma/puma/pull/1081 "PR by @frodsan, merged 2016-09-08" +[#1138]:https://github.com/puma/puma/pull/1138 "PR by @skull-squadron, merged 2016-12-13" +[#1118]:https://github.com/puma/puma/pull/1118 "PR by @hiroara, merged 2016-11-20" +[#1075]:https://github.com/puma/puma/issues/1075 "Issue by @pvalena, closed 2016-09-06" +[#932]:https://github.com/puma/puma/issues/932 "Issue by @everplays, closed 2016-07-24" +[#519]:https://github.com/puma/puma/issues/519 "Issue by @tmornini, closed 2016-07-25" +[#828]:https://github.com/puma/puma/issues/828 "Issue by @Zapotek, closed 2016-07-24" +[#984]:https://github.com/puma/puma/issues/984 "Issue by @erichmenge, closed 2016-07-24" +[#1028]:https://github.com/puma/puma/issues/1028 "Issue by @matobinder, closed 2016-07-24" +[#1023]:https://github.com/puma/puma/issues/1023 "Issue by @fera2k, closed 2016-07-24" +[#1027]:https://github.com/puma/puma/issues/1027 "Issue by @rosenfeld, closed 2016-07-24" +[#925]:https://github.com/puma/puma/issues/925 "Issue by @lokenmakwana, closed 2016-07-24" +[#911]:https://github.com/puma/puma/issues/911 "Issue by @veganstraightedge, closed 2016-07-24" +[#620]:https://github.com/puma/puma/issues/620 "Issue by @javanthropus, closed 2016-07-25" +[#778]:https://github.com/puma/puma/issues/778 "Issue by @niedhui, closed 2016-07-24" +[#1021]:https://github.com/puma/puma/pull/1021 "PR by @sarahzrf, merged 2016-07-20" +[#1022]:https://github.com/puma/puma/issues/1022 "Issue by @AKovtunov, closed 2017-08-16" +[#958]:https://github.com/puma/puma/issues/958 "Issue by @lalitlogical, closed 2016-04-23" 
+[#782]:https://github.com/puma/puma/issues/782 "Issue by @Tonkpils, closed 2016-07-19" +[#1010]:https://github.com/puma/puma/issues/1010 "Issue by @mneumark, closed 2016-07-19" +[#959]:https://github.com/puma/puma/issues/959 "Issue by @mwpastore, closed 2016-04-22" +[#840]:https://github.com/puma/puma/issues/840 "Issue by @maxkwallace, closed 2016-04-07" +[#1007]:https://github.com/puma/puma/pull/1007 "PR by @willnet, merged 2016-06-24" +[#1014]:https://github.com/puma/puma/pull/1014 "PR by @szymon-jez, merged 2016-07-11" +[#1015]:https://github.com/puma/puma/pull/1015 "PR by @bf4, merged 2016-07-19" +[#1017]:https://github.com/puma/puma/pull/1017 "PR by @jorihardman, merged 2016-07-19" +[#954]:https://github.com/puma/puma/pull/954 "PR by @jf, merged 2016-04-12" +[#955]:https://github.com/puma/puma/pull/955 "PR by @jf, merged 2016-04-22" +[#956]:https://github.com/puma/puma/pull/956 "PR by @maxkwallace, merged 2016-04-12" +[#960]:https://github.com/puma/puma/pull/960 "PR by @kmayer, merged 2016-04-15" +[#969]:https://github.com/puma/puma/pull/969 "PR by @frankwong15, merged 2016-05-10" +[#970]:https://github.com/puma/puma/pull/970 "PR by @willnet, merged 2016-04-26" +[#974]:https://github.com/puma/puma/pull/974 "PR by @reidmorrison, merged 2016-05-10" +[#977]:https://github.com/puma/puma/pull/977 "PR by @snow, merged 2016-05-10" +[#981]:https://github.com/puma/puma/pull/981 "PR by @zach-chai, merged 2016-07-19" +[#993]:https://github.com/puma/puma/pull/993 "PR by @scorix, merged 2016-07-19" +[#938]:https://github.com/puma/puma/issues/938 "Issue by @vandrijevik, closed 2016-04-07" +[#529]:https://github.com/puma/puma/issues/529 "Issue by @mperham, closed 2016-04-07" +[#788]:https://github.com/puma/puma/issues/788 "Issue by @herregroen, closed 2016-04-07" +[#894]:https://github.com/puma/puma/issues/894 "Issue by @rafbm, closed 2016-04-07" +[#937]:https://github.com/puma/puma/issues/937 "Issue by @huangxiangdan, closed 2016-04-07" 
+[#945]:https://github.com/puma/puma/pull/945 "PR by @dekellum, merged 2016-04-07" +[#946]:https://github.com/puma/puma/pull/946 "PR by @vipulnsward, merged 2016-04-07" +[#947]:https://github.com/puma/puma/pull/947 "PR by @vipulnsward, merged 2016-04-07" +[#936]:https://github.com/puma/puma/pull/936 "PR by @prathamesh-sonpatki, merged 2016-04-01" +[#940]:https://github.com/puma/puma/pull/940 "PR by @kyledrake, merged 2016-04-01" +[#942]:https://github.com/puma/puma/pull/942 "PR by @dekellum, merged 2016-04-01" +[#927]:https://github.com/puma/puma/pull/927 "PR by @jlecour, merged 2016-03-18" +[#931]:https://github.com/puma/puma/pull/931 "PR by @runlevel5, merged 2016-03-18" +[#922]:https://github.com/puma/puma/issues/922 "Issue by @LavirtheWhiolet, closed 2016-03-07" +[#923]:https://github.com/puma/puma/issues/923 "Issue by @donv, closed 2016-03-06" +[#912]:https://github.com/puma/puma/pull/912 "PR by @tricknotes, merged 2016-03-06" +[#921]:https://github.com/puma/puma/pull/921 "PR by @swrobel, merged 2016-03-06" +[#924]:https://github.com/puma/puma/pull/924 "PR by @tbrisker, merged 2016-03-07" +[#916]:https://github.com/puma/puma/issues/916 "Issue by @ma11hew28, closed 2016-03-06" +[#913]:https://github.com/puma/puma/issues/913 "Issue by @Casara, closed 2016-03-06" +[#918]:https://github.com/puma/puma/issues/918 "Issue by @rodrigdav, closed 2016-03-06" +[#910]:https://github.com/puma/puma/issues/910 "Issue by @ball-hayden, closed 2016-03-05" +[#914]:https://github.com/puma/puma/issues/914 "Issue by @osheroff, closed 2016-03-06" +[#901]:https://github.com/puma/puma/pull/901 "PR by @mitto, merged 2016-02-26" +[#902]:https://github.com/puma/puma/pull/902 "PR by @corrupt952, merged 2016-02-26" +[#905]:https://github.com/puma/puma/pull/905 "PR by @Eric-Guo, merged 2016-02-26" +[#852]:https://github.com/puma/puma/issues/852 "Issue by @asia653, closed 2016-02-25" +[#854]:https://github.com/puma/puma/issues/854 "Issue by @ollym, closed 2016-02-25" 
+[#824]:https://github.com/puma/puma/issues/824 "Issue by @MattWalston, closed 2016-02-25" +[#823]:https://github.com/puma/puma/issues/823 "Issue by @pneuman, closed 2016-02-25" +[#815]:https://github.com/puma/puma/issues/815 "Issue by @nate-dipiazza, closed 2016-02-25" +[#835]:https://github.com/puma/puma/issues/835 "Issue by @mwpastore, closed 2016-02-25" +[#798]:https://github.com/puma/puma/issues/798 "Issue by @schneems, closed 2016-02-25" +[#876]:https://github.com/puma/puma/issues/876 "Issue by @osheroff, closed 2016-02-25" +[#849]:https://github.com/puma/puma/issues/849 "Issue by @apotheon, closed 2016-02-25" +[#871]:https://github.com/puma/puma/pull/871 "PR by @deepj, merged 2016-02-25" +[#874]:https://github.com/puma/puma/pull/874 "PR by @wallclockbuilder, merged 2016-02-25" +[#883]:https://github.com/puma/puma/pull/883 "PR by @dadah89, merged 2016-02-25" +[#884]:https://github.com/puma/puma/pull/884 "PR by @furkanmustafa, merged 2016-02-25" +[#888]:https://github.com/puma/puma/pull/888 "PR by @mlarraz, merged 2016-02-25" +[#890]:https://github.com/puma/puma/pull/890 "PR by @todd, merged 2016-02-25" +[#891]:https://github.com/puma/puma/pull/891 "PR by @ctaintor, merged 2016-02-25" +[#893]:https://github.com/puma/puma/pull/893 "PR by @spastorino, merged 2016-02-25" +[#897]:https://github.com/puma/puma/pull/897 "PR by @vanchi-zendesk, merged 2016-02-25" +[#899]:https://github.com/puma/puma/pull/899 "PR by @kch, merged 2016-02-25" +[#859]:https://github.com/puma/puma/issues/859 "Issue by @boxofrad, closed 2016-01-28" +[#822]:https://github.com/puma/puma/pull/822 "PR by @kwugirl, merged 2016-01-28" +[#833]:https://github.com/puma/puma/pull/833 "PR by @joemiller, merged 2016-01-28" +[#837]:https://github.com/puma/puma/pull/837 "PR by @YurySolovyov, merged 2016-01-28" +[#839]:https://github.com/puma/puma/pull/839 "PR by @ka8725, merged 2016-01-15" +[#845]:https://github.com/puma/puma/pull/845 "PR by @deepj, merged 2016-01-28" 
+[#846]:https://github.com/puma/puma/pull/846 "PR by @sriedel, merged 2016-01-15" +[#850]:https://github.com/puma/puma/pull/850 "PR by @deepj, merged 2016-01-15" +[#853]:https://github.com/puma/puma/pull/853 "PR by @Jeffrey6052, merged 2016-01-28" +[#857]:https://github.com/puma/puma/pull/857 "PR by @osheroff, merged 2016-01-15" +[#858]:https://github.com/puma/puma/pull/858 "PR by @mlarraz, merged 2016-01-28" +[#860]:https://github.com/puma/puma/pull/860 "PR by @osheroff, merged 2016-01-15" +[#861]:https://github.com/puma/puma/pull/861 "PR by @osheroff, merged 2016-01-15" +[#818]:https://github.com/puma/puma/pull/818 "PR by @unleashed, merged 2015-11-06" +[#819]:https://github.com/puma/puma/pull/819 "PR by @VictorLowther, merged 2015-11-06" +[#563]:https://github.com/puma/puma/issues/563 "Issue by @deathbob, closed 2015-11-06" +[#803]:https://github.com/puma/puma/issues/803 "Issue by @burningTyger, closed 2016-04-07" +[#768]:https://github.com/puma/puma/pull/768 "PR by @nathansamson, merged 2015-11-06" +[#773]:https://github.com/puma/puma/pull/773 "PR by @rossta, merged 2015-11-06" +[#774]:https://github.com/puma/puma/pull/774 "PR by @snow, merged 2015-11-06" +[#781]:https://github.com/puma/puma/pull/781 "PR by @sunsations, merged 2015-11-06" +[#791]:https://github.com/puma/puma/pull/791 "PR by @unleashed, merged 2015-10-01" +[#793]:https://github.com/puma/puma/pull/793 "PR by @robdimarco, merged 2015-11-06" +[#794]:https://github.com/puma/puma/pull/794 "PR by @peterkeen, merged 2015-11-06" +[#795]:https://github.com/puma/puma/pull/795 "PR by @unleashed, merged 2015-11-06" +[#796]:https://github.com/puma/puma/pull/796 "PR by @cschneid, merged 2015-10-13" +[#799]:https://github.com/puma/puma/pull/799 "PR by @annawinkler, merged 2015-11-06" +[#800]:https://github.com/puma/puma/pull/800 "PR by @liamseanbrady, merged 2015-11-06" +[#801]:https://github.com/puma/puma/pull/801 "PR by @scottjg, merged 2015-11-06" +[#802]:https://github.com/puma/puma/pull/802 "PR by 
@scottjg, merged 2015-11-06" +[#804]:https://github.com/puma/puma/pull/804 "PR by @burningTyger, merged 2015-11-06" +[#809]:https://github.com/puma/puma/pull/809 "PR by @unleashed, merged 2015-11-06" +[#810]:https://github.com/puma/puma/pull/810 "PR by @vlmonk, merged 2015-11-06" +[#814]:https://github.com/puma/puma/pull/814 "PR by @schneems, merged 2015-11-04" +[#817]:https://github.com/puma/puma/pull/817 "PR by @unleashed, merged 2015-11-06" +[#735]:https://github.com/puma/puma/issues/735 "Issue by @trekr5, closed 2015-08-04" +[#769]:https://github.com/puma/puma/issues/769 "Issue by @dovestyle, closed 2015-08-16" +[#767]:https://github.com/puma/puma/issues/767 "Issue by @kapso, closed 2015-08-15" +[#765]:https://github.com/puma/puma/issues/765 "Issue by @monfresh, closed 2015-08-15" +[#764]:https://github.com/puma/puma/issues/764 "Issue by @keithpitt, closed 2015-08-15" +[#669]:https://github.com/puma/puma/pull/669 "PR by @chulkilee, closed 2015-08-14" +[#673]:https://github.com/puma/puma/pull/673 "PR by @chulkilee, closed 2015-08-14" +[#668]:https://github.com/puma/puma/pull/668 "PR by @kcollignon, merged 2015-08-14" +[#754]:https://github.com/puma/puma/pull/754 "PR by @nathansamson, merged 2015-08-14" +[#759]:https://github.com/puma/puma/pull/759 "PR by @BenV, merged 2015-08-14" +[#761]:https://github.com/puma/puma/pull/761 "PR by @dmarcotte, merged 2015-08-14" +[#742]:https://github.com/puma/puma/pull/742 "PR by @deivid-rodriguez, merged 2015-07-17" +[#743]:https://github.com/puma/puma/pull/743 "PR by @matthewd, merged 2015-07-18" +[#749]:https://github.com/puma/puma/pull/749 "PR by @huacnlee, merged 2015-08-04" +[#751]:https://github.com/puma/puma/pull/751 "PR by @costi, merged 2015-07-31" +[#741]:https://github.com/puma/puma/issues/741 "Issue by @GUI, closed 2015-07-17" +[#739]:https://github.com/puma/puma/issues/739 "Issue by @hab278, closed 2015-07-17" +[#737]:https://github.com/puma/puma/issues/737 "Issue by @dmill, closed 2015-07-16" 
+[#733]:https://github.com/puma/puma/issues/733 "Issue by @Eric-Guo, closed 2015-07-15" +[#736]:https://github.com/puma/puma/pull/736 "PR by @paulanunda, merged 2015-07-15" +[#722]:https://github.com/puma/puma/issues/722 "Issue by @mikeki, closed 2015-07-14" +[#694]:https://github.com/puma/puma/issues/694 "Issue by @yld, closed 2015-06-10" +[#705]:https://github.com/puma/puma/issues/705 "Issue by @TheTeaNerd, closed 2015-07-14" +[#686]:https://github.com/puma/puma/pull/686 "PR by @jjb, merged 2015-06-10" +[#693]:https://github.com/puma/puma/pull/693 "PR by @rob-murray, merged 2015-06-10" +[#697]:https://github.com/puma/puma/pull/697 "PR by @spk, merged 2015-06-10" +[#699]:https://github.com/puma/puma/pull/699 "PR by @deees, merged 2015-05-19" +[#701]:https://github.com/puma/puma/pull/701 "PR by @deepj, merged 2015-05-19" +[#702]:https://github.com/puma/puma/pull/702 "PR by @OleMchls, merged 2015-06-10" +[#703]:https://github.com/puma/puma/pull/703 "PR by @deepj, merged 2015-06-10" +[#704]:https://github.com/puma/puma/pull/704 "PR by @grega, merged 2015-06-10" +[#709]:https://github.com/puma/puma/pull/709 "PR by @lian, merged 2015-06-10" +[#711]:https://github.com/puma/puma/pull/711 "PR by @julik, merged 2015-06-10" +[#712]:https://github.com/puma/puma/pull/712 "PR by @chewi, merged 2015-07-14" +[#715]:https://github.com/puma/puma/pull/715 "PR by @raymondmars, merged 2015-07-14" +[#725]:https://github.com/puma/puma/pull/725 "PR by @rwz, merged 2015-07-14" +[#726]:https://github.com/puma/puma/pull/726 "PR by @jshafton, merged 2015-07-14" +[#729]:https://github.com/puma/puma/pull/729 "PR by @allaire, merged 2015-07-14" +[#730]:https://github.com/puma/puma/pull/730 "PR by @iamjarvo, merged 2015-07-14" +[#690]:https://github.com/puma/puma/issues/690 "Issue by @bachue, closed 2015-04-21" +[#684]:https://github.com/puma/puma/issues/684 "Issue by @tomquas, closed 2015-04-13" +[#698]:https://github.com/puma/puma/pull/698 "PR by @dmarcotte, merged 2015-05-04" 
+[#683]:https://github.com/puma/puma/issues/683 "Issue by @indirect, closed 2015-04-11" +[#657]:https://github.com/puma/puma/pull/657 "PR by @schneems, merged 2015-02-19" +[#658]:https://github.com/puma/puma/pull/658 "PR by @tomohiro, merged 2015-02-23" +[#662]:https://github.com/puma/puma/pull/662 "PR by @iaintshine, merged 2015-03-06" +[#664]:https://github.com/puma/puma/pull/664 "PR by @fxposter, merged 2015-03-09" +[#667]:https://github.com/puma/puma/pull/667 "PR by @JuanitoFatas, merged 2015-03-12" +[#672]:https://github.com/puma/puma/pull/672 "PR by @chulkilee, merged 2015-03-15" +[#653]:https://github.com/puma/puma/issues/653 "Issue by @dvrensk, closed 2015-02-11" +[#644]:https://github.com/puma/puma/pull/644 "PR by @bpaquet, merged 2015-01-29" +[#646]:https://github.com/puma/puma/pull/646 "PR by @mkonecny, merged 2015-02-05" +[#630]:https://github.com/puma/puma/issues/630 "Issue by @jelmd, closed 2015-01-20" +[#622]:https://github.com/puma/puma/issues/622 "Issue by @sabamotto, closed 2015-01-20" +[#583]:https://github.com/puma/puma/issues/583 "Issue by @rwojsznis, closed 2015-01-20" +[#586]:https://github.com/puma/puma/issues/586 "Issue by @ponchik, closed 2015-01-20" +[#359]:https://github.com/puma/puma/issues/359 "Issue by @natew, closed 2014-12-13" +[#633]:https://github.com/puma/puma/issues/633 "Issue by @joevandyk, closed 2015-01-20" +[#478]:https://github.com/puma/puma/pull/478 "PR by @rubencaro, merged 2015-01-20" +[#610]:https://github.com/puma/puma/pull/610 "PR by @kwilczynski, merged 2014-11-27" +[#611]:https://github.com/puma/puma/pull/611 "PR by @jasonl, merged 2015-01-20" +[#616]:https://github.com/puma/puma/pull/616 "PR by @jc00ke, merged 2014-12-10" +[#623]:https://github.com/puma/puma/pull/623 "PR by @raldred, merged 2015-01-20" +[#628]:https://github.com/puma/puma/pull/628 "PR by @rdpoor, merged 2015-01-20" +[#634]:https://github.com/puma/puma/pull/634 "PR by @deepj, merged 2015-01-20" +[#637]:https://github.com/puma/puma/pull/637 "PR by 
@raskhadafi, merged 2015-01-20" +[#639]:https://github.com/puma/puma/pull/639 "PR by @ebeigarts, merged 2015-01-20" +[#640]:https://github.com/puma/puma/pull/640 "PR by @bailsman, merged 2015-01-20" +[#591]:https://github.com/puma/puma/issues/591 "Issue by @renier, closed 2014-11-24" +[#606]:https://github.com/puma/puma/issues/606 "Issue by @, closed 2014-11-24" +[#560]:https://github.com/puma/puma/pull/560 "PR by @raskhadafi, merged 2014-11-24" +[#566]:https://github.com/puma/puma/pull/566 "PR by @sheltond, merged 2014-11-24" +[#593]:https://github.com/puma/puma/pull/593 "PR by @andruby, merged 2014-10-30" +[#594]:https://github.com/puma/puma/pull/594 "PR by @hassox, merged 2014-10-31" +[#596]:https://github.com/puma/puma/pull/596 "PR by @burningTyger, merged 2014-11-01" +[#601]:https://github.com/puma/puma/pull/601 "PR by @sorentwo, merged 2014-11-24" +[#602]:https://github.com/puma/puma/pull/602 "PR by @1334, merged 2014-11-24" +[#608]:https://github.com/puma/puma/pull/608 "PR by @Gu1, merged 2014-11-24" +[#538]:https://github.com/puma/puma/pull/538 "PR by @memiux, merged 2014-11-24" +[#550]:https://github.com/puma/puma/issues/550 "Issue by @, closed 2014-10-30" +[#549]:https://github.com/puma/puma/pull/549 "PR by @bsnape, merged 2014-10-16" +[#553]:https://github.com/puma/puma/pull/553 "PR by @lowjoel, merged 2014-10-16" +[#568]:https://github.com/puma/puma/pull/568 "PR by @mariuz, merged 2014-10-16" +[#578]:https://github.com/puma/puma/pull/578 "PR by @danielbuechele, merged 2014-10-16" +[#581]:https://github.com/puma/puma/pull/581 "PR by @alexch, merged 2014-10-16" +[#590]:https://github.com/puma/puma/pull/590 "PR by @dmarcotte, merged 2014-10-16" +[#574]:https://github.com/puma/puma/issues/574 "Issue by @minasmart, closed 2014-09-05" +[#561]:https://github.com/puma/puma/pull/561 "PR by @krasnoukhov, merged 2014-08-04" +[#570]:https://github.com/puma/puma/pull/570 "PR by @havenwood, merged 2014-08-20" +[#520]:https://github.com/puma/puma/pull/520 "PR by 
@misfo, merged 2014-06-16" +[#530]:https://github.com/puma/puma/pull/530 "PR by @dmarcotte, merged 2014-06-16" +[#537]:https://github.com/puma/puma/pull/537 "PR by @vlmonk, merged 2014-06-16" +[#540]:https://github.com/puma/puma/pull/540 "PR by @allaire, merged 2014-05-27" +[#544]:https://github.com/puma/puma/pull/544 "PR by @chulkilee, merged 2014-06-03" +[#551]:https://github.com/puma/puma/pull/551 "PR by @jcxplorer, merged 2014-07-02" +[#487]:https://github.com/puma/puma/pull/487 "PR by @, merged 2014-03-06" +[#492]:https://github.com/puma/puma/pull/492 "PR by @, merged 2014-03-06" +[#493]:https://github.com/puma/puma/pull/493 "PR by @alepore, merged 2014-03-07" +[#503]:https://github.com/puma/puma/pull/503 "PR by @mariuz, merged 2014-04-12" +[#505]:https://github.com/puma/puma/pull/505 "PR by @sammcj, merged 2014-04-12" +[#506]:https://github.com/puma/puma/pull/506 "PR by @dsander, merged 2014-04-12" +[#510]:https://github.com/puma/puma/pull/510 "PR by @momer, merged 2014-04-12" +[#511]:https://github.com/puma/puma/pull/511 "PR by @macool, merged 2014-04-12" +[#514]:https://github.com/puma/puma/pull/514 "PR by @nanaya, merged 2014-04-12" +[#517]:https://github.com/puma/puma/pull/517 "PR by @misfo, merged 2014-04-12" +[#518]:https://github.com/puma/puma/pull/518 "PR by @alxgsv, merged 2014-04-12" +[#471]:https://github.com/puma/puma/pull/471 "PR by @arthurnn, merged 2014-02-28" +[#485]:https://github.com/puma/puma/pull/485 "PR by @runlevel5, merged 2014-03-01" +[#486]:https://github.com/puma/puma/pull/486 "PR by @joshwlewis, merged 2014-03-02" +[#490]:https://github.com/puma/puma/pull/490 "PR by @tobinibot, merged 2014-03-06" +[#491]:https://github.com/puma/puma/pull/491 "PR by @brianknight10, merged 2014-03-06" +[#438]:https://github.com/puma/puma/issues/438 "Issue by @mperham, closed 2014-01-25" +[#333]:https://github.com/puma/puma/issues/333 "Issue by @SamSaffron, closed 2014-01-26" +[#440]:https://github.com/puma/puma/issues/440 "Issue by @sudara, closed 
2014-01-25" +[#449]:https://github.com/puma/puma/issues/449 "Issue by @cezarsa, closed 2014-02-04" +[#444]:https://github.com/puma/puma/issues/444 "Issue by @le0pard, closed 2014-01-25" +[#370]:https://github.com/puma/puma/issues/370 "Issue by @pelcasandra, closed 2014-01-26" +[#377]:https://github.com/puma/puma/issues/377 "Issue by @mrbrdo, closed 2014-01-26" +[#406]:https://github.com/puma/puma/issues/406 "Issue by @simonrussell, closed 2014-01-25" +[#425]:https://github.com/puma/puma/issues/425 "Issue by @jhass, closed 2014-01-26" +[#432]:https://github.com/puma/puma/pull/432 "PR by @anatol, closed 2014-01-25" +[#428]:https://github.com/puma/puma/pull/428 "PR by @alexeyfrank, merged 2014-01-25" +[#429]:https://github.com/puma/puma/pull/429 "PR by @namusyaka, merged 2013-12-16" +[#431]:https://github.com/puma/puma/pull/431 "PR by @mrb, merged 2014-01-25" +[#433]:https://github.com/puma/puma/pull/433 "PR by @alepore, merged 2014-02-28" +[#437]:https://github.com/puma/puma/pull/437 "PR by @ibrahima, merged 2014-01-25" +[#446]:https://github.com/puma/puma/pull/446 "PR by @sudara, merged 2014-01-27" +[#451]:https://github.com/puma/puma/pull/451 "PR by @pwiebe, merged 2014-02-04" +[#453]:https://github.com/puma/puma/pull/453 "PR by @joevandyk, merged 2014-02-28" +[#470]:https://github.com/puma/puma/pull/470 "PR by @arthurnn, merged 2014-02-28" +[#472]:https://github.com/puma/puma/pull/472 "PR by @rubencaro, merged 2014-02-21" +[#480]:https://github.com/puma/puma/pull/480 "PR by @jjb, merged 2014-02-26" +[#481]:https://github.com/puma/puma/pull/481 "PR by @schneems, merged 2014-02-25" +[#482]:https://github.com/puma/puma/pull/482 "PR by @prathamesh-sonpatki, merged 2014-02-26" +[#483]:https://github.com/puma/puma/pull/483 "PR by @maxilev, merged 2014-02-26" +[#422]:https://github.com/puma/puma/issues/422 "Issue by @alexandru-calinoiu, closed 2013-12-05" +[#334]:https://github.com/puma/puma/issues/334 "Issue by @srgpqt, closed 2013-07-18" 
+[#179]:https://github.com/puma/puma/issues/179 "Issue by @betelgeuse, closed 2013-07-18" +[#332]:https://github.com/puma/puma/issues/332 "Issue by @SamSaffron, closed 2013-07-18" +[#317]:https://github.com/puma/puma/issues/317 "Issue by @masterkain, closed 2013-07-11" +[#309]:https://github.com/puma/puma/issues/309 "Issue by @masterkain, closed 2013-07-09" +[#166]:https://github.com/puma/puma/issues/166 "Issue by @emassip, closed 2013-07-06" +[#292]:https://github.com/puma/puma/issues/292 "Issue by @pulse00, closed 2013-07-06" +[#274]:https://github.com/puma/puma/issues/274 "Issue by @mrbrdo, closed 2013-07-06" +[#304]:https://github.com/puma/puma/issues/304 "Issue by @nandosola, closed 2013-07-06" +[#287]:https://github.com/puma/puma/issues/287 "Issue by @runlevel5, closed 2013-07-06" +[#256]:https://github.com/puma/puma/issues/256 "Issue by @rkh, closed 2013-07-01" +[#285]:https://github.com/puma/puma/issues/285 "Issue by @mkwiatkowski, closed 2013-06-20" +[#270]:https://github.com/puma/puma/issues/270 "Issue by @iamroody, closed 2013-06-01" +[#246]:https://github.com/puma/puma/issues/246 "Issue by @amencarini, closed 2013-06-01" +[#278]:https://github.com/puma/puma/issues/278 "Issue by @titanous, closed 2013-06-18" +[#251]:https://github.com/puma/puma/issues/251 "Issue by @cure, closed 2013-06-18" +[#252]:https://github.com/puma/puma/issues/252 "Issue by @vixns, closed 2013-06-01" +[#234]:https://github.com/puma/puma/issues/234 "Issue by @jgarber, closed 2013-04-08" +[#228]:https://github.com/puma/puma/issues/228 "Issue by @joelmats, closed 2013-04-29" +[#192]:https://github.com/puma/puma/issues/192 "Issue by @steverandy, closed 2013-02-09" +[#206]:https://github.com/puma/puma/issues/206 "Issue by @moll, closed 2013-03-19" +[#154]:https://github.com/puma/puma/issues/154 "Issue by @trevor, closed 2013-03-19" +[#208]:https://github.com/puma/puma/issues/208 "Issue by @ochronus, closed 2013-03-18" +[#189]:https://github.com/puma/puma/issues/189 "Issue by @tolot27, 
closed 2013-02-09" +[#185]:https://github.com/puma/puma/issues/185 "Issue by @nicolai86, closed 2013-02-06" +[#182]:https://github.com/puma/puma/issues/182 "Issue by @sriedel, closed 2013-02-05" +[#183]:https://github.com/puma/puma/issues/183 "Issue by @concept47, closed 2013-02-05" +[#176]:https://github.com/puma/puma/issues/176 "Issue by @cryo28, closed 2013-02-05" +[#180]:https://github.com/puma/puma/issues/180 "Issue by @tscolari, closed 2013-02-05" +[#170]:https://github.com/puma/puma/issues/170 "Issue by @nixme, closed 2012-11-29" +[#148]:https://github.com/puma/puma/issues/148 "Issue by @rafaelss, closed 2012-11-18" +[#128]:https://github.com/puma/puma/issues/128 "Issue by @fbjork, closed 2012-10-20" +[#155]:https://github.com/puma/puma/issues/155 "Issue by @ehlertij, closed 2012-10-13" +[#123]:https://github.com/puma/puma/pull/123 "PR by @jcoene, closed 2012-07-19" +[#111]:https://github.com/puma/puma/pull/111 "PR by @kenkeiter, closed 2012-07-19" +[#98]:https://github.com/puma/puma/pull/98 "PR by @Flink, closed 2012-05-15" +[#94]:https://github.com/puma/puma/issues/94 "Issue by @ender672, closed 2012-05-08" +[#84]:https://github.com/puma/puma/issues/84 "Issue by @sigursoft, closed 2012-04-29" +[#78]:https://github.com/puma/puma/issues/78 "Issue by @dstrelau, closed 2012-04-28" +[#79]:https://github.com/puma/puma/issues/79 "Issue by @jammi, closed 2012-04-28" +[#65]:https://github.com/puma/puma/issues/65 "Issue by @bporterfield, closed 2012-04-11" +[#54]:https://github.com/puma/puma/issues/54 "Issue by @masterkain, closed 2012-04-10" +[#58]:https://github.com/puma/puma/pull/58 "PR by @paneq, closed 2012-04-10" +[#61]:https://github.com/puma/puma/issues/61 "Issue by @dustalov, closed 2012-04-10" +[#63]:https://github.com/puma/puma/issues/63 "Issue by @seamusabshere, closed 2012-04-11" +[#60]:https://github.com/puma/puma/issues/60 "Issue by @paneq, closed 2012-04-11" +[#53]:https://github.com/puma/puma/pull/53 "PR by @sxua, closed 2012-04-11" diff --git 
a/vendor/cache/puma-fba741b91780/LICENSE b/vendor/cache/puma-fba741b91780/LICENSE new file mode 100644 index 000000000..14bfc858a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Evan Phoenix. Some code by Zed Shaw, (c) 2005. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/cache/puma-fba741b91780/README.md b/vendor/cache/puma-fba741b91780/README.md new file mode 100644 index 000000000..01843eed5 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/README.md @@ -0,0 +1,477 @@ +

+ +

+ +# Puma: A Ruby Web Server Built For Parallelism + +[![Actions](https://github.com/puma/puma/workflows/Tests/badge.svg?branch=master)](https://github.com/puma/puma/actions?query=workflow%3ATests) +[![Code Climate](https://codeclimate.com/github/puma/puma.svg)](https://codeclimate.com/github/puma/puma) +[![StackOverflow](https://img.shields.io/badge/stackoverflow-Puma-blue.svg)]( https://stackoverflow.com/questions/tagged/puma ) + +Puma is a **simple, fast, multi-threaded, and highly parallel HTTP 1.1 server for Ruby/Rack applications**. + +## Built For Speed & Parallelism + +Puma is a server for [Rack](https://github.com/rack/rack)-powered HTTP applications written in Ruby. It is: +* **Multi-threaded**. Each request is served in a separate thread. This helps you serve more requests per second with less memory use. +* **Multi-process**. "Pre-forks" in cluster mode, using less memory per-process thanks to copy-on-write memory. +* **Standalone**. With SSL support, zero-downtime rolling restarts and a built-in request bufferer, you can deploy Puma without any reverse proxy. +* **Battle-tested**. Our HTTP parser is inherited from Mongrel and has over 15 years of production use. Puma is currently the most popular Ruby webserver, and is the default server for Ruby on Rails. + +Originally designed as a server for [Rubinius](https://github.com/rubinius/rubinius), Puma also works well with Ruby (MRI) and JRuby. + +On MRI, there is a Global VM Lock (GVL) that ensures only one thread can run Ruby code at a time. But if you're doing a lot of blocking IO (such as HTTP calls to external APIs like Twitter), Puma still improves MRI's throughput by allowing IO waiting to be done in parallel. Truly parallel Ruby implementations (TruffleRuby, JRuby) don't have this limitation. + +## Quick Start + +``` +$ gem install puma +$ puma +``` + +Without arguments, puma will look for a rackup (.ru) file in +working directory called `config.ru`. 
+ +## SSL Connection Support + +Puma will install/compile with support for ssl sockets, assuming OpenSSL +development files are installed on the system. + +If the system does not have OpenSSL development files installed, Puma will +install/compile, but it will not allow ssl connections. + +## Frameworks + +### Rails + +Puma is the default server for Rails, included in the generated Gemfile. + +Start your server with the `rails` command: + +``` +$ rails server +``` + +Many configuration options and Puma features are not available when using `rails server`. It is recommended that you use Puma's executable instead: + +``` +$ bundle exec puma +``` + +### Sinatra + +You can run your Sinatra application with Puma from the command line like this: + +``` +$ ruby app.rb -s Puma +``` + +In order to actually configure Puma using a config file, like `puma.rb`, however, you need to use the `puma` executable. To do this, you must add a rackup file to your Sinatra app: + +```ruby +# config.ru +require './app' +run Sinatra::Application +``` + +You can then start your application using: + +``` +$ bundle exec puma +``` + +## Configuration + +Puma provides numerous options. Consult `puma -h` (or `puma --help`) for a full list of CLI options, or see `Puma::DSL` or [dsl.rb](https://github.com/puma/puma/blob/master/lib/puma/dsl.rb). + +You can also find several configuration examples as part of the +[test](https://github.com/puma/puma/tree/master/test/config) suite. + +For debugging purposes, you can set the environment variable `PUMA_LOG_CONFIG` with a value +and the loaded configuration will be printed as part of the boot process. + +### Thread Pool + +Puma uses a thread pool. You can set the minimum and maximum number of threads that are available in the pool with the `-t` (or `--threads`) flag: + +``` +$ puma -t 8:32 +``` + +Puma will automatically scale the number of threads, from the minimum until it caps out at the maximum, based on how much traffic is present. 
The current default is `0:16` and on MRI is `0:5`. Feel free to experiment, but be careful not to set the number of maximum threads to a large number, as you may exhaust resources on the system (or cause contention for the Global VM Lock, when using MRI). + +Be aware that additionally Puma creates threads on its own for internal purposes (e.g. handling slow clients). So, even if you specify -t 1:1, expect around 7 threads created in your application. + +### Clustered mode + +Puma also offers "clustered mode". Clustered mode `fork`s workers from a master process. Each child process still has its own thread pool. You can tune the number of workers with the `-w` (or `--workers`) flag: + +``` +$ puma -t 8:32 -w 3 +``` + +Or with the `WEB_CONCURRENCY` environment variable: + +``` +$ WEB_CONCURRENCY=3 puma -t 8:32 +``` + +Note that threads are still used in clustered mode, and the `-t` thread flag setting is per worker, so `-w 2 -t 16:16` will spawn 32 threads in total, with 16 in each worker process. + +If the `WEB_CONCURRENCY` environment variable is set to `"auto"` and the `concurrent-ruby` gem is available in your application, Puma will set the worker process count to the result of [available processors](https://ruby-concurrency.github.io/concurrent-ruby/master/Concurrent.html#available_processor_count-class_method). + +For an in-depth discussion of the tradeoffs of thread and process count settings, [see our docs](https://github.com/puma/puma/blob/9282a8efa5a0c48e39c60d22ca70051a25df9f55/docs/kubernetes.md#workers-per-pod-and-other-config-issues). + +In clustered mode, Puma can "preload" your application. This loads all the application code *prior* to forking. Preloading reduces total memory usage of your application via an operating system feature called [copy-on-write](https://en.wikipedia.org/wiki/Copy-on-write). 
+ +If the `WEB_CONCURRENCY` environment variable is set to a value > 1 (and `--prune-bundler` has not been specified), preloading will be enabled by default. Otherwise, you can use the `--preload` flag from the command line: + +``` +$ puma -w 3 --preload +``` + +Or, if you're using a configuration file, you can use the `preload_app!` method: + +```ruby +# config/puma.rb +workers 3 +preload_app! +``` + +Preloading can’t be used with phased restart, since phased restart kills and restarts workers one-by-one, and preloading copies the code of master into the workers. + +#### Clustered mode hooks + +When using clustered mode, Puma's configuration DSL provides `before_fork` and `on_worker_boot` +hooks to run code when the master process forks and child workers are booted respectively. + +It is recommended to use these hooks with `preload_app!`, otherwise constants loaded by your +application (such as `Rails`) will not be available inside the hooks. + +```ruby +# config/puma.rb +before_fork do + # Add code to run inside the Puma master process before it forks a worker child. +end + +on_worker_boot do + # Add code to run inside the Puma worker process after forking. +end +``` + +In addition, there is an `on_refork` hook which is used only in [`fork_worker` mode](docs/fork_worker.md), +when the worker 0 child process forks a grandchild worker: + +```ruby +on_refork do + # Used only when fork_worker mode is enabled. Add code to run inside the Puma worker 0 + # child process before it forks a grandchild worker. +end +``` + +Importantly, note the following considerations when Ruby forks a child process: + +1. File descriptors such as network sockets **are** copied from the parent to the forked + child process. Dual-use of the same sockets by parent and child will result in I/O conflicts + such as `SocketError`, `Errno::EPIPE`, and `EOFError`. +2. 
Background Ruby threads, including threads used by various third-party gems for connection + monitoring, etc., are **not** copied to the child process. Often this does not cause + immediate problems until a third-party connection goes down, at which point there will + be no supervisor to reconnect it. + +Therefore, we recommend the following: + +1. If possible, do not establish any socket connections (HTTP, database connections, etc.) + inside Puma's master process when booting. +2. If (1) is not possible, use `before_fork` and `on_refork` to disconnect the parent's socket + connections when forking, so that they are not accidentally copied to the child process. +3. Use `on_worker_boot` to restart any background threads on the forked child. + +#### Master process lifecycle hooks + +Puma's configuration DSL provides master process lifecycle hooks `on_booted`, `on_restart`, and `on_stopped` +which may be used to specify code blocks to run on each event: + +```ruby +# config/puma.rb +on_booted do + # Add code to run in the Puma master process after it boots, + # and also after a phased restart completes. +end + +on_restart do + # Add code to run in the Puma master process when it receives + # a restart command but before it restarts. +end + +on_stopped do + # Add code to run in the Puma master process when it receives + # a stop command but before it shuts down. +end +``` + +### Error handling + +If Puma encounters an error outside of the context of your application, it will respond with a 400/500 and a simple +textual error message (see `Puma::Server#lowlevel_error` or [server.rb](https://github.com/puma/puma/blob/master/lib/puma/server.rb)). +You can specify custom behavior for this scenario. 
For example, you can report the error to your third-party +error-tracking service (in this example, [rollbar](https://rollbar.com)): + +```ruby +lowlevel_error_handler do |e, env, status| + if status == 400 + message = "The server could not process the request due to an error, such as an incorrectly typed URL, malformed syntax, or a URL that contains illegal characters.\n" + else + message = "An error has occurred, and engineers have been informed. Please reload the page. If you continue to have problems, contact support@example.com\n" + Rollbar.critical(e) + end + + [status, {}, [message]] +end +``` + +### Binding TCP / Sockets + +Bind Puma to a socket with the `-b` (or `--bind`) flag: + +``` +$ puma -b tcp://127.0.0.1:9292 +``` + +To use a UNIX Socket instead of TCP: + +``` +$ puma -b unix:///var/run/puma.sock +``` + +If you need to change the permissions of the UNIX socket, just add a umask parameter: + +``` +$ puma -b 'unix:///var/run/puma.sock?umask=0111' +``` + +Need a bit of security? Use SSL sockets: + +``` +$ puma -b 'ssl://127.0.0.1:9292?key=path_to_key&cert=path_to_cert' +``` +#### Self-signed SSL certificates (via the [`localhost`] gem, for development use): + +Puma supports the [`localhost`] gem for self-signed certificates. This is particularly useful if you want to use Puma with SSL locally, and self-signed certificates will work for your use-case. Currently, the integration can only be used in MRI. 
+ +Puma automatically configures SSL when the [`localhost`] gem is loaded in a `development` environment: + +Add the gem to your Gemfile: +```ruby +group(:development) do + gem 'localhost' +end +``` + +And require it implicitly using bundler: +```ruby +require "bundler" +Bundler.require(:default, ENV["RACK_ENV"].to_sym) +``` + +Alternatively, you can require the gem in your configuration file, either `config/puma/development.rb`, `config/puma.rb`, or set via the `-C` cli option: +```ruby +require 'localhost' +# configuration methods (from Puma::DSL) as needed +``` + +Additionally, Puma must be listening to an SSL socket: + +```shell +$ puma -b 'ssl://localhost:9292' -C config/use_local_host.rb + +# The following options allow you to reach Puma over HTTP as well: +$ puma -b ssl://localhost:9292 -b tcp://localhost:9393 -C config/use_local_host.rb +``` + +[`localhost`]: https://github.com/socketry/localhost + +#### Controlling SSL Cipher Suites + +To use or avoid specific SSL ciphers for TLSv1.2 and below, use `ssl_cipher_filter` or `ssl_cipher_list` options. + +##### Ruby: + +``` +$ puma -b 'ssl://127.0.0.1:9292?key=path_to_key&cert=path_to_cert&ssl_cipher_filter=!aNULL:AES+SHA' +``` + +##### JRuby: + +``` +$ puma -b 'ssl://127.0.0.1:9292?keystore=path_to_keystore&keystore-pass=keystore_password&ssl_cipher_list=TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA' +``` + +To configure the available TLSv1.3 ciphersuites, use `ssl_ciphersuites` option (not available for JRuby). + +##### Ruby: + +``` +$ puma -b 'ssl://127.0.0.1:9292?key=path_to_key&cert=path_to_cert&ssl_ciphersuites=TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256' +``` + +See https://www.openssl.org/docs/man1.1.1/man1/ciphers.html for cipher filter format and full list of cipher suites. 
+ +Disable TLS v1 with the `no_tlsv1` option: + +``` +$ puma -b 'ssl://127.0.0.1:9292?key=path_to_key&cert=path_to_cert&no_tlsv1=true' +``` + +#### Controlling OpenSSL Verification Flags + +To enable verification flags offered by OpenSSL, use `verification_flags` (not available for JRuby): + +``` +$ puma -b 'ssl://127.0.0.1:9292?key=path_to_key&cert=path_to_cert&verification_flags=PARTIAL_CHAIN' +``` + +You can also set multiple verification flags (by separating them with a comma): + +``` +$ puma -b 'ssl://127.0.0.1:9292?key=path_to_key&cert=path_to_cert&verification_flags=PARTIAL_CHAIN,CRL_CHECK' +``` + +List of available flags: `USE_CHECK_TIME`, `CRL_CHECK`, `CRL_CHECK_ALL`, `IGNORE_CRITICAL`, `X509_STRICT`, `ALLOW_PROXY_CERTS`, `POLICY_CHECK`, `EXPLICIT_POLICY`, `INHIBIT_ANY`, `INHIBIT_MAP`, `NOTIFY_POLICY`, `EXTENDED_CRL_SUPPORT`, `USE_DELTAS`, `CHECK_SS_SIGNATURE`, `TRUSTED_FIRST`, `SUITEB_128_LOS_ONLY`, `SUITEB_192_LOS`, `SUITEB_128_LOS`, `PARTIAL_CHAIN`, `NO_ALT_CHAINS`, `NO_CHECK_TIME` +(see https://www.openssl.org/docs/manmaster/man3/X509_VERIFY_PARAM_set_hostflags.html#VERIFICATION-FLAGS). + +#### Controlling OpenSSL Password Decryption + +To enable runtime decryption of an encrypted SSL key (not available for JRuby), use `key_password_command`: + +``` +$ puma -b 'ssl://127.0.0.1:9292?key=path_to_key&cert=path_to_cert&key_password_command=/path/to/command.sh' +``` + +`key_password_command` must: + +1. Be executable by Puma. +2. Print the decryption password to stdout. + + For example: + +```shell +#!/bin/sh + +echo "this is my password" +``` + +`key_password_command` can be used with `key` or `key_pem`. If the key +is not encrypted, the executable will not be called. + +### Control/Status Server + +Puma has a built-in status and control app that can be used to query and control Puma. + +``` +$ puma --control-url tcp://127.0.0.1:9293 --control-token foo +``` + +Puma will start the control server on localhost port 9293. 
All requests to the control server will need to include control token (in this case, `token=foo`) as a query parameter. This allows for simple authentication. Check out `Puma::App::Status` or [status.rb](https://github.com/puma/puma/blob/master/lib/puma/app/status.rb) to see what the status app has available. + +You can also interact with the control server via `pumactl`. This command will restart Puma: + +``` +$ pumactl --control-url 'tcp://127.0.0.1:9293' --control-token foo restart +``` + +To see a list of `pumactl` options, use `pumactl --help`. + +### Configuration File + +You can also provide a configuration file with the `-C` (or `--config`) flag: + +``` +$ puma -C /path/to/config +``` + +If no configuration file is specified, Puma will look for a configuration file at `config/puma.rb`. If an environment is specified (via the `--environment` flag or through the `APP_ENV`, `RACK_ENV`, or `RAILS_ENV` environment variables) Puma looks for a configuration file at `config/puma/.rb` and then falls back to `config/puma.rb`. + +If you want to prevent Puma from looking for a configuration file in those locations, include the `--no-config` flag: + +``` +$ puma --no-config + +# or + +$ puma -C "-" +``` + +The other side-effects of setting the environment are whether to show stack traces (in `development` or `test`), and setting RACK_ENV may potentially affect middleware looking for this value to change their behavior. The default puma RACK_ENV value is `development`. You can see all config default values in `Puma::Configuration#puma_default_options` or [configuration.rb](https://github.com/puma/puma/blob/61c6213fbab/lib/puma/configuration.rb#L182-L204). + +Check out `Puma::DSL` or [dsl.rb](https://github.com/puma/puma/blob/master/lib/puma/dsl.rb) to see all available options. + +## Restart + +Puma includes the ability to restart itself. When available (MRI, Rubinius, JRuby), Puma performs a "hot restart". 
This is the same functionality available in *Unicorn* and *NGINX* which keep the server sockets open between restarts. This makes sure that no pending requests are dropped while the restart is taking place. + +For more, see the [Restart documentation](docs/restart.md). + +## Signals + +Puma responds to several signals. A detailed guide to using UNIX signals with Puma can be found in the [Signals documentation](docs/signals.md). + +## Platform Constraints + +Some platforms do not support all Puma features. + + * **JRuby**, **Windows**: server sockets are not seamless on restart, they must be closed and reopened. These platforms have no way to pass descriptors into a new process that is exposed to Ruby. Also, cluster mode is not supported due to a lack of fork(2). + * **Windows**: Cluster mode is not supported due to a lack of fork(2). + * **Kubernetes**: The way Kubernetes handles pod shutdowns interacts poorly with server processes implementing graceful shutdown, like Puma. See the [kubernetes section of the documentation](docs/kubernetes.md) for more details. + +## Known Bugs + +For MRI versions 2.2.7, 2.2.8, 2.2.9, 2.2.10, 2.3.4 and 2.4.1, you may see ```stream closed in another thread (IOError)```. It may be caused by a [Ruby bug](https://bugs.ruby-lang.org/issues/13632). It can be fixed with the gem https://rubygems.org/gems/stopgap_13632: + +```ruby +if %w(2.2.7 2.2.8 2.2.9 2.2.10 2.3.4 2.4.1).include? RUBY_VERSION + begin + require 'stopgap_13632' + rescue LoadError + end +end +``` + +## Deployment + + * Puma has support for Capistrano with an [external gem](https://github.com/seuros/capistrano-puma). + + * Additionally, Puma has support for built-in daemonization via the [puma-daemon](https://github.com/kigster/puma-daemon) ruby gem. The gem restores the `daemonize` option that was removed from Puma starting version 5, but only for MRI Ruby. + + +It is common to use process monitors with Puma. 
Modern process monitors like systemd or rc.d +provide continuous monitoring and restarts for increased reliability in production environments: + +* [rc.d](docs/jungle/rc.d/README.md) +* [systemd](docs/systemd.md) + +Community guides: + +* [Deploying Puma on OpenBSD using relayd and httpd](https://gist.github.com/anon987654321/4532cf8d6c59c1f43ec8973faa031103) + +## Community Extensions + +### Plugins + +* [puma-metrics](https://github.com/harmjanblok/puma-metrics) — export Puma metrics to Prometheus +* [puma-plugin-statsd](https://github.com/yob/puma-plugin-statsd) — send Puma metrics to statsd +* [puma-plugin-systemd](https://github.com/sj26/puma-plugin-systemd) — deeper integration with systemd for notify, status and watchdog. Puma 5.1.0 integrated notify and watchdog, which probably conflicts with this plugin. Puma 6.1.0 added status support which obsoletes the plugin entirely. +* [puma-plugin-telemetry](https://github.com/babbel/puma-plugin-telemetry) - telemetry plugin for Puma offering various targets to publish +* [puma-acme](https://github.com/anchordotdev/puma-acme) - automatic SSL/HTTPS certificate provisioning and setup + +### Monitoring + +* [puma-status](https://github.com/ylecuyer/puma-status) — Monitor CPU/Mem/Load of running puma instances from the CLI + +## Contributing + +Find details for contributing in the [contribution guide](CONTRIBUTING.md). + +## License + +Puma is copyright Evan Phoenix and contributors, licensed under the BSD 3-Clause license. See the included LICENSE file for details. 
diff --git a/vendor/cache/puma-fba741b91780/Rakefile b/vendor/cache/puma-fba741b91780/Rakefile new file mode 100644 index 000000000..c60a39881 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/Rakefile @@ -0,0 +1,102 @@ +require "bundler/setup" +require "rake/testtask" +require "rake/extensiontask" +require "rake/javaextensiontask" +require_relative 'lib/puma/detect' +require 'rubygems/package_task' +require 'bundler/gem_tasks' + +begin + # Add rubocop task + require "rubocop/rake_task" + RuboCop::RakeTask.new +rescue LoadError +end + +gemspec = Gem::Specification.load("puma.gemspec") +Gem::PackageTask.new(gemspec).define + +Rake::FileUtilsExt.verbose_flag = !!ENV['PUMA_TEST_DEBUG'] +# generate extension code using Ragel (C and Java) +desc "Generate extension code (C and Java) using Ragel" +task :ragel + +file 'ext/puma_http11/http11_parser.c' => ['ext/puma_http11/http11_parser.rl'] do |t| + begin + sh "ragel #{t.prerequisites.last} -C -G2 -I ext/puma_http11 -o #{t.name}" + rescue + fail "Could not build wrapper using Ragel (it failed or not installed?)" + end +end +task :ragel => ['ext/puma_http11/http11_parser.c'] + +file 'ext/puma_http11/org/jruby/puma/Http11Parser.java' => ['ext/puma_http11/http11_parser.java.rl'] do |t| + begin + sh "ragel #{t.prerequisites.last} -J -G2 -I ext/puma_http11 -o #{t.name}" + rescue + fail "Could not build wrapper using Ragel (it failed or not installed?)" + end +end +task :ragel => ['ext/puma_http11/org/jruby/puma/Http11Parser.java'] + +if !Puma.jruby? + # compile extensions using rake-compiler + # C (MRI, Rubinius) + Rake::ExtensionTask.new("puma_http11", gemspec) do |ext| + # place extension inside namespace + ext.lib_dir = "lib/puma" + + CLEAN.include "lib/puma/{1.8,1.9}" + CLEAN.include "lib/puma/puma_http11.rb" + end +else + # Java (JRuby) + # ::Rake::JavaExtensionTask.source_files supplies the list of files to + # compile. At present, it only works with a glob prefixed with @ext_dir. 
+ # override it so we can select the files + class ::Rake::JavaExtensionTask + def source_files + if ENV["PUMA_DISABLE_SSL"] + # uses no_ssl/PumaHttp11Service.java, removes MiniSSL.java + FileList[ + File.join(@ext_dir, "no_ssl/PumaHttp11Service.java"), + File.join(@ext_dir, "org/jruby/puma/Http11.java"), + File.join(@ext_dir, "org/jruby/puma/Http11Parser.java") + ] + else + FileList[ + File.join(@ext_dir, "PumaHttp11Service.java"), + File.join(@ext_dir, "org/jruby/puma/Http11.java"), + File.join(@ext_dir, "org/jruby/puma/Http11Parser.java"), + File.join(@ext_dir, "org/jruby/puma/MiniSSL.java") + ] + end + end + end + + Rake::JavaExtensionTask.new("puma_http11", gemspec) do |ext| + ext.lib_dir = "lib/puma" + ext.release = '8' + end +end + +# the following is a fat-binary stub that will be used when +# require 'puma/puma_http11' and will use either 1.8 or 1.9 version depending +# on RUBY_VERSION +file "lib/puma/puma_http11.rb" do |t| + File.open(t.name, "w") do |f| + f.puts "RUBY_VERSION =~ /(\d+.\d+)/" + f.puts 'require "puma/#{$1}/puma_http11"' + end +end + +Rake::TestTask.new(:test) + +# tests require extension be compiled, but depend on the platform +if Puma.jruby? + task :test => [:java] +else + task :test => [:compile] +end + +task :default => [:rubocop, :test] diff --git a/vendor/cache/puma-fba741b91780/Release.md b/vendor/cache/puma-fba741b91780/Release.md new file mode 100644 index 000000000..dc9ffdeb0 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/Release.md @@ -0,0 +1,17 @@ +## Before Release + +- Make sure tests pass and your last local commit matches master. +- Run tests with latest jruby +- Update the version in `const.rb`. +- On minor or major version updates i.e. from 3.10.x to 3.11.x update the "codename" in `const.rb`. +- Create history entries with https://github.com/MSP-Greg/issue-pr-link + +# Release process + +Using "3.7.1" as a version example. + +1. `bundle exec rake release` +1. Switch to latest JRuby version +1. `rake java gem` +1. 
`gem push pkg/puma-VERSION-java.gem` +1. Add release on Github at https://github.com/puma/puma/releases/new diff --git a/vendor/cache/puma-fba741b91780/SECURITY.md b/vendor/cache/puma-fba741b91780/SECURITY.md new file mode 100644 index 000000000..e5d97c853 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| :------------ | :--------: | +| Latest release in 6.x | ✅ | +| Latest release in 5.x | ✅ | +| All other releases | ❌ | + +## Reporting a Vulnerability + +Contact [Evan Phoenix.](https://github.com/evanphx) diff --git a/vendor/cache/puma-fba741b91780/benchmarks/local/bench_base.rb b/vendor/cache/puma-fba741b91780/benchmarks/local/bench_base.rb new file mode 100644 index 000000000..3ecad8848 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/local/bench_base.rb @@ -0,0 +1,394 @@ +# frozen_string_literal: true + +require 'optparse' + +module TestPuma + + HOST4 = ENV.fetch('PUMA_TEST_HOST4', '127.0.0.1') + HOST6 = ENV.fetch('PUMA_TEST_HOST6', '::1') + PORT = ENV.fetch('PUMA_TEST_PORT', 40001).to_i + + # Array of response body sizes. If specified, set by ENV['PUMA_TEST_SIZES'] + # + SIZES = if (t = ENV['PUMA_TEST_SIZES']) + t.split(',').map(&:to_i).freeze + else + [1, 10, 100, 256, 512, 1024, 2048].freeze + end + + TYPES = [[:a, 'array'].freeze, [:c, 'chunk'].freeze, + [:s, 'string'].freeze, [:i, 'io'].freeze].freeze + + # Creates files used by 'i' (File/IO) responses. Placed in + # "#{Dir.tmpdir}/.puma_response_body_io" + # @param sizes [Array ] Array of sizes + # + def self.create_io_files(sizes = SIZES) + require 'tmpdir' + tmp_folder = "#{Dir.tmpdir}/.puma_response_body_io" + Dir.mkdir(tmp_folder) unless Dir.exist? tmp_folder + fn_format = "#{tmp_folder}/body_io_%04d.txt" + str = ("── Puma Hello World! ── " * 31) + "── Puma Hello World! ──\n" # 1 KB + sizes.each do |len| + suf = format "%04d", len + fn = format fn_format, len + unless File.exist? 
fn + body = "Hello World\n#{str}".byteslice(0,1023) + "\n" + (str * (len-1)) + File.write fn, body + end + end + end + + # Base class for generating client request streams + # + class BenchBase + # We're running under GitHub Actions + IS_GHA = ENV['GITHUB_ACTIONS'] == 'true' + + WRK_PERCENTILE = [0.50, 0.75, 0.9, 0.99, 1.0].freeze + + HDR_BODY_CONF = "Body-Conf: " + + # extracts 'type' string from `-b` argument + TYPES_RE = /\A[acis]+/.freeze + + # extracts 'size' string from `-b` argument + SIZES_RE = /\d[\d,]*\z/.freeze + + def initialize + sleep 5 # wait for server to boot + + @thread_loops = nil + @clients_per_thread = nil + @req_per_client = nil + @body_sizes = SIZES + @body_types = TYPES + @dly_app = nil + @bind_type = :tcp + + @ios_to_close = [] + + setup_options + + unless File.exist? @state_file + puts "Can't find state file '#{@state_file}'" + exit 1 + end + + mstr_pid = File.binread(@state_file)[/^pid: +(\d+)/, 1].to_i + begin + Process.kill 0, mstr_pid + rescue Errno::ESRCH + puts 'Puma server stopped?' 
+ exit 1 + rescue Errno::EPERM + end + + case @bind_type + when :ssl, :ssl4, :tcp, :tcp4 + @bind_host = HOST4 + @bind_port = PORT + when :ssl6, :tcp6 + @bind_host = HOST6 + @bind_port = PORT + when :unix + @bind_path = 'tmp/benchmark_skt.unix' + when :aunix + @bind_path = '@benchmark_skt.aunix' + else + exit 1 + end + end + + def setup_options + OptionParser.new do |o| + o.on "-T", "--stream-threads THREADS", OptionParser::DecimalInteger, "request_stream: loops/threads" do |arg| + @stream_threads = arg.to_i + end + + o.on "-c", "--wrk-connections CONNECTIONS", OptionParser::DecimalInteger, "request_stream: clients_per_thread" do |arg| + @wrk_connections = arg.to_i + end + + o.on "-R", "--requests REQUESTS", OptionParser::DecimalInteger, "request_stream: requests per socket" do |arg| + @req_per_socket = arg.to_i + end + + o.on "-D", "--duration DURATION", OptionParser::DecimalInteger, "wrk/stream: duration" do |arg| + @duration = arg.to_i + end + + o.on "-b", "--body_conf BODY_CONF", String, "CI RackUp: type and size of response body in kB" do |arg| + if (types = arg[TYPES_RE]) + @body_types = TYPES.select { |a| types.include? a[0].to_s } + end + + if (sizes = arg[SIZES_RE]) + @body_sizes = sizes.split(',') + @body_sizes.map!(&:to_i) + @body_sizes.sort! 
+ end + end + + o.on "-d", "--dly_app DELAYAPP", Float, "CI RackUp: app response delay" do |arg| + @dly_app = arg.to_f + end + + o.on "-s", "--socket SOCKETTYPE", String, "Bind type: tcp, ssl, tcp6, ssl6, unix, aunix" do |arg| + @bind_type = arg.to_sym + end + + o.on "-S", "--state PUMA_STATEFILE", String, "Puma Server: state file" do |arg| + @state_file = arg + end + + o.on "-t", "--threads PUMA_THREADS", String, "Puma Server: threads" do |arg| + @threads = arg + end + + o.on "-w", "--workers PUMA_WORKERS", OptionParser::DecimalInteger, "Puma Server: workers" do |arg| + @workers = arg.to_i + end + + o.on "-W", "--wrk_bind WRK_STR", String, "wrk: bind string" do |arg| + @wrk_bind_str = arg + end + + o.on("-h", "--help", "Prints this help") do + puts o + exit + end + end.parse! ARGV + end + + def close_clients + closed = 0 + @ios_to_close.each do |socket| + if socket && socket.to_io.is_a?(IO) && !socket.closed? + begin + if @bind_type == :ssl + socket.sysclose + else + socket.close + end + closed += 1 + rescue Errno::EBADF + end + end + end + puts "Closed #{closed} sockets" unless closed.zero? + end + + # Runs wrk and returns data from its output. + # @param cmd [String] The wrk command string, with arguments + # @return [Hash] The wrk data + # + def run_wrk_parse(cmd, log: false) + STDOUT.syswrite cmd.ljust 55 + + if @dly_app + cmd.sub! 
' -H ', " -H 'Dly: #{@dly_app.round 4}' -H " + end + + wrk_output = %x[#{cmd}] + if log + puts '', wrk_output, '' + end + + wrk_data = "#{wrk_output[/\A.+ connections/m]}\n#{wrk_output[/ Thread Stats.+\z/m]}" + + ary = wrk_data[/^ +\d+ +requests.+/].strip.split ' ' + + fmt = " | %6s %s %s %7s %8s %s\n" + + STDOUT.syswrite format(fmt, *ary) + + hsh = {} + + rps = wrk_data[/^Requests\/sec: +([\d.]+)/, 1].to_f + requests = wrk_data[/^ +(\d+) +requests/, 1].to_i + + transfer = wrk_data[/^Transfer\/sec: +([\d.]+)/, 1].to_f + transfer_unit = wrk_data[/^Transfer\/sec: +[\d.]+(GB|KB|MB)/, 1] + transfer_mult = mult_for_unit transfer_unit + + read = wrk_data[/ +([\d.]+)(GB|KB|MB) +read$/, 1].to_f + read_unit = wrk_data[/ +[\d.]+(GB|KB|MB) +read$/, 1] + read_mult = mult_for_unit read_unit + + resp_transfer = (transfer * transfer_mult)/rps + resp_read = (read * read_mult)/requests.to_f + + mult = transfer/read + + hsh[:resp_size] = ((resp_transfer * mult + resp_read)/(mult + 1)).round + + hsh[:resp_size] = hsh[:resp_size] - 1770 - hsh[:resp_size].to_s.length + + hsh[:rps] = rps.round + hsh[:requests] = requests + + if (t = wrk_data[/^ +Socket errors: +(.+)/, 1]) + hsh[:errors] = t + end + + read = wrk_data[/ +([\d.]+)(GB|KB|MB) +read$/, 1].to_f + unit = wrk_data[/ +[\d.]+(GB|KB|MB) +read$/, 1] + + mult = mult_for_unit unit + + hsh[:read] = (mult * read).round + + if hsh[:errors] + t = hsh[:errors] + hsh[:errors] = t.sub('connect ', 'c').sub('read ', 'r') + .sub('write ', 'w').sub('timeout ', 't') + end + + t_re = ' +([\d.ums]+)' + + latency = + wrk_data.match(/^ +50%#{t_re}\s+75%#{t_re}\s+90%#{t_re}\s+99%#{t_re}/).captures + # add up max time + latency.push wrk_data[/^ +Latency.+/].split(' ')[-2] + + hsh[:times_summary] = WRK_PERCENTILE.zip(latency.map do |t| + if t.end_with?('ms') + t.to_f + elsif t.end_with?('us') + t.to_f/1000 + elsif t.end_with?('s') + t.to_f * 1000 + else + 0 + end + end).to_h + hsh + end + + def mult_for_unit(unit) + case unit + when 'KB' then 1_024 + 
when 'MB' then 1_024**2 + when 'GB' then 1_024**3 + end + end + + # Outputs info about the run. Example output: + # + # benchmarks/local/response_time_wrk.sh -w2 -t5:5 -s tcp6 + # Server cluster mode -w2 -t5:5, bind: tcp6 + # Puma repo branch 00-response-refactor + # ruby 3.2.0dev (2022-06-11T12:26:03Z master 28e27ee76e) +YJIT [x86_64-linux] + # + def env_log + puts "#{ENV['PUMA_BENCH_CMD']} #{ENV['PUMA_BENCH_ARGS']}" + puts @workers ? + "Server cluster mode -w#{@workers} -t#{@threads}, bind: #{@bind_type}" : + "Server single mode -t#{@threads}, bind: #{@bind_type}" + + branch = %x[git branch][/^\* (.*)/, 1] + if branch + puts "Puma repo branch #{branch.strip}", RUBY_DESCRIPTION + else + const = File.read File.expand_path('../../lib/puma/const.rb', __dir__) + puma_version = const[/^ +PUMA_VERSION[^'"]+['"]([^\s'"]+)/, 1] + puts "Puma version #{puma_version}", RUBY_DESCRIPTION + end + end + + # Parses data returned by `PumaInfo.run stats` + # @return [Hash] The data from Puma stats + # + def parse_stats + stats = {} + + obj = @puma_info.run 'stats' + + worker_status = obj[:worker_status] + + worker_status.each do |w| + pid = w[:pid] + req_cnt = w[:last_status][:requests_count] + id = format 'worker-%01d-%02d', w[:phase], w[:index] + hsh = { + pid: pid, + requests: req_cnt - @worker_req_ttl[pid], + backlog: w[:last_status][:backlog] + } + @pids[pid] = id + @worker_req_ttl[pid] = req_cnt + stats[id] = hsh + end + + stats + end + + # Runs gc in the server, then parses data from + # `smem -c 'pid rss pss uss command'` + # @return [Hash] The data from smem + # + def parse_smem + @puma_info.run 'gc' + sleep 1 + + hsh_smem = Hash.new [] + pids = @pids.keys + + smem_info = %x[smem -c 'pid rss pss uss command'] + + smem_info.lines.each do |l| + ary = l.strip.split ' ', 5 + if pids.include? 
ary[0].to_i + hsh_smem[@pids[ary[0].to_i]] = { + pid: ary[0].to_i, + rss: ary[1].to_i, + pss: ary[2].to_i, + uss: ary[3].to_i + } + end + end + hsh_smem.sort.to_h + end + end + + class ResponseTimeBase < BenchBase + def run + @puma_info = PumaInfo.new ['-S', @state_file] + end + + # Prints summarized data. Example: + # ``` + # Body ────────── req/sec ────────── ─────── req 50% times ─────── + # KB array chunk string io array chunk string io + # 1 13760 13492 13817 9610 0.744 0.759 0.740 1.160 + # 10 13536 13077 13492 9269 0.759 0.785 0.760 1.190 + # ``` + # + # @param summaries [Hash] generated in subclasses + # + def overall_summary(summaries) + names = +'' + @body_types.each { |_, t_desc| names << t_desc.rjust(8) } + + puts "\nBody ────────── req/sec ────────── ─────── req 50% times ───────" \ + "\n KB #{names.ljust 32}#{names}" + + len = @body_types.length + digits = [4 - Math.log10(@max_050_time).to_i, 3].min + + fmt_rps = ('%6d ' * len).strip + fmt_times = (digits < 0 ? " %6d" : " %6.#{digits}f") * len + + @body_sizes.each do |size| + line = format '%-5d ', size + resp = '' + line << format(fmt_rps , *@body_types.map { |_, t_desc| summaries[size][t_desc][:rps] }).ljust(30) + line << format(fmt_times, *@body_types.map { |_, t_desc| summaries[size][t_desc][:times_summary][0.5] }) + puts line + end + puts '─' * 69 + end + end + +end diff --git a/vendor/cache/puma-fba741b91780/benchmarks/local/bench_base.sh b/vendor/cache/puma-fba741b91780/benchmarks/local/bench_base.sh new file mode 100755 index 000000000..892d84040 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/local/bench_base.sh @@ -0,0 +1,175 @@ +#!/bin/bash + +# -T client threads (wrk -t) +# -c connections per client thread +# -R requests per client +# +# Total connections/requests = l * c * r +# +# -b response body size kB +# -d app delay +# +# -s Puma bind socket type, default ssl, also tcp or unix +# -t Puma threads +# -w Puma workers +# -r Puma rackup file + +if [[ "$@" =~ ^[^-].* ]]; then 
+ echo "Error: Invalid option was specified $1" + exit +fi + +PUMA_BENCH_CMD=$0 +PUMA_BENCH_ARGS=$@ + +export PUMA_BENCH_CMD +export PUMA_BENCH_ARGS + +if [ -z "$PUMA_TEST_HOST4" ]; then export PUMA_TEST_HOST4=127.0.0.1; fi +if [ -z "$PUMA_TEST_HOST6" ]; then export PUMA_TEST_HOST6=::1; fi +if [ -z "$PUMA_TEST_PORT" ]; then export PUMA_TEST_PORT=40001; fi +if [ -z "$PUMA_TEST_CTRL" ]; then export PUMA_TEST_CTRL=40010; fi +if [ -z "$PUMA_TEST_STATE" ]; then export PUMA_TEST_STATE=tmp/bench_test_puma.state; fi + +export PUMA_CTRL=$PUMA_TEST_HOST4:$PUMA_TEST_CTRL + +while getopts :b:C:c:D:d:R:r:s:T:t:w:Y option +do +case "${option}" in +#———————————————————— RUBY options +Y) export RUBYOPT=--yjit;; +#———————————————————— Puma options +C) conf=${OPTARG};; +t) threads=${OPTARG};; +w) workers=${OPTARG};; +r) rackup_file=${OPTARG};; +#———————————————————— app/common options +b) body_conf=${OPTARG};; +s) skt_type=${OPTARG};; +d) dly_app=${OPTARG};; +#———————————————————— request_stream options +T) stream_threads=${OPTARG};; +D) duration=${OPTARG};; +R) req_per_socket=${OPTARG};; +#———————————————————— wrk options +c) connections=${OPTARG};; +# T) stream_threads=${OPTARG};; +# D) duration=${OPTARG};; +?) 
echo "Error: Invalid option was specified -$OPTARG"; exit;; +esac +done + +# -n not empty, -z is empty + +ruby_args="-S $PUMA_TEST_STATE" + +if [ -n "$connections" ]; then + ruby_args="$ruby_args -c$connections" +fi + +if [ -n "$stream_threads" ]; then + ruby_args="$ruby_args -T$stream_threads" +fi + +if [ -n "$duration" ] ; then + ruby_args="$ruby_args -D$duration" +fi + +if [ -n "$req_per_socket" ]; then + ruby_args="$ruby_args -R$req_per_socket" +fi + +if [ -n "$dly_app" ]; then + ruby_args="$ruby_args -d$dly_app" +fi + +if [ -n "$body_conf" ]; then + ruby_args="$ruby_args -b $body_conf" + export CI_BODY_CONF=$body_conf +fi + +if [ -z "$skt_type" ]; then + skt_type=tcp +fi + +ruby_args="$ruby_args -s $skt_type" + +puma_args="-S $PUMA_TEST_STATE" + +if [ -n "$workers" ]; then + puma_args="$puma_args -w$workers" + ruby_args="$ruby_args -w$workers" +fi + +if [ -z "$threads" ]; then + threads=0:5 +fi + +puma_args="$puma_args -t$threads" +ruby_args="$ruby_args -t$threads" + +if [ -n "$conf" ]; then + puma_args="$puma_args -C $conf" +fi + +if [ -z "$rackup_file" ]; then + rackup_file="test/rackup/ci_select.ru" +fi + +ip4=$PUMA_TEST_HOST4:$PUMA_TEST_PORT +ip6=[$PUMA_TEST_HOST6]:$PUMA_TEST_PORT + +case $skt_type in + ssl4) + bind="ssl://$PUMA_TEST_HOST4:$PUMA_TEST_PORT?cert=examples/puma/cert_puma.pem&key=examples/puma/puma_keypair.pem&verify_mode=none" + curl_str=https://$ip4 + wrk_str=https://$ip4 + ;; + ssl) + bind="ssl://$ip4?cert=examples/puma/cert_puma.pem&key=examples/puma/puma_keypair.pem&verify_mode=none" + curl_str=https://$ip4 + wrk_str=https://$ip4 + ;; + ssl6) + bind="ssl://$ip6?cert=examples/puma/cert_puma.pem&key=examples/puma/puma_keypair.pem&verify_mode=none" + curl_str=https://$ip6 + wrk_str=https://$ip6 + ;; + tcp4) + bind=tcp://$ip4 + curl_str=http://$ip4 + wrk_str=http://$ip4 + ;; + tcp) + bind=tcp://$ip4 + curl_str=http://$ip4 + wrk_str=http://$ip4 + ;; + tcp6) + bind=tcp://$ip6 + curl_str=http://$ip6 + wrk_str=http://$ip6 + ;; + unix) + 
bind=unix://tmp/benchmark_skt.unix + curl_str="--unix-socket tmp/benchmark_skt.unix http:/n" + ;; + aunix) + bind=unix://@benchmark_skt.aunix + curl_str="--abstract-unix-socket benchmark_skt.aunix http:/n" + ;; + *) + echo "Error: Invalid socket type option was specified '$skt_type'" + exit + ;; +esac + +StartPuma() +{ + if [ -n "$1" ]; then + rackup_file=$1 + fi + printf "\nbundle exec bin/puma -q -b $bind $puma_args --control-url=tcp://$PUMA_CTRL --control-token=test $rackup_file\n\n" + bundle exec bin/puma -q -b $bind $puma_args --control-url=tcp://$PUMA_CTRL --control-token=test $rackup_file & + sleep 6s +} diff --git a/vendor/cache/puma-fba741b91780/benchmarks/local/puma_info.rb b/vendor/cache/puma-fba741b91780/benchmarks/local/puma_info.rb new file mode 100644 index 000000000..adfd346d9 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/local/puma_info.rb @@ -0,0 +1,201 @@ +# frozen_string_literal: true + +require 'optparse' +require_relative '../../lib/puma/state_file' +require_relative '../../lib/puma/const' +require_relative '../../lib/puma/detect' +require_relative '../../lib/puma/configuration' +require 'uri' +require 'socket' +require 'json' + +module TestPuma + + # Similar to puma_ctl.rb, but returns objects. Command list is minimal. 
+ # + class PumaInfo + # @version 5.0.0 + PRINTABLE_COMMANDS = %w{gc-stats stats stop thread-backtraces}.freeze + + COMMANDS = (PRINTABLE_COMMANDS + %w{gc}).freeze + + attr_reader :master_pid + + def initialize(argv, stdout=STDOUT, stderr=STDERR) + @state = nil + @quiet = false + @pidfile = nil + @pid = nil + @control_url = nil + @control_auth_token = nil + @config_file = nil + @command = nil + @environment = ENV['RACK_ENV'] || ENV['RAILS_ENV'] + + @argv = argv + @stdout = stdout + @stderr = stderr + @cli_options = {} + + opts = OptionParser.new do |o| + o.banner = "Usage: pumactl (-p PID | -P pidfile | -S status_file | -C url -T token | -F config.rb) (#{PRINTABLE_COMMANDS.join("|")})" + + o.on "-S", "--state PATH", "Where the state file to use is" do |arg| + @state = arg + end + + o.on "-Q", "--quiet", "Not display messages" do |arg| + @quiet = true + end + + o.on "-C", "--control-url URL", "The bind url to use for the control server" do |arg| + @control_url = arg + end + + o.on "-T", "--control-token TOKEN", "The token to use as authentication for the control server" do |arg| + @control_auth_token = arg + end + + o.on "-F", "--config-file PATH", "Puma config script" do |arg| + @config_file = arg + end + + o.on "-e", "--environment ENVIRONMENT", + "The environment to run the Rack app on (default development)" do |arg| + @environment = arg + end + + o.on_tail("-H", "--help", "Show this message") do + @stdout.puts o + exit + end + + o.on_tail("-V", "--version", "Show version") do + @stdout.puts Const::PUMA_VERSION + exit + end + end + + opts.order!(argv) { |a| opts.terminate a } + opts.parse! + + unless @config_file == '-' + environment = @environment || 'development' + + if @config_file.nil? 
+ @config_file = %W(config/puma/#{environment}.rb config/puma.rb).find do |f| + File.exist?(f) + end + end + + if @config_file + config = Puma::Configuration.new({ config_files: [@config_file] }, {}) + config.load + @state ||= config.options[:state] + @control_url ||= config.options[:control_url] + @control_auth_token ||= config.options[:control_auth_token] + @pidfile ||= config.options[:pidfile] + end + end + + @master_pid = File.binread(@state)[/^pid: +(\d+)/, 1].to_i + + rescue => e + @stdout.puts e.message + exit 1 + end + + def message(msg) + @stdout.puts msg unless @quiet + end + + def prepare_configuration + if @state + unless File.exist? @state + raise "State file not found: #{@state}" + end + + sf = Puma::StateFile.new + sf.load @state + + @control_url = sf.control_url + @control_auth_token = sf.control_auth_token + @pid = sf.pid + end + end + + def send_request + uri = URI.parse @control_url + + # create server object by scheme + server = + case uri.scheme + when 'ssl' + require 'openssl' + OpenSSL::SSL::SSLSocket.new( + TCPSocket.new(uri.host, uri.port), + OpenSSL::SSL::SSLContext.new) + .tap { |ssl| ssl.sync_close = true } # default is false + .tap(&:connect) + when 'tcp' + TCPSocket.new uri.host, uri.port + when 'unix' + # check for abstract UNIXSocket + UNIXSocket.new(@control_url.start_with?('unix://@') ? + "\0#{uri.host}#{uri.path}" : "#{uri.host}#{uri.path}") + else + raise "Invalid scheme: #{uri.scheme}" + end + + url = "/#{@command}" + + if @control_auth_token + url = url + "?token=#{@control_auth_token}" + end + + server.syswrite "GET #{url} HTTP/1.0\r\n\r\n" + + unless data = server.read + raise 'Server closed connection before responding' + end + + response = data.split("\r\n") + + if response.empty? 
+ raise "Server sent empty response" + end + + @http, @code, @message = response.first.split(' ',3) + + if @code == '403' + raise 'Unauthorized access to server (wrong auth token)' + elsif @code == '404' + raise "Command error: #{response.last}" + elsif @code == '500' && @command == 'stop-sigterm' + # expected with stop-sigterm + elsif @code != '200' + raise "Bad response from server: #{@code}" + end + return unless PRINTABLE_COMMANDS.include? @command + JSON.parse response.last, {symbolize_names: true} + ensure + if server + if uri.scheme == 'ssl' + server.sysclose + else + server.close unless server.closed? + end + end + end + + def run(cmd) + return unless COMMANDS.include?(cmd) + @command = cmd + prepare_configuration + send_request + rescue => e + message e.message + exit 1 + end + end +end diff --git a/vendor/cache/puma-fba741b91780/benchmarks/local/response_time_wrk.rb b/vendor/cache/puma-fba741b91780/benchmarks/local/response_time_wrk.rb new file mode 100644 index 000000000..b87b2e6cb --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/local/response_time_wrk.rb @@ -0,0 +1,242 @@ +# frozen_string_literal: true + +require_relative 'bench_base' +require_relative 'puma_info' + +module TestPuma + + # This file is called from `response_time_wrk.sh`. It requires `wrk`. + # We suggest using https://github.com/ioquatix/wrk + # + # It starts a `Puma` server, then collects data from one or more runs of wrk. + # It logs the wrk data as each wrk runs is done, then summarizes + # the data in two tables. + # + # The default runs a matrix of the following, and takes a bit over 5 minutes, + # with 28 (4x7) wrk runs: + # + # bodies - array, chunk, string, io
+ # + # sizes - 1k, 10k, 100k, 256k, 512k, 1024k, 2048k + # + # See the file 'Testing - benchmark/local files' for sample output and information + # on arguments for the shell script. + # + # Examples: + # + # * `benchmarks/local/response_time_wrk.sh -w2 -t5:5 -s tcp6 -Y`
+ # 2 Puma workers, Puma threads 5:5, IPv6 http, 28 wrk runs with matrix above + # + # * `benchmarks/local/response_time_wrk.sh -t6:6 -s tcp -Y -b ac10,50,100`
+ # Puma single mode (0 workers), Puma threads 6:6, IPv4 http, six wrk runs, + # [array, chunk] * [10kb, 50kb, 100kb] + # + class ResponseTimeWrk < ResponseTimeBase + + WRK = ENV.fetch('WRK', 'wrk') + + def run + time_start = Process.clock_gettime(Process::CLOCK_MONOTONIC) + super + # default values + @duration ||= 10 + max_threads = (@threads[/\d+\z/] || 5).to_i + @stream_threads ||= (0.8 * (@workers || 1) * max_threads).to_i + connections = @stream_threads * (@wrk_connections || 2) + + warm_up + + @max_100_time = 0 + @max_050_time = 0 + @errors = false + + summaries = Hash.new { |h,k| h[k] = {} } + + @single_size = @body_sizes.length == 1 + @single_type = @body_types.length == 1 + + @body_sizes.each do |size| + @body_types.each do |pre, desc| + header = @single_size ? "-H '#{HDR_BODY_CONF}#{pre}#{size}'" : + "-H '#{HDR_BODY_CONF}#{pre}#{size}'".ljust(21) + + # warmup? + if pre == :i + wrk_cmd = %Q[#{WRK} -t#{@stream_threads} -c#{connections} -d1s --latency #{header} #{@wrk_bind_str}] + %x[#{wrk_cmd}] + end + + wrk_cmd = %Q[#{WRK} -t#{@stream_threads} -c#{connections} -d#{@duration}s --latency #{header} #{@wrk_bind_str}] + hsh = run_wrk_parse wrk_cmd + + @errors ||= hsh.key? :errors + + times = hsh[:times_summary] + @max_100_time = times[1.0] if times[1.0] > @max_100_time + @max_050_time = times[0.5] if times[0.5] > @max_050_time + summaries[size][desc] = hsh + end + sleep 0.5 + @puma_info.run 'gc' + sleep 2.0 + end + + run_summaries summaries + + if @single_size || @single_type + puts '' + else + overall_summary(summaries) unless @single_size || @single_type + end + + puts "wrk -t#{@stream_threads} -c#{connections} -d#{@duration}s" + + env_log + + rescue => e + puts e.class, e.message, e.backtrace + ensure + puts '' + @puma_info.run 'stop' + sleep 2 + running_time = Process.clock_gettime(Process::CLOCK_MONOTONIC) - time_start + puts format("\n%2d:%d Total Time", (running_time/60).to_i, running_time % 60) + end + + # Prints parsed data of each wrk run. 
Similar to: + # ``` + # Type req/sec 50% 75% 90% 99% 100% Resp Size + # ───────────────────────────────────────────────────────────────── 1kB + # array 13760 0.74 2.51 5.22 7.76 11.18 2797 + # ``` + # + # @param summaries [Hash] + # + def run_summaries(summaries) + digits = [4 - Math.log10(@max_100_time).to_i, 3].min + + fmt_vals = +'%-6s %6d' + fmt_vals << (digits < 0 ? " %6d" : " %6.#{digits}f")*5 + fmt_vals << ' %8d' + + label = @single_type ? 'Size' : 'Type' + + if @errors + puts "\n#{label} req/sec 50% 75% 90% 99% 100% Resp Size Errors" + desc_width = 83 + else + puts "\n#{label} req/sec 50% 75% 90% 99% 100% Resp Size" + desc_width = 65 + end + + puts format("#{'─' * desc_width} %s", @body_types[0][1]) if @single_type + + @body_sizes.each do |size| + puts format("#{'─' * desc_width}%5dkB", size) unless @single_type + @body_types.each do |_, t_desc| + hsh = summaries[size][t_desc] + times = hsh[:times_summary].values + desc = @single_type ? size : t_desc +# puts format(fmt_vals, desc, hsh[:rps], *times, hsh[:read]/hsh[:requests]) + puts format(fmt_vals, desc, hsh[:rps], *times, hsh[:resp_size]) + end + end + + end + + # Checks if any body files need to be created, reads all the body files, + # then runs a quick 'wrk warmup' command for each body type + # + def warm_up + puts "\nwarm-up" + if @body_types.map(&:first).include? :i + TestPuma.create_io_files @body_sizes + + # get size files cached + if @body_types.include? :i + 2.times do + @body_sizes.each do |size| + fn = format "#{Dir.tmpdir}/.puma_response_body_io/body_io_%04d.txt", size + t = File.read fn, mode: 'rb' + end + end + end + end + + size = @body_sizes.length == 1 ? @body_sizes.first : 10 + + @body_types.each do |pre, _| + header = "-H '#{HDR_BODY_CONF}#{pre}#{size}'".ljust(21) + warm_up_cmd = %Q[#{WRK} -t2 -c4 -d1s --latency #{header} #{@wrk_bind_str}] + run_wrk_parse warm_up_cmd + end + puts '' + end + + # Experimental - try to see how busy a CI system is. 
+ def ci_test_rps + host = ENV['HOST'] + port = ENV['PORT'].to_i + + str = 'a' * 65_500 + + server = TCPServer.new host, port + + svr_th = Thread.new do + loop do + begin + Thread.new(server.accept) do |client| + client.sysread 65_536 + client.syswrite str + client.close + end + rescue => e + break + end + end + end + + threads = [] + + t_st = Process.clock_gettime(Process::CLOCK_MONOTONIC) + + 100.times do + threads << Thread.new do + 100.times { + s = TCPSocket.new host, port + s.syswrite str + s.sysread 65_536 + s = nil + } + end + end + + threads.each(&:join) + loops_time = (1_000*(Process.clock_gettime(Process::CLOCK_MONOTONIC) - t_st)).to_i + + threads.clear + threads = nil + + server.close + svr_th.join + + req_limit = + if loops_time > 3_050 then 13_000 + elsif loops_time > 2_900 then 13_500 + elsif loops_time > 2_500 then 14_000 + elsif loops_time > 2_200 then 18_000 + elsif loops_time > 2_100 then 19_000 + elsif loops_time > 1_900 then 20_000 + elsif loops_time > 1_800 then 21_000 + elsif loops_time > 1_600 then 22_500 + else 23_000 + end + [req_limit, loops_time] + end + + def puts(*ary) + ary.each { |s| STDOUT.syswrite "#{s}\n" } + end + end +end +TestPuma::ResponseTimeWrk.new.run diff --git a/vendor/cache/puma-fba741b91780/benchmarks/local/response_time_wrk.sh b/vendor/cache/puma-fba741b91780/benchmarks/local/response_time_wrk.sh new file mode 100755 index 000000000..861f162b6 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/local/response_time_wrk.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# see comments in response_time_wrk.rb + +source benchmarks/local/bench_base.sh + +if [ "$skt_type" == "unix" ] || [ "$skt_type" == "aunix" ]; then + printf "\nwrk doesn't support UNIXSockets...\n\n" + exit +fi + +StartPuma + +ruby -I./lib benchmarks/local/response_time_wrk.rb $ruby_args -W $wrk_str +wrk_exit=$? 
+ +printf "\n" +exit $wrk_exit diff --git a/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/Gemfile b/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/Gemfile new file mode 100644 index 000000000..dbfbdfc2f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/Gemfile @@ -0,0 +1,13 @@ +source "http://rubygems.org" +git_source(:github) { |repo| "https://github.com/#{repo}.git" } + +ruby "3.2.0" + +gem "sinatra" +gem "puma_worker_killer" + +# current puma release +gem "puma" + +# PR to reduce memory of large file uploads +# gem "puma", github: "willkoehler/puma", branch: "reduce_read_body_memory" diff --git a/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/README.md b/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/README.md new file mode 100644 index 000000000..b9d5bb528 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/README.md @@ -0,0 +1,92 @@ +# Large file upload demo + +This is a simple app to demonstrate memory used by Puma for large file uploads and +compare it to proposed changes in PR https://github.com/puma/puma/pull/3062 + +### Steps to test memory improvements in https://github.com/puma/puma/pull/3062 + +- Run the app with puma_worker_killer: `bundle exec puma -p 9090 --config puma.rb` +- Make a POST request with curl: `curl --form "data=@some_large_file.mp4" --limit-rate 10M http://localhost:9090/` +- Puma will log memory usage in the console + +Below is example of the results uploading a 115MB video. + +### Puma 6.0.2 + +``` +[11820] Puma starting in cluster mode... 
+[11820] * Puma version: 6.0.2 (ruby 3.2.0-p0) ("Sunflower") +[11820] * Min threads: 0 +[11820] * Max threads: 5 +[11820] * Environment: development +[11820] * Master PID: 11820 +[11820] * Workers: 1 +[11820] * Restarts: (✔) hot (✔) phased +[11820] * Listening on http://0.0.0.0:3000 +[11820] Use Ctrl-C to stop +[11820] - Worker 0 (PID: 11949) booted in 0.06s, phase: 0 +[11820] PumaWorkerKiller: Consuming 70.984375 mb with master and 1 workers. +[11820] PumaWorkerKiller: Consuming 70.984375 mb with master and 1 workers. + +...curl request made - memory increases as file is received + +[11820] PumaWorkerKiller: Consuming 72.796875 mb with master and 1 workers. +[11820] PumaWorkerKiller: Consuming 75.921875 mb with master and 1 workers. +[11820] PumaWorkerKiller: Consuming 78.953125 mb with master and 1 workers. +[11820] PumaWorkerKiller: Consuming 82.15625 mb with master and 1 workers. +[11820] PumaWorkerKiller: Consuming 85.265625 mb with master and 1 workers. +[11820] PumaWorkerKiller: Consuming 88.046875 mb with master and 1 workers. + +...(clipped out lines) memory keeps increasing while request is received + +[11820] PumaWorkerKiller: Consuming 121.53125 mb with master and 1 workers. +[11820] PumaWorkerKiller: Consuming 122.75 mb with master and 1 workers. +[11820] PumaWorkerKiller: Consuming 125.40625 mb with master and 1 workers. + +...request handed off from Puma to Rack/Sinatra + +[11820] PumaWorkerKiller: Consuming 220.6875 mb with master and 1 workers. +127.0.0.1 - - [26/Jan/2023:20:09:56 -0500] "POST /upload HTTP/1.1" 200 162 0.0553 +[11820] PumaWorkerKiller: Consuming 228.96875 mb with master and 1 workers. +[11820] PumaWorkerKiller: Consuming 228.96875 mb with master and 1 workers. +``` + +### With PR https://github.com/puma/puma/pull/3062 + +``` +[20815] Puma starting in cluster mode... 
+[20815] * Puma version: 6.0.2 (ruby 3.2.0-p0) ("Sunflower") +[20815] * Min threads: 0 +[20815] * Max threads: 5 +[20815] * Environment: development +[20815] * Master PID: 20815 +[20815] * Workers: 1 +[20815] * Restarts: (✔) hot (✔) phased +[20815] * Listening on http://0.0.0.0:3000 +[20815] Use Ctrl-C to stop +[20815] - Worker 0 (PID: 20944) booted in 0.1s, phase: 0 +[20815] PumaWorkerKiller: Consuming 73.25 mb with master and 1 workers. +[20815] PumaWorkerKiller: Consuming 73.25 mb with master and 1 workers. + +...curl request made - memory stays level as file is received + +[20815] PumaWorkerKiller: Consuming 73.28125 mb with master and 1 workers. +[20815] PumaWorkerKiller: Consuming 73.296875 mb with master and 1 workers. +[20815] PumaWorkerKiller: Consuming 73.34375 mb with master and 1 workers. +[20815] PumaWorkerKiller: Consuming 73.359375 mb with master and 1 workers. +[20815] PumaWorkerKiller: Consuming 73.359375 mb with master and 1 workers. +[20815] PumaWorkerKiller: Consuming 73.359375 mb with master and 1 workers. + +...(clipped out lines) memory continues to stay level + +[20815] PumaWorkerKiller: Consuming 73.703125 mb with master and 1 workers. +[20815] PumaWorkerKiller: Consuming 73.703125 mb with master and 1 workers. +[20815] PumaWorkerKiller: Consuming 73.703125 mb with master and 1 workers. + +...request handed off from Puma to Rack/Sinatra + +[20815] PumaWorkerKiller: Consuming 181.96875 mb with master and 1 workers. +127.0.0.1 - - [26/Jan/2023:20:27:16 -0500] "POST /upload HTTP/1.1" 200 162 0.0585 +[20815] PumaWorkerKiller: Consuming 183.78125 mb with master and 1 workers. +[20815] PumaWorkerKiller: Consuming 183.78125 mb with master and 1 workers. 
+``` diff --git a/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/config.ru b/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/config.ru new file mode 100644 index 000000000..890a5aaf0 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/config.ru @@ -0,0 +1,7 @@ +require "sinatra" + +post "/" do + 204 +end + +run Sinatra::Application diff --git a/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/puma.rb b/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/puma.rb new file mode 100644 index 000000000..9188a0691 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/local/sinatra/puma.rb @@ -0,0 +1,15 @@ +silence_single_worker_warning + +workers 1 + +before_fork do + require "puma_worker_killer" + + PumaWorkerKiller.config do |config| + config.ram = 1024 # mb + config.frequency = 0.3 # seconds + config.reaper_status_logs = true # Log memory: PumaWorkerKiller: Consuming 54.34765625 mb with master and 1 workers. + end + + PumaWorkerKiller.start +end diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/big_body.sh b/vendor/cache/puma-fba741b91780/benchmarks/wrk/big_body.sh new file mode 100755 index 000000000..6554d58db --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/big_body.sh @@ -0,0 +1,8 @@ +# You are encouraged to use @ioquatix's wrk fork, located here: https://github.com/ioquatix/wrk + +bundle exec bin/puma -t 4 test/rackup/hello.ru & +PID1=$! +sleep 5 +wrk -c 4 -s benchmarks/wrk/lua/big_body.lua --latency http://localhost:9292 + +kill $PID1 diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/big_file.sh b/vendor/cache/puma-fba741b91780/benchmarks/wrk/big_file.sh new file mode 100755 index 000000000..4485aaa27 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/big_file.sh @@ -0,0 +1,6 @@ +bundle exec bin/puma -t 4 test/rackup/big_file.ru & +PID1=$! 
+sleep 5 +wrk -c 4 -d 60 --latency http://localhost:9292 + +kill $PID1 diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/big_response.sh b/vendor/cache/puma-fba741b91780/benchmarks/wrk/big_response.sh new file mode 100755 index 000000000..2ecf5f51e --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/big_response.sh @@ -0,0 +1,6 @@ +bundle exec bin/puma -t 4 test/rackup/big_response.ru & +PID1=$! +sleep 5 +wrk -c 4 -d 60 --latency http://localhost:9292 + +kill $PID1 diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/cpu_spin.sh b/vendor/cache/puma-fba741b91780/benchmarks/wrk/cpu_spin.sh new file mode 100755 index 000000000..2dcb63c6e --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/cpu_spin.sh @@ -0,0 +1,103 @@ +#!/bin/bash + +set -eo pipefail + +ITERATIONS=400000 +HOST=127.0.0.1:9292 +URL="http://$HOST/cpu/$ITERATIONS" + +MIN_WORKERS=1 +MAX_WORKERS=4 + +MIN_THREADS=4 +MAX_THREADS=4 + +DURATION=2 +MIN_CONCURRENT=1 +MAX_CONCURRENT=8 + +retry() { + local tries="$1" + local sleep="$2" + shift 2 + + for i in $(seq 1 $tries); do + if eval "$@"; then + return 0 + fi + + sleep "$sleep" + done + + return 1 +} + +ms() { + VALUE=$(cat) + FRAC=${VALUE%%[ums]*} + case "$VALUE" in + *us) + echo "scale=1; ${FRAC}/1000" | bc + ;; + + *ms) + echo "scale=1; ${FRAC}/1" | bc + ;; + + *s) + echo "scale=1; ${FRAC}*1000/1" | bc + ;; + esac +} + +run_wrk() { + mkdir tmp &>/dev/null || true + result=$(wrk -H "Connection: Close" -c "$wrk_c" -t "$wrk_t" -d "$DURATION" --latency "$@" | tee -a tmp/wrk.txt) + req_sec=$(echo "$result" | grep "^Requests/sec:" | awk '{print $2}') + latency_avg=$(echo "$result" | grep "^\s*Latency.*%" | awk '{print $2}' | ms) + latency_stddev=$(echo "$result" | grep "^\s*Latency.*%" | awk '{print $3}' | ms) + latency_50=$(echo "$result" | grep "^\s*50%" | awk '{print $2}' | ms) + latency_75=$(echo "$result" | grep "^\s*75%" | awk '{print $2}' | ms) + latency_90=$(echo "$result" | grep "^\s*90%" | awk '{print $2}' | 
ms) + latency_99=$(echo "$result" | grep "^\s*99%" | awk '{print $2}' | ms) + + echo -e "$workers\t$threads\t$wrk_c\t$wrk_t\t$req_sec\t$latency_avg\t$latency_stddev\t$latency_50\t$latency_75\t$latency_90\t$latency_99" +} + +run_concurrency_tests() { + echo + echo -e "PUMA_W\tPUMA_T\tWRK_C\tWRK_T\tREQ_SEC\tL_AVG\tL_DEV\tL_50%\tL_75%\tL_90%\tL_99%" + for wrk_c in $(seq $MIN_CONCURRENT $MAX_CONCURRENT); do + wrk_t="$wrk_c" + eval "$@" + sleep 1 + done + echo +} + +with_puma() { + # start puma and wait for 10s for it to start + bundle exec bin/puma -w "$workers" -t "$threads" -b "tcp://$HOST" -C test/config/cpu_spin.rb & + local puma_pid=$! + trap "kill $puma_pid" EXIT + + # wait for Puma to be up + if ! retry 10 1s curl --fail "$URL" &>/dev/null; then + echo "Failed to connect to $URL." + return 1 + fi + + # execute testing command + eval "$@" + kill "$puma_pid" || true + trap - EXIT + wait +} + +for workers in $(seq $MIN_WORKERS $MAX_WORKERS); do + for threads in $(seq $MIN_THREADS $MAX_THREADS); do + with_puma \ + run_concurrency_tests \ + run_wrk "$URL" + done +done diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/hello.sh b/vendor/cache/puma-fba741b91780/benchmarks/wrk/hello.sh new file mode 100755 index 000000000..ca51b7467 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/hello.sh @@ -0,0 +1,8 @@ +# You are encouraged to use @ioquatix's wrk fork, located here: https://github.com/ioquatix/wrk + +bundle exec bin/puma -t 4 test/rackup/hello.ru & +PID1=$! 
+sleep 5 +wrk -c 4 -d 30 --latency http://localhost:9292 + +kill $PID1 diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/jruby_ssl_realistic_response.sh b/vendor/cache/puma-fba741b91780/benchmarks/wrk/jruby_ssl_realistic_response.sh new file mode 100755 index 000000000..5eeb6ddc5 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/jruby_ssl_realistic_response.sh @@ -0,0 +1,8 @@ +bundle exec ruby bin/puma \ + -t 4 -b "ssl://localhost:9292?keystore=examples/puma/keystore.jks&keystore-pass=blahblah&verify_mode=none" \ + test/rackup/realistic_response.ru & +PID1=$! +sleep 5 +wrk -c 4 -d 30 --latency https://localhost:9292 + +kill $PID1 diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/lua/big_body.lua b/vendor/cache/puma-fba741b91780/benchmarks/wrk/lua/big_body.lua new file mode 100644 index 000000000..953049b82 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/lua/big_body.lua @@ -0,0 +1,3 @@ +wrk.method = "POST" +wrk.body = string.rep("body", 1000000) +wrk.headers["Content-Type"] = "application/x-www-form-urlencoded" diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/many_long_headers.sh b/vendor/cache/puma-fba741b91780/benchmarks/wrk/many_long_headers.sh new file mode 100755 index 000000000..f0378c00e --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/many_long_headers.sh @@ -0,0 +1,6 @@ +bundle exec bin/puma -t 4 test/rackup/many_long_headers.ru & +PID1=$! +sleep 5 +wrk -c 4 -d 30 --latency http://localhost:9292 + +kill $PID1 diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/more_conns_than_threads.sh b/vendor/cache/puma-fba741b91780/benchmarks/wrk/more_conns_than_threads.sh new file mode 100755 index 000000000..3f72f2d35 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/more_conns_than_threads.sh @@ -0,0 +1,6 @@ +bundle exec bin/puma -t 6 test/rackup/hello.ru & +PID1=$! 
+sleep 5 +wrk -c 12 --latency http://localhost:9292 + +kill $PID1 diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/realistic_response.sh b/vendor/cache/puma-fba741b91780/benchmarks/wrk/realistic_response.sh new file mode 100755 index 000000000..b8e74a6c4 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/realistic_response.sh @@ -0,0 +1,6 @@ +bundle exec bin/puma -t 4 test/rackup/realistic_response.ru & +PID1=$! +sleep 5 +wrk -c 4 -d 30 --latency http://localhost:9292 + +kill $PID1 diff --git a/vendor/cache/puma-fba741b91780/benchmarks/wrk/ssl_realistic_response.sh b/vendor/cache/puma-fba741b91780/benchmarks/wrk/ssl_realistic_response.sh new file mode 100755 index 000000000..da55da669 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/benchmarks/wrk/ssl_realistic_response.sh @@ -0,0 +1,8 @@ +bundle exec ruby bin/puma \ + -t 4 -b "ssl://localhost:9292?key=examples%2Fpuma%2Fpuma_keypair.pem&cert=examples%2Fpuma%2Fcert_puma.pem&verify_mode=none" \ + test/rackup/realistic_response.ru & +PID1=$! 
+sleep 5 +wrk -c 4 -d 30 --latency https://localhost:9292 + +kill $PID1 diff --git a/vendor/cache/puma-fba741b91780/bin/puma b/vendor/cache/puma-fba741b91780/bin/puma new file mode 100755 index 000000000..9c67c0fc4 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/bin/puma @@ -0,0 +1,10 @@ +#!/usr/bin/env ruby +# +# Copyright (c) 2011 Evan Phoenix +# + +require 'puma/cli' + +cli = Puma::CLI.new ARGV + +cli.run diff --git a/vendor/cache/puma-fba741b91780/bin/puma-wild b/vendor/cache/puma-fba741b91780/bin/puma-wild new file mode 100644 index 000000000..3701b2105 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/bin/puma-wild @@ -0,0 +1,25 @@ +#!/usr/bin/env ruby +# +# Copyright (c) 2014 Evan Phoenix +# + +require 'rubygems' + +cli_arg = ARGV.shift + +inc = "" + +if cli_arg == "-I" + inc = ARGV.shift + $LOAD_PATH.concat inc.split(":") +end + +module Puma; end + +Puma.const_set(:WILD_ARGS, ["-I", inc]) + +require 'puma/cli' + +cli = Puma::CLI.new ARGV + +cli.run diff --git a/vendor/cache/puma-fba741b91780/bin/pumactl b/vendor/cache/puma-fba741b91780/bin/pumactl new file mode 100755 index 000000000..51ab353d8 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/bin/pumactl @@ -0,0 +1,12 @@ +#!/usr/bin/env ruby + +require 'puma/control_cli' + +cli = Puma::ControlCLI.new ARGV.dup + +begin + cli.run +rescue => e + STDERR.puts e.message + exit 1 +end diff --git a/vendor/cache/puma-fba741b91780/docs/architecture.md b/vendor/cache/puma-fba741b91780/docs/architecture.md new file mode 100644 index 000000000..83f438b73 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/architecture.md @@ -0,0 +1,74 @@ +# Architecture + +## Overview + +![https://bit.ly/2iJuFky](images/puma-general-arch.png) + +Puma is a threaded Ruby HTTP application server processing requests across a TCP +and/or UNIX socket. + + +Puma processes (there can be one or many) accept connections from the socket via +a thread (in the [`Reactor`](../lib/puma/reactor.rb) class). 
The connection, +once fully buffered and read, moves into the `todo` list, where an available +thread will pick it up (in the [`ThreadPool`](../lib/puma/thread_pool.rb) +class). + +Puma works in two main modes: cluster and single. In single mode, only one Puma +process boots. In cluster mode, a `master` process is booted, which prepares +(and may boot) the application and then uses the `fork()` system call to create +one or more `child` processes. These `child` processes all listen to the same +socket. The `master` process does not listen to the socket or process requests - +its purpose is primarily to manage and listen for UNIX signals and possibly kill +or boot `child` processes. + +We sometimes call `child` processes (or Puma processes in `single` mode) +_workers_, and we sometimes call the threads created by Puma's +[`ThreadPool`](../lib/puma/thread_pool.rb) _worker threads_. + +## How Requests Work + +![https://bit.ly/2zwzhEK](images/puma-connection-flow.png) + +* Upon startup, Puma listens on a TCP or UNIX socket. + * The backlog of this socket is configured with a default of 1024, but the + actual backlog value is capped by the `net.core.somaxconn` sysctl value. + The backlog determines the size of the queue for unaccepted connections. If + the backlog is full, the operating system is not accepting new connections. + * This socket backlog is distinct from the `backlog` of work as reported by + `Puma.stats` or the control server. The backlog that `Puma.stats` refers to + represents the number of connections in the process' `todo` set waiting for + a thread from the [`ThreadPool`](../lib/puma/thread_pool.rb). +* By default, a single, separate thread (created by the + [`Reactor`](../lib/puma/reactor.rb) class) reads and buffers requests from the + socket. + * When at least one worker thread is available for work, the reactor thread + listens to the socket and accepts a request (if one is waiting). 
+ * The reactor thread waits for the entire HTTP request to be received. + * Puma exposes the time spent waiting for the HTTP request body to be + received to the Rack app as `env['puma.request_body_wait']` + (milliseconds). + * Once fully buffered and received, the connection is pushed into the "todo" + set. +* Worker threads pop work off the "todo" set for processing. + * The worker thread processes the request via `call`ing the configured Rack + application. The Rack application generates the HTTP response. + * The worker thread writes the response to the connection. While Puma buffers + requests via a separate thread, it does not use a separate thread for + responses. + * Once done, the thread becomes available to process another connection in the + "todo" set. + +### `queue_requests` + +![https://bit.ly/2zxCJ1Z](images/puma-connection-flow-no-reactor.png) + +The `queue_requests` option is `true` by default, enabling the separate reactor +thread used to buffer requests as described above. + +If set to `false`, this buffer will not be used for connections while waiting +for the request to arrive. + +In this mode, when a connection is accepted, it is added to the "todo" queue +immediately, and a worker will synchronously do any waiting necessary to read +the HTTP request from the socket. diff --git a/vendor/cache/puma-fba741b91780/docs/compile_options.md b/vendor/cache/puma-fba741b91780/docs/compile_options.md new file mode 100644 index 000000000..4b4f9f9e3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/compile_options.md @@ -0,0 +1,55 @@ +# Compile Options + +There are some `cflags` provided to change Puma's default configuration for its +C extension. + +## Query String, `PUMA_QUERY_STRING_MAX_LENGTH` + +By default, the max length of `QUERY_STRING` is `1024 * 10`. But you may want to +adjust it to accept longer queries in GET requests. 
+ +For manual install, pass the `PUMA_QUERY_STRING_MAX_LENGTH` option like this: + +``` +gem install puma -- --with-cflags="-D PUMA_QUERY_STRING_MAX_LENGTH=64000" +``` + +For Bundler, use its configuration system: + +``` +bundle config build.puma "--with-cflags='-D PUMA_QUERY_STRING_MAX_LENGTH=64000'" +``` + +## Request Path, `PUMA_REQUEST_PATH_MAX_LENGTH` + +By default, the max length of `REQUEST_PATH` is `8192`. But you may want to +adjust it to accept longer paths in requests. + +For manual install, pass the `PUMA_REQUEST_PATH_MAX_LENGTH` option like this: + +``` +gem install puma -- --with-cflags="-D PUMA_REQUEST_PATH_MAX_LENGTH=64000" +``` + +For Bundler, use its configuration system: + +``` +bundle config build.puma "--with-cflags='-D PUMA_REQUEST_PATH_MAX_LENGTH=64000'" +``` + +## Request URI, `PUMA_REQUEST_URI_MAX_LENGTH` + +By default, the max length of `REQUEST_URI` is `1024 * 12`. But you may want to +adjust it to accept longer URIs in requests. + +For manual install, pass the `PUMA_REQUEST_URI_MAX_LENGTH` option like this: + +``` +gem install puma -- --with-cflags="-D PUMA_REQUEST_URI_MAX_LENGTH=64000" +``` + +For Bundler, use its configuration system: + +``` +bundle config build.puma "--with-cflags='-D PUMA_REQUEST_URI_MAX_LENGTH=64000'" +``` diff --git a/vendor/cache/puma-fba741b91780/docs/deployment.md b/vendor/cache/puma-fba741b91780/docs/deployment.md new file mode 100644 index 000000000..2364aa66d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/deployment.md @@ -0,0 +1,102 @@ +# Deployment engineering for Puma + +Puma expects to be run in a deployed environment eventually. You can use it as +your development server, but most people use it in their production deployments. + +To that end, this document serves as a foundation of wisdom regarding deploying +Puma to production while increasing happiness and decreasing downtime. 
+ +## Specifying Puma + +Most people will specify Puma by including `gem "puma"` in a Gemfile, so we'll +assume this is how you're using Puma. + +## Single vs. Cluster mode + +Initially, Puma was conceived as a thread-only web server, but support for +processes was added in version 2. + +To run `puma` in single mode (i.e., as a development environment), set the +number of workers to 0; anything higher will run in cluster mode. + +Here are some tips for cluster mode: + +### MRI + +* Use cluster mode and set the number of workers to 1.5x the number of CPU cores + in the machine, starting from a minimum of 2. +* Set the number of threads to desired concurrent requests/number of workers. + Puma defaults to 5, and that's a decent number. + +#### Migrating from Unicorn + +* If you're migrating from unicorn though, here are some settings to start with: + * Set workers to half the number of unicorn workers you're using + * Set threads to 2 + * Enjoy 50% memory savings +* As you grow more confident in the thread-safety of your app, you can tune the + workers down and the threads up. + +#### Ubuntu / Systemd (Systemctl) Installation + +See [systemd.md](systemd.md) + +#### Worker utilization + +**How do you know if you've got enough (or too many workers)?** + +A good question. Due to MRI's GIL, only one thread can be executing Ruby code at +a time. But since so many apps are waiting on IO from DBs, etc., they can +utilize threads to use the process more efficiently. + +Generally, you never want processes that are pegged all the time. That can mean +there is more work to do than the process can get through. On the other hand, if +you have processes that sit around doing nothing, then they're just eating up +resources. + +Watch your CPU utilization over time and aim for about 70% on average. 70% +utilization means you've got capacity still but aren't starving threads. 
+ +**Measuring utilization** + +Using a timestamp header from an upstream proxy server (e.g., `nginx` or +`haproxy`) makes it possible to indicate how long requests have been waiting for +a Puma thread to become available. + +* Have your upstream proxy set a header with the time it received the request: + * nginx: `proxy_set_header X-Request-Start "${msec}";` + * haproxy >= 1.9: `http-request set-header X-Request-Start + t=%[date()]%[date_us()]` + * haproxy < 1.9: `http-request set-header X-Request-Start t=%[date()]` +* In your Rack middleware, determine the amount of time elapsed since + `X-Request-Start`. +* To improve accuracy, you will want to subtract time spent waiting for slow + clients: + * `env['puma.request_body_wait']` contains the number of milliseconds Puma + spent waiting for the client to send the request body. + * haproxy: `%Th` (TLS handshake time) and `%Ti` (idle time before request) + can also be added as headers. + +## Should I daemonize? + +The Puma 5.0 release removed daemonization. For older versions and alternatives, +continue reading. + +I prefer not to daemonize my servers and use something like `runit` or `systemd` +to monitor them as child processes. This gives them fast response to crashes and +makes it easy to figure out what is going on. Additionally, unlike `unicorn`, +Puma does not require daemonization to do zero-downtime restarts. + +I see people using daemonization because they start puma directly via Capistrano +task and thus want it to live on past the `cap deploy`. To these people, I say: +You need to be using a process monitor. Nothing is making sure Puma stays up in +this scenario! You're just waiting for something weird to happen, Puma to die, +and to get paged at 3 AM. Do yourself a favor, at least use the process monitoring +your OS comes with, be it `sysvinit` or `systemd`. Or branch out and use `runit` +or hell, even `monit`. 
+ +## Restarting + +You probably will want to deploy some new code at some point, and you'd like +Puma to start running that new code. There are a few options for restarting +Puma, described separately in our [restart documentation](restart.md). diff --git a/vendor/cache/puma-fba741b91780/docs/fork_worker.md b/vendor/cache/puma-fba741b91780/docs/fork_worker.md new file mode 100644 index 000000000..c5e416386 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/fork_worker.md @@ -0,0 +1,35 @@ +# Fork-Worker Cluster Mode [Experimental] + +Puma 5 introduces an experimental new cluster-mode configuration option, `fork_worker` (`--fork-worker` from the CLI). This mode causes Puma to fork additional workers from worker 0, instead of directly from the master process: + +``` +10000 \_ puma 4.3.3 (tcp://0.0.0.0:9292) [puma] +10001 \_ puma: cluster worker 0: 10000 [puma] +10002 \_ puma: cluster worker 1: 10000 [puma] +10003 \_ puma: cluster worker 2: 10000 [puma] +10004 \_ puma: cluster worker 3: 10000 [puma] +``` + +The `fork_worker` option allows your application to be initialized only once for copy-on-write memory savings, and it has two additional advantages: + +1. **Compatible with phased restart.** Because the master process itself doesn't preload the application, this mode works with phased restart (`SIGUSR1` or `pumactl phased-restart`). When worker 0 reloads as part of a phased restart, it initializes a new copy of your application first, then the other workers reload by forking from this new worker already containing the new preloaded application. + + This allows a phased restart to complete as quickly as a hot restart (`SIGUSR2` or `pumactl restart`), while still minimizing downtime by staggering the restart across cluster workers. + +2. **'Refork' for additional copy-on-write improvements in running applications.** Fork-worker mode introduces a new `refork` command that re-loads all nonzero workers by re-forking them from worker 0. 
+ + This command can potentially improve memory utilization in large or complex applications that don't fully pre-initialize on startup, because the re-forked workers can share copy-on-write memory with a worker that has been running for a while and serving requests. + + You can trigger a refork by sending the cluster the `SIGURG` signal or running the `pumactl refork` command at any time. A refork will also automatically trigger once, after a certain number of requests have been processed by worker 0 (default 1000). To configure the number of requests before the auto-refork, pass a positive integer argument to `fork_worker` (e.g., `fork_worker 1000`), or `0` to disable. + +### Usage Considerations + +- `fork_worker` introduces a new `on_refork` configuration hook. If you were using the `before_fork` hook previously, we generally recommend to copy its logic to `on_refork`. Note that `fork_worker` triggers the `before_fork` configuration hook *only* when initially forking the master process to worker 0, and triggers the `on_refork` hook on all subsequent forks from worker 0 to additional workers. + +### Limitations + +- This mode is still very experimental so there may be bugs or edge-cases, particularly around expected behavior of existing hooks. Please open a [bug report](https://github.com/puma/puma/issues/new?template=bug_report.md) if you encounter any issues. + +- In order to fork new workers cleanly, worker 0 shuts down its server and stops serving requests so there are no open file descriptors or other kinds of shared global state between processes, and to maximize copy-on-write efficiency across the newly-forked workers. This may temporarily reduce total capacity of the cluster during a phased restart / refork. + +- In a cluster with `n` workers, a normal phased restart stops and restarts workers one by one while the application is loaded in each process, so `n-1` workers are available serving requests during the restart. 
In a phased restart in fork-worker mode, the application is first loaded in worker 0 while `n-1` workers are available, then worker 0 remains stopped while the rest of the workers are reloaded one by one, leaving only `n-2` workers to be available for a brief period of time. Reloading the rest of the workers should be quick because the application is preloaded at that point, but there may be situations where it can take longer (slow clients, long-running application code, slow worker-fork hooks, etc). diff --git a/vendor/cache/puma-fba741b91780/docs/images/puma-connection-flow-no-reactor.png b/vendor/cache/puma-fba741b91780/docs/images/puma-connection-flow-no-reactor.png new file mode 100644 index 000000000..05ef8d01f Binary files /dev/null and b/vendor/cache/puma-fba741b91780/docs/images/puma-connection-flow-no-reactor.png differ diff --git a/vendor/cache/puma-fba741b91780/docs/images/puma-connection-flow.png b/vendor/cache/puma-fba741b91780/docs/images/puma-connection-flow.png new file mode 100644 index 000000000..afae5dfd9 Binary files /dev/null and b/vendor/cache/puma-fba741b91780/docs/images/puma-connection-flow.png differ diff --git a/vendor/cache/puma-fba741b91780/docs/images/puma-general-arch.png b/vendor/cache/puma-fba741b91780/docs/images/puma-general-arch.png new file mode 100644 index 000000000..89e26bd8b Binary files /dev/null and b/vendor/cache/puma-fba741b91780/docs/images/puma-general-arch.png differ diff --git a/vendor/cache/puma-fba741b91780/docs/java_options.md b/vendor/cache/puma-fba741b91780/docs/java_options.md new file mode 100644 index 000000000..30d2a4e8e --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/java_options.md @@ -0,0 +1,48 @@ +# Java Options + +`System Properties` or `Environment Variables` can be used to change Puma's +default configuration for its Java extension. The provided values are evaluated +during initialization, and changes while running the app have no effect. 
+Moreover, default values may be used in case of invalid inputs. + +## Supported Options + +| ENV Name | Default Value | Validation | +|------------------------------|:-------------:|:------------------------:| +| PUMA_QUERY_STRING_MAX_LENGTH | 1024 * 10 | Positive natural number | +| PUMA_REQUEST_PATH_MAX_LENGTH | 8192 | Positive natural number | +| PUMA_REQUEST_URI_MAX_LENGTH | 1024 * 12 | Positive natural number | + +## Examples + +### Invalid inputs + +An empty string will be handled as missing, and the default value will be used instead. +Puma will print an error message for other invalid values. + +``` +foo@bar:~/puma$ PUMA_QUERY_STRING_MAX_LENGTH=abc PUMA_REQUEST_PATH_MAX_LENGTH='' PUMA_REQUEST_URI_MAX_LENGTH=0 bundle exec bin/puma test/rackup/hello.ru + +The value 0 for PUMA_REQUEST_URI_MAX_LENGTH is invalid. Using default value 12288 instead. +The value abc for PUMA_QUERY_STRING_MAX_LENGTH is invalid. Using default value 10240 instead. +Puma starting in single mode... +``` + +### Valid inputs + +``` +foo@bar:~/puma$ PUMA_REQUEST_PATH_MAX_LENGTH=9 bundle exec bin/puma test/rackup/hello.ru + +Puma starting in single mode... +``` +``` +foo@bar:~ export path=/123456789 # 10 chars +foo@bar:~ curl "http://localhost:9292${path}" + +Puma caught this error: HTTP element REQUEST_PATH is longer than the 9 allowed length. (Puma::HttpParserError) + +foo@bar:~ export path=/12345678 # 9 chars +foo@bar:~ curl "http://localhost:9292${path}" +Hello World +``` + diff --git a/vendor/cache/puma-fba741b91780/docs/jungle/README.md b/vendor/cache/puma-fba741b91780/docs/jungle/README.md new file mode 100644 index 000000000..46713f937 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/jungle/README.md @@ -0,0 +1,9 @@ +# Puma as a service + +## Systemd + +See [/docs/systemd](https://github.com/puma/puma/blob/master/docs/systemd.md). 
+ +## rc.d + +See `/docs/jungle/rc.d` for FreeBSD's rc.d scripts diff --git a/vendor/cache/puma-fba741b91780/docs/jungle/rc.d/README.md b/vendor/cache/puma-fba741b91780/docs/jungle/rc.d/README.md new file mode 100644 index 000000000..2c5ddf51a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/jungle/rc.d/README.md @@ -0,0 +1,74 @@ +# Puma as a service using rc.d + +Manage multiple Puma servers as services on one box using FreeBSD's rc.d service. + +## Dependencies + +* `jq` - a command-line json parser is needed to parse the json in the config file + +## Installation + + # Copy the puma script to the rc.d directory (make sure everyone has read/execute perms) + sudo cp puma /usr/local/etc/rc.d/ + + # Create an empty configuration file + sudo touch /usr/local/etc/puma.conf + + # Enable the puma service + sudo echo 'puma_enable="YES"' >> /etc/rc.conf + +## Managing the jungle + +Puma apps are referenced in /usr/local/etc/puma.conf by default. + +Start the jungle running: + +`service puma start` + +This script will run at boot time. + + +You can also stop the jungle (stops ALL puma instances) by running: + +`service puma stop` + + +To restart the jungle: + +`service puma restart` + +## Conventions + +* The script expects: + * a config file to exist under `config/puma.rb` in your app. E.g.: `/home/apps/my-app/config/puma.rb`. + +You can always change those defaults by editing the scripts. + +## Here's what a minimal app's config file should have + +``` +{ + "servers" : [ + { + "dir": "/path/to/rails/project", + "user": "deploy-user", + "ruby_version": "ruby.version", + "ruby_env": "rbenv" + } + ] +} +``` + +## Before starting... + +You need to customise `puma.conf` to: + +* Set the right user your app should be running on unless you want root to execute it! 
+* Set the directory of the app +* Set the ruby version to execute +* Set the ruby environment (currently set to rbenv, since that is the only ruby environment currently supported) +* Add additional server instances following the scheme in the example + +## Notes: + +Only rbenv is currently supported. diff --git a/vendor/cache/puma-fba741b91780/docs/jungle/rc.d/puma b/vendor/cache/puma-fba741b91780/docs/jungle/rc.d/puma new file mode 100755 index 000000000..e80022387 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/jungle/rc.d/puma @@ -0,0 +1,61 @@ +#!/bin/sh +# + +# PROVIDE: puma + +. /etc/rc.subr + +name="puma" +start_cmd="puma_start" +stop_cmd="puma_stop" +restart_cmd="puma_restart" +rcvar=puma_enable +required_files=/usr/local/etc/puma.conf + +puma_start() +{ + server_count=$(/usr/local/bin/jq ".servers[] .ruby_env" /usr/local/etc/puma.conf | wc -l) + i=0 + while [ "$i" -lt "$server_count" ]; do + rb_env=$(/usr/local/bin/jq -r ".servers[$i].ruby_env" /usr/local/etc/puma.conf) + dir=$(/usr/local/bin/jq -r ".servers[$i].dir" /usr/local/etc/puma.conf) + user=$(/usr/local/bin/jq -r ".servers[$i].user" /usr/local/etc/puma.conf) + rb_ver=$(/usr/local/bin/jq -r ".servers[$i].ruby_version" /usr/local/etc/puma.conf) + case $rb_env in + "rbenv") + cd $dir && rbenv shell $rb_ver && /usr/sbin/daemon -u $user bundle exec puma -C $dir/config/puma.rb + ;; + *) + ;; + esac + i=$(( i + 1 )) + done +} + +puma_stop() +{ + pkill ruby +} + +puma_restart() +{ + server_count=$(/usr/local/bin/jq ".servers[] .ruby_env" /usr/local/etc/puma.conf | wc -l) + i=0 + while [ "$i" -lt "$server_count" ]; do + rb_env=$(/usr/local/bin/jq -r ".servers[$i].ruby_env" /usr/local/etc/puma.conf) + dir=$(/usr/local/bin/jq -r ".servers[$i].dir" /usr/local/etc/puma.conf) + user=$(/usr/local/bin/jq -r ".servers[$i].user" /usr/local/etc/puma.conf) + rb_ver=$(/usr/local/bin/jq -r ".servers[$i].ruby_version" /usr/local/etc/puma.conf) + case $rb_env in + "rbenv") + cd $dir && rbenv shell $rb_ver && 
/usr/sbin/daemon -u $user bundle exec puma -C $dir/config/puma.rb + ;; + *) + ;; + esac + i=$(( i + 1 )) + done +} + +load_rc_config $name +run_rc_command "$1" diff --git a/vendor/cache/puma-fba741b91780/docs/jungle/rc.d/puma.conf b/vendor/cache/puma-fba741b91780/docs/jungle/rc.d/puma.conf new file mode 100644 index 000000000..600537ace --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/jungle/rc.d/puma.conf @@ -0,0 +1,10 @@ +{ + "servers" : [ + { + "dir": "/path/to/rails/project", + "user": "deploy-user", + "ruby_version": "ruby.version", + "ruby_env": "rbenv" + } + ] +} diff --git a/vendor/cache/puma-fba741b91780/docs/kubernetes.md b/vendor/cache/puma-fba741b91780/docs/kubernetes.md new file mode 100644 index 000000000..5c59a4db3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/kubernetes.md @@ -0,0 +1,78 @@ +# Kubernetes + +## Running Puma in Kubernetes + +In general running Puma in Kubernetes works as-is, no special configuration is needed beyond what you would write anyway to get a new Kubernetes Deployment going. There is one known interaction between the way Kubernetes handles pod termination and how Puma handles `SIGINT`, where some request might be sent to Puma after it has already entered graceful shutdown mode and is no longer accepting requests. This can lead to dropped requests during rolling deploys. A workaround for this is listed at the end of this article. + +## Basic setup + +Assuming you already have a running cluster and docker image repository, you can run a simple Puma app with the following example Dockerfile and Deployment specification. These are meant as examples only and are deliberately very minimal to the point of skipping many options that are recommended for running in production, like healthchecks and envvar configuration with ConfigMaps. 
In general you should check the [Kubernetes documentation](https://kubernetes.io/docs/home/) and [Docker documentation](https://docs.docker.com/) for a more comprehensive overview of the available options. + +A basic Dockerfile example: +``` +FROM ruby:2.5.1-alpine # can be updated to newer ruby versions +RUN apk update && apk add build-base # and any other packages you need + +# Only rebuild gem bundle if Gemfile changes +COPY Gemfile Gemfile.lock ./ +RUN bundle install + +# Copy over the rest of the files +COPY . . + +# Open up port and start the service +EXPOSE 9292 +CMD bundle exec rackup -o 0.0.0.0 +``` + +A sample `deployment.yaml`: +``` +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-awesome-puma-app +spec: + selector: + matchLabels: + app: my-awesome-puma-app + template: + metadata: + labels: + app: my-awesome-puma-app + service: my-awesome-puma-app + spec: + containers: + - name: my-awesome-puma-app + image: + ports: + - containerPort: 9292 +``` + +## Graceful shutdown and pod termination + +For some high-throughput systems, it is possible that some HTTP requests will return responses with response codes in the 5XX range during a rolling deploy to a new version. This is caused by [the way that Kubernetes terminates a pod during rolling deploys](https://cloud.google.com/blog/products/gcp/kubernetes-best-practices-terminating-with-grace): + +1. The replication controller determines a pod should be shut down. +2. The Pod is set to the “Terminating” State and removed from the endpoints list of all Services, so that it receives no more requests. +3. The pods pre-stop hook get called. The default for this is to send `SIGTERM` to the process inside the pod. +4. The pod has up to `terminationGracePeriodSeconds` (default: 30 seconds) to gracefully shut down. Puma will do this (after it receives SIGTERM) by closing down the socket that accepts new requests and finishing any requests already running before exiting the Puma process. +5. 
If the pod is still running after `terminationGracePeriodSeconds` has elapsed, the pod receives `SIGKILL` to make sure the process inside it stops. After that, the container exits and all other Kubernetes objects associated with it are cleaned up. + +There is a subtle race condition between step 2 and 3: The replication controller does not synchronously remove the pod from the Services AND THEN call the pre-stop hook of the pod, but rather it asynchronously sends "remove this pod from your endpoints" requests to the Services and then immediately proceeds to invoke the pods' pre-stop hook. If the Service controller (typically something like nginx or haproxy) receives and handles this request "too" late (due to internal lag or network latency between the replication and Service controllers) then it is possible that the Service controller will send one or more requests to a Puma process which has already shut down its listening socket. These requests will then fail with 5XX error codes. + +The reason Kubernetes works this way, rather than handling step 2 synchronously, is due to the CAP theorem: in a distributed system there is no way to guarantee that any message will arrive promptly. In particular, waiting for all Service controllers to report back might get stuck for an indefinite time if one of them has already been terminated or if there has been a net split. A way to work around this is to add a sleep to the pre-stop hook of the same time as the `terminationGracePeriodSeconds` time. This will allow the Puma process to keep serving new requests during the entire grace period, although it will no longer receive new requests after all Service controllers have propagated the removal of the pod from their endpoint lists. Then, after `terminationGracePeriodSeconds`, the pod receives `SIGKILL` and closes down. 
If your process can't handle SIGKILL properly, for example because it needs to release locks in different services, you can also sleep for a shorter period (and/or increase `terminationGracePeriodSeconds`) as long as the time slept is longer than the time that your Service controllers take to propagate the pod removal. The downside of this workaround is that all pods will take at minimum the amount of time slept to shut down and this will increase the time required for your rolling deploy. + +More discussions and links to relevant articles can be found in https://github.com/puma/puma/issues/2343. + +## Workers Per Pod, and Other Config Issues + +With containerization, you will have to make a decision about how "big" to make each pod. Should you run 2 pods with 50 workers each? 25 pods, each with 4 workers? 100 pods, with each Puma running in single mode? Each scenario represents the same total amount of capacity (100 Puma processes that can respond to requests), but there are tradeoffs to make. + +* Worker counts should be somewhere between 4 and 32 in most cases. You want more than 4 in order to minimize time spent in request queueing for a free Puma worker, but probably less than ~32 because otherwise autoscaling is working in too large of an increment or they probably won't fit very well into your nodes. In any queueing system, queue time is proportional to 1/n, where n is the number of things pulling from the queue. Each pod will have its own request queue (i.e., the socket backlog). If you have 4 pods with 1 worker each (4 request queues), wait times are, proportionally, about 4 times higher than if you had 1 pod with 4 workers (1 request queue). +* Unless you have a very I/O-heavy application (50%+ time spent waiting on IO), use the default thread count (5 for MRI). Using higher numbers of threads with low I/O wait (<50%) will lead to additional request queueing time (latency!) and additional memory usage. 
+* More processes per pod reduces memory usage per process, because of copy-on-write memory and because the cost of the single master process is "amortized" over more child processes. +* Don't run less than 4 processes per pod if you can. Low numbers of processes per pod will lead to high request queueing, which means you will have to run more pods. +* If multithreaded, allocate 1 CPU per worker. If single threaded, allocate 0.75 cpus per worker. Most web applications spend about 25% of their time in I/O - but when you're running multi-threaded, your Puma process will have higher CPU usage and should be able to fully saturate a CPU core. +* Most Puma processes will use about ~512MB-1GB per worker, and about 1GB for the master process. However, you probably shouldn't bother with setting memory limits lower than around 2GB per process, because most places you are deploying will have 2GB of RAM per CPU. A sensible memory limit for a Puma configuration of 4 child workers might be something like 8 GB (1 GB for the master, 7GB for the 4 children). + diff --git a/vendor/cache/puma-fba741b91780/docs/nginx.md b/vendor/cache/puma-fba741b91780/docs/nginx.md new file mode 100644 index 000000000..64b278264 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/nginx.md @@ -0,0 +1,80 @@ +# Nginx configuration example file + +This is a very common setup using an upstream. It was adapted from some Capistrano recipe I found on the Internet a while ago. + +```nginx +upstream myapp { + server unix:///myapp/tmp/puma.sock; +} + +server { + listen 80; + server_name myapp.com; + + # ~2 seconds is often enough for most folks to parse HTML/CSS and + # retrieve needed images/icons/frames, connections are cheap in + # nginx so increasing this is generally safe... 
+ keepalive_timeout 5; + + # path for static files + root /myapp/public; + access_log /myapp/log/nginx.access.log; + error_log /myapp/log/nginx.error.log info; + + # this rewrites all the requests to the maintenance.html + # page if it exists in the doc root. This is for capistrano's + # disable web task + if (-f $document_root/maintenance.html) { + rewrite ^(.*)$ /maintenance.html last; + break; + } + + location / { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Host $host; + + # If the file exists as a static file serve it directly without + # running all the other rewrite tests on it + if (-f $request_filename) { + break; + } + + # check for index.html for directory index + # if it's there on the filesystem then rewrite + # the url to add /index.html to the end of it + # and then break to send it to the next config rules. + if (-f $request_filename/index.html) { + rewrite (.*) $1/index.html break; + } + + # this is the meat of the rack page caching config + # it adds .html to the end of the url and then checks + # the filesystem for that file. If it exists, then we + # rewrite the url to have explicit .html on the end + # and then send it on its way to the next config rule. + # if there is no file on the fs then it sets all the + # necessary headers and proxies to our upstream pumas + if (-f $request_filename.html) { + rewrite (.*) $1.html break; + } + + if (!-f $request_filename) { + proxy_pass http://myapp; + break; + } + } + + # Now this supposedly should work as it gets the filenames with querystrings that Rails provides. + # BUT there's a chance it could break the ajax calls. 
+ location ~* \.(ico|css|gif|jpe?g|png|js)(\?[0-9]+)?$ { + expires max; + break; + } + + # Error pages + # error_page 500 502 503 504 /500.html; + location = /500.html { + root /myapp/current/public; + } +} +``` diff --git a/vendor/cache/puma-fba741b91780/docs/plugins.md b/vendor/cache/puma-fba741b91780/docs/plugins.md new file mode 100644 index 000000000..c7500c10d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/plugins.md @@ -0,0 +1,38 @@ +## Plugins + +Puma 3.0 added support for plugins that can augment configuration and service +operations. + +There are two canonical plugins to aid in the development of new plugins: + +* [tmp\_restart](https://github.com/puma/puma/blob/master/lib/puma/plugin/tmp_restart.rb): + Restarts the server if the file `tmp/restart.txt` is touched +* [heroku](https://github.com/puma/puma-heroku/blob/master/lib/puma/plugin/heroku.rb): + Packages up the default configuration used by Puma on Heroku (being sunset + with the release of Puma 5.0) + +Plugins are activated in a Puma configuration file (such as `config/puma.rb'`) +by adding `plugin "name"`, such as `plugin "heroku"`. + +Plugins are activated based on path requirements so, activating the `heroku` +plugin is much like `require "puma/plugin/heroku"`. This allows gems to provide +multiple plugins (as well as unrelated gems to provide Puma plugins). + +The `tmp_restart` plugin comes with Puma, so it is always available. + +To use the `heroku` plugin, add `puma-heroku` to your Gemfile or install it. + +### API + +## Server-wide hooks + +Plugins can use a couple of hooks at the server level: `start` and `config`. + +`start` runs when the server has started and allows the plugin to initiate other +functionality to augment Puma. + +`config` runs when the server is being configured and receives a `Puma::DSL` +object that is useful for additional configuration. + +Public methods in [`Puma::Plugin`](../lib/puma/plugin.rb) are treated as a +public API for plugins. 
diff --git a/vendor/cache/puma-fba741b91780/docs/rails_dev_mode.md b/vendor/cache/puma-fba741b91780/docs/rails_dev_mode.md new file mode 100644 index 000000000..add3cee84 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/rails_dev_mode.md @@ -0,0 +1,28 @@ +# Running Puma in Rails Development Mode + +## "Loopback requests" + +Be cautious of "loopback requests," where a Rails application executes a request to a server that, in turn, results in another request back to the same Rails application before the first request completes. Having a loopback request will trigger [Rails' load interlock](https://guides.rubyonrails.org/threading_and_code_execution.html#load-interlock) mechanism. The load interlock mechanism prevents a thread from using Rails autoloading mechanism to load constants while the application code is still running inside another thread. + +This issue only occurs in the development environment as Rails' load interlock is not used in production environments. Although we're not sure, we believe this issue may not occur with the new `zeitwerk` code loader. + +### Solutions + +#### 1. Bypass Rails' load interlock with `.permit_concurrent_loads` + +Wrap the first request inside a block that will allow concurrent loads: [`ActiveSupport::Dependencies.interlock.permit_concurrent_loads`](https://guides.rubyonrails.org/threading_and_code_execution.html#permit-concurrent-loads). Anything wrapped inside the `.permit_concurrent_loads` block will bypass the load interlock mechanism, allowing new threads to access the Rails environment and boot properly. + +###### Example + +```ruby +response = ActiveSupport::Dependencies.interlock.permit_concurrent_loads do + # Your HTTP request code here. For example: + Faraday.post url, data: 'foo' +end + +do_something_with response +``` + +#### 2. Use multiple processes on Puma + +Alternatively, you may also enable multiple (single-threaded) workers on Puma. 
By doing so, you are sidestepping the problem by creating multiple processes rather than new threads. However, this workaround is not ideal because debugging tools such as [byebug](https://github.com/deivid-rodriguez/byebug/issues/487) and [pry](https://github.com/pry/pry/issues/2153), work poorly with any multi-process web server. diff --git a/vendor/cache/puma-fba741b91780/docs/restart.md b/vendor/cache/puma-fba741b91780/docs/restart.md new file mode 100644 index 000000000..7ea5b02c3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/restart.md @@ -0,0 +1,65 @@ +Puma provides three distinct kinds of restart operations, each for different use cases. This document describes "hot restarts" and "phased restarts." The third kind of restart operation is called "refork" and is described in the documentation for [`fork_worker`](fork_worker.md). + +## Hot restart + +To perform a "hot" restart, Puma performs an `exec` operation to start the process up again, so no memory is shared between the old process and the new process. As a result, it is safe to issue a restart at any place where you would manually stop Puma and start it again. In particular, it is safe to upgrade Puma itself using a hot restart. + +If the new process is unable to load, it will simply exit. You should therefore run Puma under a process monitor when using it in production. + +### How-to + +Any of the following will cause a Puma server to perform a hot restart: + +* Send the `puma` process the `SIGUSR2` signal +* Issue a `GET` request to the Puma status/control server with the path `/restart` +* Issue `pumactl restart` (this uses the control server method if available, otherwise sends the `SIGUSR2` signal to the process) + +### Supported configurations + +* Works in cluster mode and single mode +* Supported on all platforms + +### Client experience + +* All platforms: clients with an in-flight request are served responses before the connection is closed gracefully. 
Puma gracefully disconnects any idle HTTP persistent connections before restarting. +* On MRI or TruffleRuby on Linux and BSD: Clients who connect just before the server restarts may experience increased latency while the server stops and starts again, but their connections will not be closed prematurely. +* On Windows and JRuby: Clients who connect just before a restart may experience "connection reset" errors. + +### Additional notes + +* The newly started Puma process changes its current working directory to the directory specified by the `directory` option. If `directory` is set to symlink, this is automatically re-evaluated, so this mechanism can be used to upgrade the application. +* Only one version of the application is running at a time. +* `on_restart` is invoked just before the server shuts down. This can be used to clean up resources (like long-lived database connections) gracefully. Since Ruby 2.0, it is not typically necessary to explicitly close file descriptors on restart. This is because any file descriptor opened by Ruby will have the `FD_CLOEXEC` flag set, meaning that file descriptors are closed on `exec`. `on_restart` is useful, though, if your application needs to perform any more graceful protocol-specific shutdown procedures before closing connections. + +## Phased restart + +Phased restarts replace all running workers in a Puma cluster. This is a useful way to upgrade the application that Puma is serving gracefully. A phased restart works by first killing an old worker, then starting a new worker, waiting until the new worker has successfully started before proceeding to the next worker. This process continues until all workers are replaced. The master process is not restarted. 
+ +### How-to + +Any of the following will cause a Puma server to perform a phased restart: + +* Send the `puma` process the `SIGUSR1` signal +* Issue a `GET` request to the Puma status/control server with the path `/phased-restart` +* Issue `pumactl phased-restart` (this uses the control server method if available, otherwise sends the `SIGUSR1` signal to the process) + +### Supported configurations + +* Works in cluster mode only +* To support upgrading the application that Puma is serving, ensure `prune_bundler` is enabled and that `preload_app!` is disabled +* Supported on all platforms where cluster mode is supported + +### Client experience + +* In-flight requests are always served responses before the connection is closed gracefully +* Idle persistent connections are gracefully disconnected +* New connections are not lost, and clients will not experience any increase in latency (as long as the number of configured workers is greater than one) + +### Additional notes + +* When a phased restart begins, the Puma master process changes its current working directory to the directory specified by the `directory` option. If `directory` is set to symlink, this is automatically re-evaluated, so this mechanism can be used to upgrade the application. +* On a single server, it's possible that two versions of the application are running concurrently during a phased restart. +* `on_restart` is not invoked +* Phased restarts can be slow for Puma clusters with many workers. Hot restarts often complete more quickly, but at the cost of increased latency during the restart. +* Phased restarts cannot be used to upgrade any gems loaded by the Puma master process, including `puma` itself, anything in `extra_runtime_dependencies`, or dependencies thereof. Upgrading other gems is safe. +* If you remove the gems from old releases as part of your deployment strategy, there are additional considerations. 
Do not put any gems into `extra_runtime_dependencies` that have native extensions or have dependencies that have native extensions (one common example is `puma_worker_killer` and its dependency on `ffi`). Workers will fail on boot during a phased restart. The underlying issue is recorded in [an issue on the rubygems project](https://github.com/rubygems/rubygems/issues/4004). Hot restarts are your only option here if you need these dependencies. diff --git a/vendor/cache/puma-fba741b91780/docs/signals.md b/vendor/cache/puma-fba741b91780/docs/signals.md new file mode 100644 index 000000000..915560cfc --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/signals.md @@ -0,0 +1,98 @@ +The [unix signal](https://en.wikipedia.org/wiki/Unix_signal) is a method of sending messages between [processes](https://en.wikipedia.org/wiki/Process_(computing)). When a signal is sent, the operating system interrupts the target process's normal flow of execution. There are standard signals that are used to stop a process, but there are also custom signals that can be used for other purposes. This document is an attempt to list all supported signals that Puma will respond to. In general, signals need only be sent to the master process of a cluster. + +## Sending Signals + +If you are new to signals, it can be helpful to see how they are used. When a process starts in a *nix-like operating system, it will have a [PID - or process identifier](https://en.wikipedia.org/wiki/Process_identifier) that can be used to send signals to the process. 
For demonstration, we will create an infinitely running process by tailing a file: + +```sh +$ echo "foo" >> my.log +$ irb +> pid = Process.spawn 'tail -f my.log' +``` + +From here, we can see that the tail process is running by using the `ps` command: + +```sh +$ ps aux | grep tail +schneems 87152 0.0 0.0 2432772 492 s032 S+ 12:46PM 0:00.00 tail -f my.log +``` + +You can send a signal in Ruby using the [Process module](https://ruby-doc.org/3.2.2/Process.html#method-c-kill): + +``` +$ irb +> puts pid +=> 87152 +Process.detach(pid) # https://ruby-doc.org/3.2.2/Process.html#method-c-detach +Process.kill("TERM", pid) +``` + +Now you will see via `ps` that there is no more `tail` process. Sometimes when referring to signals, the `SIG` prefix will be used. For example, `SIGTERM` is equivalent to sending `TERM` via `Process.kill`. + +## Puma Signals + +Puma cluster responds to these signals: + +- `TTIN` increment the worker count by 1 +- `TTOU` decrement the worker count by 1 +- `TERM` send `TERM` to worker. The worker will attempt to finish then exit. +- `USR2` restart workers. This also reloads the Puma configuration file, if there is one. +- `USR1` restart workers in phases, a rolling restart. This will not reload the configuration file. +- `HUP ` reopen log files defined in stdout_redirect configuration parameter. If there is no stdout_redirect option provided, it will behave like `INT` +- `INT ` equivalent of sending Ctrl-C to cluster. Puma will attempt to finish then exit. +- `CHLD` +- `URG ` refork workers in phases from worker 0 if `fork_worker` option is enabled. 
+- `INFO` print backtraces of all puma threads + +## Callbacks order in case of different signals + +### Start application + +``` +puma configuration file reloaded, if there is one +* Pruning Bundler environment +puma configuration file reloaded, if there is one + +before_fork +on_worker_fork +after_worker_fork + +Gemfile in context + +on_worker_boot + +Code of the app is loaded and running +``` + +### Send USR2 + +``` +on_worker_shutdown +on_restart + +puma configuration file reloaded, if there is one + +before_fork +on_worker_fork +after_worker_fork + +Gemfile in context + +on_worker_boot + +Code of the app is loaded and running +``` + +### Send USR1 + +``` +on_worker_shutdown +on_worker_fork +after_worker_fork + +Gemfile in context + +on_worker_boot + +Code of the app is loaded and running +``` diff --git a/vendor/cache/puma-fba741b91780/docs/stats.md b/vendor/cache/puma-fba741b91780/docs/stats.md new file mode 100644 index 000000000..39695e97a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/stats.md @@ -0,0 +1,143 @@ +## Accessing stats + +Stats can be accessed in two ways: + +### control server + +`$ pumactl stats` or `GET /stats` + +[Read more about `pumactl` and the control server in the README.](https://github.com/puma/puma#controlstatus-server). + +### Puma.stats + +`Puma.stats` produces a JSON string. `Puma.stats_hash` produces a ruby hash. + +#### in single mode + +Invoke `Puma.stats` anywhere in runtime, e.g. in a rails initializer: + +```ruby +# config/initializers/puma_stats.rb + +Thread.new do + loop do + sleep 30 + puts Puma.stats + end +end +``` + +#### in cluster mode + +Invoke `Puma.stats` from the master process + +```ruby +# config/puma.rb + +before_fork do + Thread.new do + loop do + puts Puma.stats + sleep 30 + end + end +end +``` + + +## Explanation of stats + +`Puma.stats` returns different information and a different structure depending on if Puma is in single vs. cluster mode. 
There is one top-level attribute that is common to both modes: + +* started_at: when Puma was started + +### single mode and individual workers in cluster mode + +When Puma runs in single mode, these stats are available at the top level. When Puma runs in cluster mode, these stats are available within the `worker_status` array in a hash labeled `last_status`, in an array of hashes where one hash represents each worker. + +* backlog: requests that are waiting for an available thread to be available. if this is above 0, you need more capacity [always true?] +* running: how many threads are spawned. A spawned thread may be busy processing a request or waiting for a new request. If `min_threads` and `max_threads` are set to the same number, + this will be a never-changing number (other than rare cases when a thread dies, etc). +* pool_capacity: the number of requests that the server is capable of taking right now. For example, if the number is 5, then it means there are 5 threads sitting idle ready to take a request. If one request comes in, then the value would be 4 until it finishes processing. If the minimum threads allowed is zero, this number will still have a maximum value of the maximum threads allowed. +* max_threads: the maximum number of threads Puma is configured to spool per worker +* requests_count: the number of requests this worker has served since starting + + +### cluster mode + +* phase: which phase of restart the process is in, during [phased restart](https://github.com/puma/puma/blob/master/docs/restart.md) +* workers: ?? +* booted_workers: how many workers currently running? +* old_workers: ?? +* worker_status: array of hashes of info for each worker (see below) + +### worker status + +* started_at: when the worker started +* pid: the process id of the worker process +* index: each worker gets a number. if Puma is configured to have 3 workers, then this will be 0, 1, or 2 +* booted: if it's done booting [?] 
+* last_checkin: Last time the worker responded to the master process' heartbeat check. +* last_status: a hash of info about the worker's state handling requests. See the explanation for this in "single mode and individual workers in cluster mode" section above. + + +## Examples + +Here are two example stats hashes produced by `Puma.stats`: + +### single + +```json +{ + "started_at": "2021-01-14T07:12:35Z", + "backlog": 0, + "running": 5, + "pool_capacity": 5, + "max_threads": 5, + "requests_count": 3 +} +``` + +### cluster + +```json +{ + "started_at": "2021-01-14T07:09:17Z", + "workers": 2, + "phase": 0, + "booted_workers": 2, + "old_workers": 0, + "worker_status": [ + { + "started_at": "2021-01-14T07:09:24Z", + "pid": 64136, + "index": 0, + "phase": 0, + "booted": true, + "last_checkin": "2021-01-14T07:11:09Z", + "last_status": { + "backlog": 0, + "running": 5, + "pool_capacity": 5, + "max_threads": 5, + "requests_count": 2 + } + }, + { + "started_at": "2021-01-14T07:09:24Z", + "pid": 64137, + "index": 1, + "phase": 0, + "booted": true, + "last_checkin": "2021-01-14T07:11:09Z", + "last_status": { + "backlog": 0, + "running": 5, + "pool_capacity": 5, + "max_threads": 5, + "requests_count": 1 + } + } + ] +} +``` diff --git a/vendor/cache/puma-fba741b91780/docs/systemd.md b/vendor/cache/puma-fba741b91780/docs/systemd.md new file mode 100644 index 000000000..1c7f577a1 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/systemd.md @@ -0,0 +1,253 @@ +# systemd + +[systemd](https://www.freedesktop.org/wiki/Software/systemd/) is a commonly +available init system (PID 1) on many Linux distributions. It offers process +monitoring (including automatic restarts) and other useful features for running +Puma in production. + +## Service Configuration + +Below is a sample puma.service configuration file for systemd, which can be +copied or symlinked to `/etc/systemd/system/puma.service`, or if desired, using +an application or instance-specific name. 
+ +Note that this uses the systemd preferred "simple" type where the start command +remains running in the foreground (does not fork and exit). + +~~~~ ini +[Unit] +Description=Puma HTTP Server +After=network.target + +# Uncomment for socket activation (see below) +# Requires=puma.socket + +[Service] +# Puma supports systemd's `Type=notify` and watchdog service +# monitoring, as of Puma 5.1 or later. +# On earlier versions of Puma or JRuby, change this to `Type=simple` and remove +# the `WatchdogSec` line. +Type=notify + +# If your Puma process locks up, systemd's watchdog will restart it within seconds. +WatchdogSec=10 + +# Preferably configure a non-privileged user +# User= + +# The path to your application code root directory. +# Also replace the "" placeholders below with this path. +# Example /home/username/myapp +WorkingDirectory= + +# Helpful for debugging socket activation, etc. +# Environment=PUMA_DEBUG=1 + +# SystemD will not run puma even if it is in your path. You must specify +# an absolute URL to puma. For example /usr/local/bin/puma +# Alternatively, create a binstub with `bundle binstubs puma --path ./sbin` in the WorkingDirectory +ExecStart=//bin/puma -C /puma.rb + +# Variant: Rails start. +# ExecStart=//bin/puma -C /config/puma.rb ../config.ru + +# Variant: Use `bundle exec puma` instead of binstub +# Variant: Specify directives inline. +# ExecStart=//puma -b tcp://0.0.0.0:9292 -b ssl://0.0.0.0:9293?key=key.pem&cert=cert.pem + + +Restart=always + +[Install] +WantedBy=multi-user.target +~~~~ + +See +[systemd.exec](https://www.freedesktop.org/software/systemd/man/systemd.exec.html) +for additional details. + +## Socket Activation + +systemd and Puma also support socket activation, where systemd opens the +listening socket(s) in advance and provides them to the Puma master process on +startup. 
Among other advantages, this keeps listening sockets open across puma +restarts and achieves graceful restarts, including when upgraded Puma, and is +compatible with both clustered mode and application preload. + +**Note:** Any wrapper scripts which `exec`, or other indirections in `ExecStart` +may result in activated socket file descriptors being closed before reaching the +puma master process. + +**Note:** Socket activation doesn't currently work on JRuby. This is tracked in +[#1367]. + +Configure one or more `ListenStream` sockets in a companion `*.socket` unit file +to use socket activation. Also, uncomment the associated `Requires` directive +for the socket unit in the service file (see above.) Here is a sample +puma.socket, matching the ports used in the above puma.service: + +~~~~ ini +[Unit] +Description=Puma HTTP Server Accept Sockets + +[Socket] +ListenStream=0.0.0.0:9292 +ListenStream=0.0.0.0:9293 + +# AF_UNIX domain socket +# SocketUser, SocketGroup, etc. may be needed for Unix domain sockets +# ListenStream=/run/puma.sock + +# Socket options matching Puma defaults +ReusePort=true +Backlog=1024 +# Enable this if you're using Puma with the "low_latency" option, read more in Puma DSL docs and systemd docs: +# https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html#NoDelay= +# NoDelay=true + +[Install] +WantedBy=sockets.target +~~~~ + +See +[systemd.socket](https://www.freedesktop.org/software/systemd/man/systemd.socket.html) +for additional configuration details. + +Note that the above configurations will work with Puma in either single process +or cluster mode. + +### Sockets and symlinks + +When using releases folders, you should set the socket path using the shared +folder path (ex. `/srv/projet/shared/tmp/puma.sock`), not the release folder +path (`/srv/projet/releases/1234/tmp/puma.sock`). 
+ +Puma will detect the release path socket as different than the one provided by +systemd and attempt to bind it again, resulting in the exception `There is +already a server bound to:`. + +### Binding + +By default, you need to configure Puma to have binds matching with all +ListenStream statements. Any mismatched systemd ListenStreams will be closed by +Puma. + +To automatically bind to all activated sockets, the option +`--bind-to-activated-sockets` can be used. This matches the config DSL +`bind_to_activated_sockets` statement. This will cause Puma to create a bind +automatically for any activated socket. When systemd socket activation is not +enabled, this option does nothing. + +This also accepts an optional argument `only` (DSL: `'only'`) to discard any +binds that's not socket activated. + +## Usage + +Without socket activation, use `systemctl` as root (i.e., via `sudo`) as with +other system services: + +~~~~ sh +# After installing or making changes to puma.service +systemctl daemon-reload + +# Enable so it starts on boot +systemctl enable puma.service + +# Initial startup. +systemctl start puma.service + +# Check status +systemctl status puma.service + +# A normal restart. Warning: listener's sockets will be closed +# while a new puma process initializes. +systemctl restart puma.service +~~~~ + +With socket activation, several but not all of these commands should be run for +both socket and service: + +~~~~ sh +# After installing or making changes to either puma.socket or +# puma.service. +systemctl daemon-reload + +# Enable both socket and service, so they start on boot. Alternatively +# you could leave puma.service disabled, and systemd will start it on +# the first use (with startup lag on the first request) +systemctl enable puma.socket puma.service + +# Initial startup. The Requires directive (see above) ensures the +# socket is started before the service. +systemctl start puma.socket puma.service + +# Check the status of both socket and service. 
+systemctl status puma.socket puma.service + +# A "hot" restart, with systemd keeping puma.socket listening and +# providing to the new puma (master) instance. +systemctl restart puma.service + +# A normal restart, needed to handle changes to +# puma.socket, such as changing the ListenStream ports. Note +# daemon-reload (above) should be run first. +systemctl restart puma.socket puma.service +~~~~ + +Here is sample output from `systemctl status` with both service and socket +running: + +~~~~ +● puma.socket - Puma HTTP Server Accept Sockets + Loaded: loaded (/etc/systemd/system/puma.socket; enabled; vendor preset: enabled) + Active: active (running) since Thu 2016-04-07 08:40:19 PDT; 1h 2min ago + Listen: 0.0.0.0:9233 (Stream) + 0.0.0.0:9234 (Stream) + +Apr 07 08:40:19 hx systemd[874]: Listening on Puma HTTP Server Accept Sockets. + +● puma.service - Puma HTTP Server + Loaded: loaded (/etc/systemd/system/puma.service; enabled; vendor preset: enabled) + Active: active (running) since Thu 2016-04-07 08:40:19 PDT; 1h 2min ago + Main PID: 28320 (ruby) + CGroup: /system.slice/puma.service + ├─28320 puma 3.3.0 (tcp://0.0.0.0:9233,ssl://0.0.0.0:9234?key=key.pem&cert=cert.pem) [app] + ├─28323 puma: cluster worker 0: 28320 [app] + └─28327 puma: cluster worker 1: 28320 [app] + +Apr 07 08:40:19 hx puma[28320]: Puma starting in cluster mode... 
+Apr 07 08:40:19 hx puma[28320]: * Version 3.3.0 (ruby 2.2.4-p230), codename: Jovial Platypus +Apr 07 08:40:19 hx puma[28320]: * Min threads: 0, max threads: 16 +Apr 07 08:40:19 hx puma[28320]: * Environment: production +Apr 07 08:40:19 hx puma[28320]: * Process workers: 2 +Apr 07 08:40:19 hx puma[28320]: * Phased restart available +Apr 07 08:40:19 hx puma[28320]: * Activated tcp://0.0.0.0:9233 +Apr 07 08:40:19 hx puma[28320]: * Activated ssl://0.0.0.0:9234?key=key.pem&cert=cert.pem +Apr 07 08:40:19 hx puma[28320]: Use Ctrl-C to stop +~~~~ + +### capistrano3-puma + +By default, [capistrano3-puma](https://github.com/seuros/capistrano-puma) uses +`pumactl` for deployment restarts outside of systemd. To learn the exact +commands that this tool would use for `ExecStart` and `ExecStop`, use the +following `cap` commands in dry-run mode, and update from the above forking +service configuration accordingly. Note also that the configured `User` should +likely be the same as the capistrano3-puma `:puma_user` option. + +~~~~ sh +stage=production # or different stage, as needed +cap $stage puma:start --dry-run +cap $stage puma:stop --dry-run +~~~~ + +### Disabling Puma Systemd Integration + +If you would like to disable Puma's systemd integration, for example if you handle it elsewhere +in your code yourself, simply set the the environment variable `PUMA_SKIP_SYSTEMD` to any value. 
+ + + +[Restart]: https://www.freedesktop.org/software/systemd/man/systemd.service.html#Restart= +[#1367]: https://github.com/puma/puma/issues/1367 +[#1499]: https://github.com/puma/puma/issues/1499 diff --git a/vendor/cache/puma-fba741b91780/docs/testing_benchmarks_local_files.md b/vendor/cache/puma-fba741b91780/docs/testing_benchmarks_local_files.md new file mode 100644 index 000000000..aea776ba3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/testing_benchmarks_local_files.md @@ -0,0 +1,150 @@ +# Testing - benchmark/local files + +These files generate data that shows request-per-second (RPS), etc. Typically, files are in +pairs, a shell script and a Ruby script. The shell script starts the server, then runs the +Ruby file, which starts client request stream(s), then collects and logs metrics. + +## response_time_wrk.sh + +This uses [wrk] for generating data. One or more wrk runs are performed. Summarizes RPS and +wrk latency times. The default for the `-b` argument runs 28 different client request streams, +and takes a bit over 5 minutes. See 'Request Stream Configuration' below for `-b` argument +description. + +
+ Summary output for
benchmarks/local/response_time_wrk.sh -w2 -t5:5 -s tcp6:
+ +``` +Type req/sec 50% 75% 90% 99% 100% Resp Size +───────────────────────────────────────────────────────────────── 1kB +array 13710 0.74 2.52 5.23 7.76 37.45 1024 +chunk 13502 0.76 2.55 5.28 7.84 11.23 1042 +string 13794 0.74 2.51 5.20 7.75 14.07 1024 +io 9615 1.16 3.45 7.13 10.57 15.75 1024 +───────────────────────────────────────────────────────────────── 10kB +array 13458 0.76 2.57 5.31 7.93 13.94 10239 +chunk 13066 0.78 2.64 5.46 8.18 38.48 10320 +string 13500 0.76 2.55 5.29 7.88 11.42 10240 +io 9293 1.18 3.59 7.39 10.94 16.99 10240 +───────────────────────────────────────────────────────────────── 100kB +array 11315 0.96 3.06 6.33 9.49 17.69 102424 +chunk 9916 1.10 3.48 7.20 10.73 15.14 103075 +string 10948 1.00 3.17 6.57 9.83 17.88 102378 +io 8901 1.21 3.72 7.48 11.27 59.98 102407 +───────────────────────────────────────────────────────────────── 256kB +array 9217 1.15 3.82 7.88 11.74 17.12 262212 +chunk 7339 1.45 4.76 9.81 14.63 22.70 264007 +string 8574 1.19 3.81 7.73 11.21 15.80 262147 +io 8911 1.19 3.80 7.55 15.25 60.01 262183 +───────────────────────────────────────────────────────────────── 512kB +array 6951 1.49 5.03 10.28 15.90 25.08 524378 +chunk 5234 2.03 6.56 13.57 20.46 32.15 527862 +string 6438 1.55 5.04 10.12 16.28 72.87 524275 +io 8533 1.15 4.62 8.79 48.15 70.51 524327 +───────────────────────────────────────────────────────────────── 1024kB +array 4122 1.80 15.59 41.87 67.79 121.00 1048565 +chunk 3158 2.82 15.22 31.00 71.39 99.90 1055654 +string 4710 2.24 6.66 13.65 20.38 70.44 1048575 +io 8355 1.23 3.95 7.94 14.08 68.54 1048498 +───────────────────────────────────────────────────────────────── 2048kB +array 2454 4.12 14.02 27.70 43.48 88.89 2097415 +chunk 1743 6.26 17.65 36.98 55.78 92.10 2111358 +string 2479 4.38 12.52 25.65 38.44 95.62 2097502 +io 8264 1.25 3.83 7.76 11.73 65.69 2097090 + +Body ────────── req/sec ────────── ─────── req 50% times ─────── + KB array chunk string io array chunk string io +1 13710 13502 13794 9615 0.745 
0.757 0.741 1.160 +10 13458 13066 13500 9293 0.760 0.784 0.759 1.180 +100 11315 9916 10948 8901 0.960 1.100 1.000 1.210 +256 9217 7339 8574 8911 1.150 1.450 1.190 1.190 +512 6951 5234 6438 8533 1.490 2.030 1.550 1.150 +1024 4122 3158 4710 8355 1.800 2.820 2.240 1.230 +2048 2454 1743 2479 8264 4.120 6.260 4.380 1.250 +───────────────────────────────────────────────────────────────────── +wrk -t8 -c16 -d10s +benchmarks/local/response_time_wrk.sh -w2 -t5:5 -s tcp6 -Y +Server cluster mode -w2 -t5:5, bind: tcp6 +Puma repo branch 00-response-refactor +ruby 3.2.0dev (2022-06-14T01:21:55Z master 048f14221c) +YJIT [x86_64-linux] + +[2136] - Gracefully shutting down workers... +[2136] === puma shutdown: 2022-06-13 21:16:13 -0500 === +[2136] - Goodbye! + + 5:15 Total Time +``` +

+ +## bench_base.sh, bench_base.rb + +These two files setup parameters for the Puma server, which is normally started in a shell +script. It then starts a Ruby file (a subclass of BenchBase), passing arguments to it. The +Ruby file is normally used to generate a client request stream(s). + +### Puma Configuration + +The following arguments are used for the Puma server: + +* **`-C`** - configuration file +* **`-d`** - app delay +* **`-r`** - rackup file, often defaults to test/rackup/ci_select.ru +* **`-s`** - bind socket type, default is tcp/tcp4, also tcp6, ssl/ssl4, ssl6, unix, or aunix + (unix & abstract unix are not available with wrk). +* **`-t`** - threads, expressed as '5:5', same as Puma --thread +* **`-w`** - workers, same as Puma --worker +* **`-Y`** - enable Ruby YJIT + +### Request Stream Configuration + +The following arguments are used for request streams: + +* **`-b`** - response body configuration. Body type options are a array, c chunked, s string, + and i for File/IO. None or any combination can be specified, they should start the option. + Then, any combination of comma separated integers can be used for the response body size + in kB. The string 'ac50,100' would create four runs, 50kb array, 50kB chunked, 100kB array, + and 100kB chunked. See 'Testing - test/rackup/ci-*.ru files' for more info. +* **`-c`** - connections per client request stream thread, defaults to 2 for wrk. +* **`-D`** - duration of client request stream in seconds. +* **`-T`** - number of threads in the client request stream. For wrk, this defaults to + 80% of Puma workers * max_threads. + +### Notes - Configuration + +The above lists script arguments. + +`bench_base.sh` contains most server defaults. Many can be set via ENV variables. + +`bench_base.rb` contains the client request stream defaults. The default value for +`-b` is `acsi1,10,100,256,512,1024,2048`, which is a 4 x 7 matrix, and hence, runs +28 jobs. 
Also, the i body type (File/IO) generates files, they are placed in the +`"#{Dir.tmpdir}/.puma_response_body_io"` directory, which is created. + +### Notes - wrk + +The shell scripts use `-T` for wrk's thread count, since `-t` is used for Puma +server threads. Regarding the `-c` argument, wrk has an interesting behavior. +The total number of connections is set by `(connections/threads).to_i`. The scripts +here use `-c` as connections per thread. Hence, using `-T4 -c2` will yield a total +of eight wrk connections, two per thread. The equivalent wrk arguments would be `-t4 -c8`. + +Puma can only process so many requests, and requests will queue in the backlog +until Puma can respond to them. With wrk, if the number of total connections is +too high, one will see the upper latency times increase, pushing into the lower +latency times as the connections are increased. The default values for wrk's +threads and connections were chosen to minimize requests' time in the backlog. + +An example with four wrk runs using `-b s10`. Notice that `req/sec` varies by +less than 1%, but the `75%` times increase by an order of magnitude: +``` +req/sec 50% 75% 90% 99% 100% Resp Size wrk cmd line +───────────────────────────────────────────────────────────────────────────── + 13597 0.755 2.550 5.260 7.800 13.310 12040 wrk -t8 -c16 -d10 + 13549 0.793 4.430 8.140 11.220 16.600 12002 wrk -t10 -c20 -d10 + 13570 1.040 25.790 40.010 49.070 58.300 11982 wrk -t8 -c64 -d10 + 13684 1.050 25.820 40.080 49.160 66.190 12033 wrk -t16 -c64 -d10 +``` +Finally, wrk's output may cause rounding errors, so the response body size calculation is +imprecise. 
+ +[wrk]: diff --git a/vendor/cache/puma-fba741b91780/docs/testing_test_rackup_ci_files.md b/vendor/cache/puma-fba741b91780/docs/testing_test_rackup_ci_files.md new file mode 100644 index 000000000..029d82568 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/docs/testing_test_rackup_ci_files.md @@ -0,0 +1,36 @@ +# Testing - test/rackup/ci-*.ru files + +## Overview + +Puma should efficiently handle a variety of response bodies, varying both by size +and by the type of object used for the body. + +Five rackup files are located in 'test/rackup' that can be used. All have their +request body size (in kB) set via `Body-Conf` header or with `ENV['CI_BODY_CONF']`. +Additionally, the ci_select.ru file can have it's body type set via a starting +character. + +* **ci_array.ru** - body is an `Array` of 1kB strings. `Content-Length` is not set. +* **ci_chunked.ru** - body is an `Enumerator` of 1kB strings. `Content-Length` is not set. +* **ci_io.ru** - body is a File/IO object. `Content-Length` is set. +* **ci_string.ru** - body is a single string. `Content-Length` is set. +* **ci_select.ru** - can be any of the above. + +All responses have 25 headers, total length approx 1kB. ci_array.ru and ci_chunked.ru +contain 1kB items. + +All can be delayed by a float value (seconds) specified by the `Dly` header + +Note that rhe `Body-Conf` header takes precedence, and `ENV['CI_BODY_CONF']` is +only read on load. + +## ci_select.ru + +The ci_select.ru file allows a starting character to specify the body type in the +`Body-Conf` header or with `ENV['CI_BODY_CONF']`. +* **a** - array of strings +* **c** - chunked (enum) +* **s** - single string +* **i** - File/IO + +A value of `a100` would return a body as an array of 100 1kB strings. 
diff --git a/vendor/cache/puma-fba741b91780/examples/generate_all_certs b/vendor/cache/puma-fba741b91780/examples/generate_all_certs new file mode 100755 index 000000000..d58f1129c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/generate_all_certs @@ -0,0 +1,69 @@ +#!/bin/bash + +# exit on error +set -e + +if [[ "$OSTYPE" == "darwin"* ]]; then + Dash="\n\e[93m────────────────────────────────────────────────────" +else + Dash="\n\e[38;2;223;223;16m────────────────────────────────────────────────────" +fi + +Reset="\e[0m\n" + +# change to script folder +SCRIPT=$(realpath "$0") +CERT_HOME=$(dirname "$SCRIPT") + +#——————————————————————————————— process ssl files in examples/puma +printf "$Dash process ssl files in examples/puma$Reset" +cd $CERT_HOME/puma +ruby ../generate_server_test.rb +printf "Done with ../generate_server_test.rb\n\n" + +rm -f server.p12 +openssl pkcs12 -export -password pass:jruby_puma -inkey puma_keypair.pem -in cert_puma.pem -name puma -out server.p12 +printf "Done with server.p12\n\n" + +rm -f keystore.jks +keytool -importkeystore -srckeystore server.p12 -srcstoretype pkcs12 -srcstorepass jruby_puma -destkeystore keystore.jks -deststoretype JKS -storepass jruby_puma +printf "Done with keystore.jks\n\n" + +rm -f encrypted_puma_keypair.pem +openssl rsa -aes256 -in puma_keypair.pem -out encrypted_puma_keypair.pem --passout pass:'hello world' +printf "Done with encrypted_puma_keypair.pem\n\n" + +#——————————————————————————————— process ssl files in examples/puma/chain_cert +printf "$Dash process ssl files in examples/puma/chain_cert$Reset" +cd $CERT_HOME/puma/chain_cert +ruby ../../generate_chain_test.rb +printf "Done with ../../generate_chain_test.rb\n\n" + +#——————————————————————————————— process ssl files in examples/puma/client_certs +printf "$Dash process ssl files in examples/puma/client_certs$Reset" +cd $CERT_HOME/puma/client_certs +ruby ../../generate_client_test.rb +printf "Done with ../../generate_client_test.rb\n\n" + +rm -f 
server.p12 +openssl pkcs12 -chain -CAfile ./ca.crt -export -password pass:jruby_puma -inkey server.key -in server.crt -name server -out server.p12 +printf "Done with server.p12\n\n" + +rm -f keystore.jks +keytool -importkeystore -srckeystore server.p12 -srcstoretype pkcs12 -srcstorepass jruby_puma -destkeystore keystore.jks -deststoretype pkcs12 -storepass jruby_puma +keytool -importcert -alias ca -noprompt -trustcacerts -file ca.crt -keystore keystore.jks -storepass jruby_puma +printf "Done with keystore.jks\n\n" + +rm -f ca_store.p12 +openssl pkcs12 -export -password pass:jruby_puma -inkey ca.key -in ca.crt -name ca -out ca_store.p12 +printf "Done with ca_store.p12\n\n" + +rm -f ca_store.jks +keytool -importcert -alias mykey -noprompt -trustcacerts -file ca.crt -keystore ca_store.jks -deststoretype jks -keypass jruby_puma -storepass jruby_puma +printf "Done with ca_store.jks\n\n" + +rm -f unknown_ca_store.p12 +openssl pkcs12 -export -password pass:jruby_puma -inkey unknown_ca.key -in unknown_ca.crt -name server -out unknown_ca_store.p12 +printf "Done with unknown_ca_store.p12\n\n" + +printf "$Dash Done$Reset" diff --git a/vendor/cache/puma-fba741b91780/examples/generate_all_certs.md b/vendor/cache/puma-fba741b91780/examples/generate_all_certs.md new file mode 100644 index 000000000..71f1a0ef4 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/generate_all_certs.md @@ -0,0 +1,7 @@ +## Updating Puma test cert files + +One must have openssl and keytool in their Path. Keytool is part of the JDK. + +It maybe helpful to install the ['certificate_authority'](https://github.com/cchandler/certificate_authority) gem. + +Then run 'examples/generate_all_certs' from a bash shell. 
diff --git a/vendor/cache/puma-fba741b91780/examples/generate_chain_test.rb b/vendor/cache/puma-fba741b91780/examples/generate_chain_test.rb new file mode 100644 index 000000000..2676760fc --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/generate_chain_test.rb @@ -0,0 +1,121 @@ +# frozen_string_literal: true + +=begin +regenerates ca.pem, ca_keypair.pem, + subca.pem, subca_keypair.pem, + cert.pem, cert_keypair.pem + ca_chain.pem, + cert_chain.pem + +certs before date will be the first of the current month +=end + +require 'bundler/inline' +gemfile(true) do + source 'https://rubygems.org' + gem 'certificate_authority' +end + +module GenerateChainCerts + + CA = "ca.crt" + CA_KEY = "ca.key" + INTERMEDIATE = "intermediate.crt" + INTERMEDIATE_KEY = "intermediate.key" + CERT = "cert.crt" + CERT_KEY = "cert.key" + + CA_CHAIN = "ca_chain.pem" + CERT_CHAIN = "cert_chain.pem" + + class << self + + def before_after + @before_after ||= ( + now = Time.now.utc + mo = now.month + yr = now.year + zone = '+00:00' + + { + not_before: Time.new(yr, mo, 1, 0, 0, 0, zone), + not_after: Time.new(yr+4, mo, 1, 0, 0, 0, zone) + } + ) + end + + def root_ca + @root_ca ||= generate_ca + end + + def intermediate_ca + @intermediate_ca ||= generate_ca(common_name: "intermediate.puma.localhost", parent: root_ca) + end + + def generate_ca(common_name: "ca.puma.localhost", parent: nil) + ca = CertificateAuthority::Certificate.new + + ca.subject.common_name = common_name + ca.signing_entity = true + ca.not_before = before_after[:not_before] + ca.not_after = before_after[:not_after] + + ca.key_material.generate_key + + if parent + ca.serial_number.number = parent.serial_number.number + 10 + ca.parent = parent + else + ca.serial_number.number = 1 + end + + signing_profile = {"extensions" => {"keyUsage" => {"usage" => ["critical", "keyCertSign"] }} } + + ca.sign!(signing_profile) + + ca + end + + def generate_cert(common_name: "test.puma.localhost", parent: intermediate_ca) + + cert = 
CertificateAuthority::Certificate.new + + cert.subject.common_name = common_name + cert.serial_number.number = parent.serial_number.number + 100 + cert.parent = parent + + cert.key_material.generate_key + cert.sign! + + cert + end + + def run + cert = generate_cert + + path = "#{__dir__}/puma/chain_cert" + + Dir.chdir path do + File.write CA, root_ca.to_pem, mode: 'wb' + File.write CA_KEY, root_ca.key_material.private_key.to_pem, mode: 'wb' + + File.write INTERMEDIATE, intermediate_ca.to_pem, mode: 'wb' + File.write INTERMEDIATE_KEY, intermediate_ca.key_material.private_key.to_pem, mode: 'wb' + + File.write CERT, cert.to_pem, mode: 'wb' + File.write CERT_KEY, cert.key_material.private_key.to_pem, mode: 'wb' + + ca_chain = intermediate_ca.to_pem + root_ca.to_pem + File.write CA_CHAIN, ca_chain, mode: 'wb' + + cert_chain = cert.to_pem + ca_chain + File.write CERT_CHAIN, cert_chain, mode: 'wb' + end + rescue => e + puts "error: #{e.message}" + exit 1 + end + end +end + +GenerateChainCerts.run diff --git a/vendor/cache/puma-fba741b91780/examples/generate_client_test.rb b/vendor/cache/puma-fba741b91780/examples/generate_client_test.rb new file mode 100644 index 000000000..f8759163e --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/generate_client_test.rb @@ -0,0 +1,129 @@ +# frozen_string_literal: false + +=begin +run code to generate all certs +certs before date will be the first of the current month +=end + +require "openssl" + +module GenerateClientCerts + + KEY_LEN = 2048 + SIGN_ALGORITHM = OpenSSL::Digest::SHA256 + + CA_EXTS = [ + ["basicConstraints","CA:TRUE",true], + ["keyUsage","cRLSign,keyCertSign",true], + ] + EE_EXTS = [ + #["keyUsage","keyEncipherment,digitalSignature",true], + ["keyUsage","keyEncipherment,dataEncipherment,digitalSignature",true], + ] + + class << self + def run + set_dates + output_info + setup_issue + write_files + rescue => e + puts "error: #{e.message}" + exit 1 + end + + private + + def setup_issue + ca = 
OpenSSL::X509::Name.parse "/DC=net/DC=puma/CN=CA" + ca_u = OpenSSL::X509::Name.parse "/DC=net/DC=puma/CN=CAU" + svr = OpenSSL::X509::Name.parse "/DC=net/DC=puma/CN=localhost" + cli = OpenSSL::X509::Name.parse "/DC=net/DC=puma/CN=localhost" + cli_u = OpenSSL::X509::Name.parse "/DC=net/DC=puma/CN=localhost" + + [:@ca_key, :@svr_key, :@cli_key, :@ca_key_u, :@cli_key_u].each do |k| + instance_variable_set k, OpenSSL::PKey::RSA.generate(KEY_LEN) + end + + @ca_cert = issue_cert ca , @ca_key , 3, @before, @after, CA_EXTS, nil , nil , SIGN_ALGORITHM.new + @svr_cert = issue_cert svr, @svr_key, 7, @before, @after, EE_EXTS, @ca_cert, @ca_key, SIGN_ALGORITHM.new + @cli_cert = issue_cert cli, @cli_key, 11, @before, @after, EE_EXTS, @ca_cert, @ca_key, SIGN_ALGORITHM.new + + # unknown certs + @ca_cert_u = issue_cert ca_u , @ca_key_u , 17, @before, @after, CA_EXTS, nil , nil , SIGN_ALGORITHM.new + @cli_cert_u = issue_cert cli_u, @cli_key_u, 19, @before, @after, EE_EXTS, @ca_cert_u, @ca_key_u, SIGN_ALGORITHM.new + + # expired cert is identical to client cert with different dates + @cli_cert_exp = issue_cert cli, @cli_key, 23, @b_exp, @a_exp, EE_EXTS, @ca_cert, @ca_key, SIGN_ALGORITHM.new + end + + def issue_cert(dn, key, serial, not_before, not_after, extensions, issuer, issuer_key, digest) + cert = OpenSSL::X509::Certificate.new + issuer = cert unless issuer + issuer_key = key unless issuer_key + cert.version = 2 + cert.serial = serial + cert.subject = dn + cert.issuer = issuer.subject + cert.public_key = key.public_key + cert.not_before = not_before + cert.not_after = not_after + ef = OpenSSL::X509::ExtensionFactory.new + ef.subject_certificate = cert + ef.issuer_certificate = issuer + extensions.each { |oid, value, critical| + cert.add_extension(ef.create_extension(oid, value, critical)) + } + cert.sign(issuer_key, digest) + cert + end + + def write_files + path = "#{__dir__}/puma/client_certs" + + Dir.chdir path do + File.write "ca.crt" , @ca_cert.to_pem , mode: 'wb' + 
File.write "ca.key" , @ca_key.to_pem , mode: 'wb' + File.write "server.crt", @svr_cert.to_pem, mode: 'wb' + File.write "server.key", @svr_key.to_pem , mode: 'wb' + File.write "client.crt", @cli_cert.to_pem, mode: 'wb' + File.write "client.key", @cli_key.to_pem , mode: 'wb' + + File.write "unknown_ca.crt", @ca_cert_u.to_pem, mode: 'wb' + File.write "unknown_ca.key", @ca_key_u.to_pem , mode: 'wb' + + File.write "client_unknown.crt", @cli_cert_u.to_pem, mode: 'wb' + File.write "client_unknown.key", @cli_key_u.to_pem , mode: 'wb' + + File.write "client_expired.crt", @cli_cert_exp.to_pem, mode: 'wb' + File.write "client_expired.key", @cli_key.to_pem , mode: 'wb' + end + end + + def set_dates + now = Time.now.utc + mo = now.month + yr = now.year + zone = '+00:00' + + @before = Time.new yr , mo, 1, 0, 0, 0, zone + @after = Time.new yr+4, mo, 1, 0, 0, 0, zone + + @b_exp = Time.new yr-1, mo, 1, 0, 0, 0, zone + @a_exp = Time.new yr , mo, 1, 0, 0, 0, zone + end + + def output_info + puts <<~INFO + Key length: #{KEY_LEN} + sign_algorithm: #{SIGN_ALGORITHM} + + Normal cert dates: #{@before} to #{@after} + + Expired cert dates: #{@b_exp} to #{@a_exp} + INFO + end + end +end + +GenerateClientCerts.run diff --git a/vendor/cache/puma-fba741b91780/examples/generate_server_test.rb b/vendor/cache/puma-fba741b91780/examples/generate_server_test.rb new file mode 100644 index 000000000..cf9364a4f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/generate_server_test.rb @@ -0,0 +1,52 @@ +# frozen_string_literal: true + +=begin +regenerates cert_puma.pem and puma_keypair.pem +dates, key length & sign_algorithm are changed +=end + +require 'openssl' + +module GenerateServerCerts + + KEY_LEN = 2048 + SIGN_ALGORITHM = OpenSSL::Digest::SHA256 + + FNC = 'cert_puma.pem' + FNK = 'puma_keypair.pem' + + class << self + + def run + path = "#{__dir__}/puma" + ca_key = OpenSSL::PKey::RSA.new KEY_LEN + key = OpenSSL::PKey::RSA.new KEY_LEN + + raw = File.read File.join(path, FNC), mode: 'rb' + 
+ cert = OpenSSL::X509::Certificate.new raw + puts "\nOld:", cert.to_text, "" + + now = Time.now.utc + mo = now.month + yr = now.year + zone = '+00:00' + + cert.not_before = Time.new yr , mo, 1, 0, 0, 0, zone + cert.not_after = Time.new yr+4, mo, 1, 0, 0, 0, zone + cert.public_key = key.public_key + cert.sign ca_key, SIGN_ALGORITHM.new + puts "New:", cert.to_text, "" + + Dir.chdir path do + File.write FNC, cert.to_pem, mode: 'wb' + File.write FNK, key.to_pem , mode: 'wb' + end + rescue => e + puts "error: #{e.message}" + exit 1 + end + end +end + +GenerateServerCerts.run diff --git a/vendor/cache/puma-fba741b91780/examples/plugins/redis_stop_puma.rb b/vendor/cache/puma-fba741b91780/examples/plugins/redis_stop_puma.rb new file mode 100644 index 000000000..5140e6c2d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/plugins/redis_stop_puma.rb @@ -0,0 +1,46 @@ +require 'puma/plugin' +require 'redis' + +# How to stop Puma on Heroku +# - You can't use normal methods because the dyno is not accessible +# - There's no file system, no way to send signals +# but ... +# - You can use Redis or Memcache; any network distributed key-value +# store + +# 1. Add this plugin to your 'lib' directory +# 2. In the `puma.rb` config file add the following lines +# === Plugins === +# require './lib/puma/plugin/redis_stop_puma' +# plugin 'redis_stop_puma' +# 3. Now, when you set the redis key "puma::restart::web.1", your web.1 dyno +# will restart +# 4. Sniffing the Heroku logs for R14 errors is application (and configuration) +# specific. I use the Logentries service, watch for the pattern and the call +# a webhook back into my app to set the Redis key. YMMV + +# You can test this locally by setting the DYNO environment variable when +# when starting puma, e.g. 
`DYNO=pants.1 puma` + +Puma::Plugin.create do + def start(launcher) + + hostname = ENV['DYNO'] + return unless hostname + + redis = Redis.new(url: ENV.fetch('REDIS_URL', nil)) + return unless redis.ping == 'PONG' + + in_background do + while true + sleep 2 + if message = redis.get("puma::restart::#{hostname}") + redis.del("puma::restart::#{hostname}") + $stderr.puts message + launcher.stop + break + end + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/examples/puma/cert_puma.pem b/vendor/cache/puma-fba741b91780/examples/puma/cert_puma.pem new file mode 100644 index 000000000..6cbb9c1e5 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/cert_puma.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDgjCCAmqgAwIBAgIBAjANBgkqhkiG9w0BAQsFADA5MQswCQYDVQQGEwJVUzEO +MAwGA1UECgwFbG9jYWwxDTALBgNVBAsMBGFlcm8xCzAJBgNVBAMMAkNBMB4XDTI0 +MDgwMTAwMDAwMFoXDTI4MDgwMTAwMDAwMFowSDELMAkGA1UEBhMCVVMxDjAMBgNV +BAoMBWxvY2FsMQ0wCwYDVQQLDARhZXJvMQswCQYDVQQLDAJDQTENMAsGA1UEAwwE +cHVtYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPOVzZNAYuKXr8k4 +3QXP+FX6JFZ58f7YPG0kJjCfAx8FlRklusAHk7t/ya1wtiIIx/+pYszVM9yR6uTa +2w9SlYd+51P46XX00GK03OoZ7K0UMePBkbJimmSrld5HzEPoiEBt3iEtNEGSFZRi +IVuh84jIQhS4EWd9RGNmjcY8kIp2SKQdu6gOGVdFzAKRjXsQTa5yuAzbkwjiF3B+ +Ypsw4K13muSEINiNx0Cj9rL5DyfdYJbJqfbW9MsaQmdyNoR6IxJaEHF/wCQyaq5i +YeVmDs9PPmBSja4E966tM/ACdMD38rpAfHSon5rEpAgZk+3K4PR1/HPH3B/b/5Wb +4MzoEh8CAwEAAaOBhTCBgjAMBgNVHRMBAf8EAjAAMDEGCWCGSAGG+EIBDQQkFiJS +dWJ5L09wZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBTyDyJl +mYBDwfWdRj6lWGvoY43k9DALBgNVHQ8EBAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUH +AwEwDQYJKoZIhvcNAQELBQADggEBAFqmtTNf5/I0PbziaN2h/iPDw8mFQDOtN6gT +WFELM/Yh/0Nt2UJLkO78bzl+4b4uScBuq0E6Uf/oSg3z8gjx3OZbPhYvjqPPo3Qc +FsqxRlxMLxAYw7MU6ygTagrEeixHU9dlkLbf6YY/WmfJnTAFlYPvEcJ3jWGuTmj9 +yVYUW/ruSBG0aCYdjdTgvOB4XUtULRqT/kK+K529MmYoxeUPgLrPGnvXoSvj6T5N +S8sx9O6u2BJMtgeCTxbGDNMkVsxIFw+hMiGykYgkOJl1XJOLshsCpaPgGyFvYYmy +C52c6DIqT0WHGRwDO92D3FYd2UbhZBWABabOa1M+pRM0NqJTi9w= +-----END 
CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/ca.crt b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/ca.crt new file mode 100644 index 000000000..9acfe7967 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDOTCCAiGgAwIBAgIBATANBgkqhkiG9w0BAQ0FADAcMRowGAYDVQQDDBFjYS5w +dW1hLmxvY2FsaG9zdDAeFw0yNDA4MDEwMDAwMDBaFw0yODA4MDEwMDAwMDBaMBwx +GjAYBgNVBAMMEWNhLnB1bWEubG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuKZ43p+68AFTSVno4p8rxk2Ixngw9CR9K5/11LCS+nM9jPph +E0aWluwRAdWNs0CLN9A9ZKqJq+G67Y6CAw87g4RfUaZUkBkClwH76WUYzMTo9P3e +UMpwzVtdrijW2Y/9MnxMCyt14WVfoY0itjEyfeGT3FbP8yKuk2qYoz3YecrhiRGu +xv5gFZOd0hxIL0HkRe0lrpgWDj4MrvsBqUnb4aeqDgjz4lPUWo/GSvgU7xcV5Q6c +Ti8FB9k3HZfphb2JHydZfxIqCCeflZlAQBBKNevWbSxhfsfjzqHmC64M9ELoJ/1J +JFoX+zdlv9DqOn2k67N4rZcfptoEKPQThPmX+wIDAQABo4GFMIGCMB0GA1UdDgQW +BBQuEpl4C66N1A3ZvoVOcO8k6G21nTAOBgNVHQ8BAf8EBAMCAgQwEwYDVR0lBAww +CgYIKwYBBQUHAwEwDAYDVR0TBAUwAwEB/zAuBgNVHSMEJzAloSCkHjAcMRowGAYD +VQQDDBFjYS5wdW1hLmxvY2FsaG9zdIIBATANBgkqhkiG9w0BAQ0FAAOCAQEAOy3A +9GPAQvTeqbmvMlkJh8jtTRUOKmcGUNPFQqJ4aTx/BXh8x415zjW9ZSjLN7ZSNxjJ +nFm1XeD9I1EB1JjXwnkNTgC7zUn+kyuIMBxEvfsM0Pfdumr8VOrlidIJeTLBs029 +tYd/kVlaZfoDS/cSu8ezS7urwY/usHRuku6MUCs4L1WZlDptweGUCWsESpIEGF7d +ZJC8f6C8rwQimOyhhouVSlOh1NxC338TvQSflhRtWiP4lIgDVNMmb4MRDAUzbkrg +SH4+Oho7EeoK2VSmgqSAyIpOTBBhIcdEgib7Hz+Uq+Y6KekuD8Y8SslbXitBqgS3 +8uByucqDNovFjYNMKw== +-----END CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/ca.key b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/ca.key new file mode 100644 index 000000000..c88448798 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAuKZ43p+68AFTSVno4p8rxk2Ixngw9CR9K5/11LCS+nM9jPph +E0aWluwRAdWNs0CLN9A9ZKqJq+G67Y6CAw87g4RfUaZUkBkClwH76WUYzMTo9P3e 
+UMpwzVtdrijW2Y/9MnxMCyt14WVfoY0itjEyfeGT3FbP8yKuk2qYoz3YecrhiRGu +xv5gFZOd0hxIL0HkRe0lrpgWDj4MrvsBqUnb4aeqDgjz4lPUWo/GSvgU7xcV5Q6c +Ti8FB9k3HZfphb2JHydZfxIqCCeflZlAQBBKNevWbSxhfsfjzqHmC64M9ELoJ/1J +JFoX+zdlv9DqOn2k67N4rZcfptoEKPQThPmX+wIDAQABAoIBAAKsQOT9bEKmd5gr +jwLIyboYdo8OP6x+Yrl2RU62Mkp3JJWdLOPkilrWQUtvgJpEUuo/P27s8QqQkpjM +Y/a7X7cq/AjoiXd/L9m+aPEbSjKcnxVSH3Jim84ShoFLjLX1hErDddkA4LdJ37Us +9SsB/KMfLXlrtZ2QC6A3/iGkIYq8bhgNh7tRIJ/ZzsIOLk/rpd1R8LNbUPgRUXKn +xmvZC+gF69dg/Hx2fdc7ZYcTTA8UXwgWNKvej6XGlO/0FOx3duZkP4FAS8/c3O9K +WX481+jE+KiTiUKjRi5Et8sXPjpjrxxqnwhIQxoCGEc7hmgQSKcczDhKWbiKYxch +iJUlSZECgYEAxOsshOEH+WbB9oCGwE5ml7FbIeV3GA6x7MYlIijauGsGtRe0Y3e+ +fEXMfvEjCf2wXgS/NZ42nwxygXlt7cXgQxY05Cd6UTQm7QQMN/p++k97nbez4gXu +H2jDv3cp1o+HHIxC+l6psTFojf0F7IjUmUS/I8opLcH5Qz/wklTfFr0CgYEA8Az+ +8p6rZeETJ0QEnFwZmmV9I/90YO4zBjDTbHdnG/qLC6OIO3i2LyNcWZLRf5j0F0fH +jPx0qSyh4njkfA0eB3fRBkxQI5/UOf45hsOWfVb/wep9OC8gbXLwH8ulf4hguFTr +NbO1muMH+Wd0vFIgTo7+8jBGAPH7THbJp1LcERcCgYEAtOEs0JYABChOjIX8wRKD +DF3eTyF/JrsA1nQf0ePjHLk6vQecLC3m7DIVKJmJTPkZkxMbLD47ABen6qRGxL0Z +RaZVSSm2InccNmSccKMJFhYCf5opgrldxL4v6GSmKq9GNYPlViO0Dc5LsvkzAtWh +K3RJeq+M6boeo5ZM/a02CqUCgYEAqQdNogNuBw/kT7rKUFtdv+UEqAthpDs1/H72 +8KJnHUFfFTPyYoYOKsUIax5BnEpUXewyLCk78D+4QeMEaAFIitOogHnN2Dfb1XYY +hbdFST6wWphF0gEFIWxUft8Dyvh5unI5KRhAoS6J1+ksPsrJauevx1OMh2/clqvy +NHrY8MMCgYEAtMtBJRDYAstjc/kuHn+ccXUd4fy+xrhJRh9xVdvY65EFjVztfUFn +uDCkJ373lV7fhhWK4NJiowsYbY8XoGz/tU3hg3I6H2ekn+AHQ/ia+W2kte2WRx/l +dBEr4xL5HnxhtJD/tEysHKoR9HNlknAqUdLich6fL9uu+3TpJsFuAEM= +-----END RSA PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/ca_chain.pem b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/ca_chain.pem new file mode 100644 index 000000000..7d500ef1b --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/ca_chain.pem @@ -0,0 +1,40 @@ +-----BEGIN CERTIFICATE----- +MIIDMjCCAhqgAwIBAgIBCzANBgkqhkiG9w0BAQ0FADAcMRowGAYDVQQDDBFjYS5w 
+dW1hLmxvY2FsaG9zdDAeFw0yNDA4MDEwMDAwMDBaFw0yODA4MDEwMDAwMDBaMCYx +JDAiBgNVBAMMG2ludGVybWVkaWF0ZS5wdW1hLmxvY2FsaG9zdDCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKe0Vb0OyhNyWi4/u1jwLOkDYGx7+aXOJJyN +5ExJNS0goWy8+OQKVBwFgZZ5jqqfrXlLheOpAEpRm7TIJVTIwD1/gi/zrNN1ec3D +uJMBvlTzNVozfBncQ9EJ62JRC3I/56W1xYMekCIG2SjzSnAfNSojyN6SLxGmtT89 +xN6parFq/GcvEtssDRVVUm6pyIOgpDOgTuUcqT3qUgOkN9w+5ZgjKWAc1BuMipR9 +f8wwjuhXx2XlH2lodVWl94xo4ipY7sd2frF/oil9SXDC3ZrwGdkt9TQaCMYlt74V +yNxPAX5XwRH8b2OAsIGkPNh7TvIkKR33JkKcXXpZbxlRP6PDWR0CAwEAAaN1MHMw +HQYDVR0OBBYEFNVfAg8SLxqTN54wYWiWfxVic+iHMA4GA1UdDwEB/wQEAwICBDAT +BgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMEBTADAQH/MB8GA1UdIwQYMBaAFC4S +mXgLro3UDdm+hU5w7yTobbWdMA0GCSqGSIb3DQEBDQUAA4IBAQAOdOtD3py2k4OO +0Q5KvmvyybaeSlFPM6/iXNakmqJY3lELnJgjWYgBXlOK91M7F2xG8YRFoBrUipXd +rMAburoWjnKvMoPaGZqbpligfkPIueo+ON/Q27UhvH2EYFzTOGjfnCiSCzpekesC +32MhK7mB9Uzu9yxDgtRu0IRqCk9WbU35qDzx7mUEZwCHe11T6tV4k9ZHAvhqEvaq +sqAgA+OoXBev6wx1tWgiqf40vuKZBRutWUSTHgGIXqain2l5b7Jv6TkCNI4HttHN +43oME4M6P1/iH+l99Y7fZBkLGnkGYPiWXQH9NyydQVHOBm3dN26QJzAmV06ZLxEm +iX6YKGNd +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOTCCAiGgAwIBAgIBATANBgkqhkiG9w0BAQ0FADAcMRowGAYDVQQDDBFjYS5w +dW1hLmxvY2FsaG9zdDAeFw0yNDA4MDEwMDAwMDBaFw0yODA4MDEwMDAwMDBaMBwx +GjAYBgNVBAMMEWNhLnB1bWEubG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuKZ43p+68AFTSVno4p8rxk2Ixngw9CR9K5/11LCS+nM9jPph +E0aWluwRAdWNs0CLN9A9ZKqJq+G67Y6CAw87g4RfUaZUkBkClwH76WUYzMTo9P3e +UMpwzVtdrijW2Y/9MnxMCyt14WVfoY0itjEyfeGT3FbP8yKuk2qYoz3YecrhiRGu +xv5gFZOd0hxIL0HkRe0lrpgWDj4MrvsBqUnb4aeqDgjz4lPUWo/GSvgU7xcV5Q6c +Ti8FB9k3HZfphb2JHydZfxIqCCeflZlAQBBKNevWbSxhfsfjzqHmC64M9ELoJ/1J +JFoX+zdlv9DqOn2k67N4rZcfptoEKPQThPmX+wIDAQABo4GFMIGCMB0GA1UdDgQW +BBQuEpl4C66N1A3ZvoVOcO8k6G21nTAOBgNVHQ8BAf8EBAMCAgQwEwYDVR0lBAww +CgYIKwYBBQUHAwEwDAYDVR0TBAUwAwEB/zAuBgNVHSMEJzAloSCkHjAcMRowGAYD +VQQDDBFjYS5wdW1hLmxvY2FsaG9zdIIBATANBgkqhkiG9w0BAQ0FAAOCAQEAOy3A +9GPAQvTeqbmvMlkJh8jtTRUOKmcGUNPFQqJ4aTx/BXh8x415zjW9ZSjLN7ZSNxjJ 
+nFm1XeD9I1EB1JjXwnkNTgC7zUn+kyuIMBxEvfsM0Pfdumr8VOrlidIJeTLBs029 +tYd/kVlaZfoDS/cSu8ezS7urwY/usHRuku6MUCs4L1WZlDptweGUCWsESpIEGF7d +ZJC8f6C8rwQimOyhhouVSlOh1NxC338TvQSflhRtWiP4lIgDVNMmb4MRDAUzbkrg +SH4+Oho7EeoK2VSmgqSAyIpOTBBhIcdEgib7Hz+Uq+Y6KekuD8Y8SslbXitBqgS3 +8uByucqDNovFjYNMKw== +-----END CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/cert.crt b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/cert.crt new file mode 100644 index 000000000..8467b479c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/cert.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIBbzANBgkqhkiG9w0BAQ0FADAmMSQwIgYDVQQDDBtpbnRl +cm1lZGlhdGUucHVtYS5sb2NhbGhvc3QwHhcNMjQwODExMDAwMDAwWhcNMjUwODEx +MDAwMDAwWjAeMRwwGgYDVQQDDBN0ZXN0LnB1bWEubG9jYWxob3N0MIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAkH1hQqC0EFwYxi5+Ihqat9Yqebpk0A9x +ZzdO68eBSw4glnhh/+8VNB238q3nVD+0oL5oOBFVPAA34jUrrngS3LSl7tx4y1tj +jqn2LwKkdjAaaQn7uNKf/Ow6JX8dsaFpEUmabOzQBbhQtUDdbHmX1jgJYy8siSdU +ExXtNHAA+kqlif0Hcj1Xx8dlXQlLiDgdO1I5h84rjfdX+TUGwnJO5MkIZ5X5r6tE +hk8s/2dvbrU2AKcP+pTY4jw/fyDNd9q8OdW186RlBdS+uyX3D2hh+8aVLONQIJOx +wRyQaImYzHPWMDO6MuQ5/5LAxFFYWCGSLvUTdrillB0Snmppe9uGhwIDAQABo28w +bTAdBgNVHQ4EFgQUQT/iPOTx/zDEIgWHiraGJdrud74wCwYDVR0PBAQDAgbAMBMG +A1UdJQQMMAoGCCsGAQUFBwMBMAkGA1UdEwQCMAAwHwYDVR0jBBgwFoAU1V8CDxIv +GpM3njBhaJZ/FWJz6IcwDQYJKoZIhvcNAQENBQADggEBADHQ75un1UtxDI9oluXu +wEJL0515nIAKjaWFCl0Z/CGaAWkkJERk0JBxTe9J/MrYY68NYCCAusRoUoRbvcA2 +GQ8vKae5V6/md+npZpKQznmUk+nZMzNKr+QoMY/nvtLA5tZ85fsU40H4nbvb23EY +3o6IiEbLD0G3FbIFtkgQ7+j9w6bvvMwd+3Ed0jSjRl4wKi4cXgD1UIyce/MVzAzT +bXW2SLWjfRzcTsrAnrbno5S3g+KMD1w9zGNppYiFH0zydpatKjJW18r/6XgJqoim +Rl5OGVnMNgIAw+S8/G3+/IIPXWHFE29R3j3oXjm/F8570TpQTYyx8wOmEr+zJPv0 +8gU= +-----END CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/cert.key b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/cert.key new file mode 100644 index 000000000..ed2bd0d82 --- /dev/null 
+++ b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/cert.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAkH1hQqC0EFwYxi5+Ihqat9Yqebpk0A9xZzdO68eBSw4glnhh +/+8VNB238q3nVD+0oL5oOBFVPAA34jUrrngS3LSl7tx4y1tjjqn2LwKkdjAaaQn7 +uNKf/Ow6JX8dsaFpEUmabOzQBbhQtUDdbHmX1jgJYy8siSdUExXtNHAA+kqlif0H +cj1Xx8dlXQlLiDgdO1I5h84rjfdX+TUGwnJO5MkIZ5X5r6tEhk8s/2dvbrU2AKcP ++pTY4jw/fyDNd9q8OdW186RlBdS+uyX3D2hh+8aVLONQIJOxwRyQaImYzHPWMDO6 +MuQ5/5LAxFFYWCGSLvUTdrillB0Snmppe9uGhwIDAQABAoIBAAP+IvjnXfJxyRF4 +6W1h1NKtrWDNwMlrE9QpWBn12mHLkyT1UKf04F4fUBz5p0HRvNkQSer6t6M/0kuC +8qEQmlzU9RzjpMbh0J95nN7xsL+/Vot+hiE2WMtxb63nlFd7QV8D7KuD47+0ty47 +C6NavNJjnAbMn+tuVcltOq/PRIemC8j2IypizQfeOh/1TKtetotPkCiIZnl32UJe +TH2Z3y7Ig9N6bUerYFUkH1Olri2SvZf20Xy1gpARqh2kRz+VaCZjEbAubSu4GOtG +xMQq7ZL4GYjYQt3e0rOHeFCMc6N2UdzC5vW8dRNBLRWbUCaqqCoe9iYgkzNMqhny +rvUhdxkCgYEAxW/4LoF0piecrUAXoqd2K+ojAvYqExgsO0BMdphGt//lQu9bl6qo +kZDGwHZuDweFrdRA0p85FhwZqpv26kri5MAsuywr5f7OUF9ZJM7GusGoU3ND78yO +NYJOhwV+MTksTTyQW3MGGM5HisEvW2m+s1SpNTQh6wO7TuKk/5dUvQsCgYEAu1jv +M55gdC56ckF+7ehWPV1TedeIz/Y+Ezh55kQhBKMPOT48QTX4GM4rxKJGuWxJPE5E +ZCfnhp1kSYAEBNalm9nk56kP+4soRFKDu6Tl0xAKHNahQTX0uWdZuIb7i8Z1AakI +w3KghinEKnOZC8bVQNhw3a6gJ0lMOLEO7u/qsfUCgYEAsklF51jnKRMkHB6AOVTX +FRi8vPPlUpDFgEuzwHQJiCX/tM3H+9ETnz8I+sr234N/Z1T/pDI7rod5deKYuUEe +WkpfGjXs/id1zoYLTX2bXxF5GsI/Ws9wq0kb+Dkxlul149kI0kuv+b5sRBPm27lF +h1Kv6WdGKZ6ZCccID/Vrg7kCgYAic9RpVbK/KtzxrgG9loBlgIgdW/mY5Nu9hSFF +qzm3NJVUHZHPHDGnviJmYsFG0BcXs/z3vi7hnogWzCfxC9ggtQ1XXMvHomuhRNtc +4pIxIj35166H43hBsDPbr1cK+VADIiauaqICiiO8+s0mUUZeSm9XFQn2r5AIzObG +IfCzEQKBgHpa6qjC9HMG1JFfM/I9piFPurtTVcdA+o8fPymUERX4S6B9H4vUY2L/ +jgR/ckxJbje7OU9TkBBKQPDLXXvm2Ut2qe20FrrQhDVazAtgPm4XonRivc0+SMop +e4omuh48CV4OUKYdYFNsswZhUnuiJNYnZ7oQSC/e3HCWooVDUJkz +-----END RSA PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/cert_chain.pem b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/cert_chain.pem new file mode 100644 index 000000000..5f391f3e3 --- 
/dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/cert_chain.pem @@ -0,0 +1,60 @@ +-----BEGIN CERTIFICATE----- +MIIDLjCCAhagAwIBAgIBbzANBgkqhkiG9w0BAQ0FADAmMSQwIgYDVQQDDBtpbnRl +cm1lZGlhdGUucHVtYS5sb2NhbGhvc3QwHhcNMjQwODExMDAwMDAwWhcNMjUwODEx +MDAwMDAwWjAeMRwwGgYDVQQDDBN0ZXN0LnB1bWEubG9jYWxob3N0MIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAkH1hQqC0EFwYxi5+Ihqat9Yqebpk0A9x +ZzdO68eBSw4glnhh/+8VNB238q3nVD+0oL5oOBFVPAA34jUrrngS3LSl7tx4y1tj +jqn2LwKkdjAaaQn7uNKf/Ow6JX8dsaFpEUmabOzQBbhQtUDdbHmX1jgJYy8siSdU +ExXtNHAA+kqlif0Hcj1Xx8dlXQlLiDgdO1I5h84rjfdX+TUGwnJO5MkIZ5X5r6tE +hk8s/2dvbrU2AKcP+pTY4jw/fyDNd9q8OdW186RlBdS+uyX3D2hh+8aVLONQIJOx +wRyQaImYzHPWMDO6MuQ5/5LAxFFYWCGSLvUTdrillB0Snmppe9uGhwIDAQABo28w +bTAdBgNVHQ4EFgQUQT/iPOTx/zDEIgWHiraGJdrud74wCwYDVR0PBAQDAgbAMBMG +A1UdJQQMMAoGCCsGAQUFBwMBMAkGA1UdEwQCMAAwHwYDVR0jBBgwFoAU1V8CDxIv +GpM3njBhaJZ/FWJz6IcwDQYJKoZIhvcNAQENBQADggEBADHQ75un1UtxDI9oluXu +wEJL0515nIAKjaWFCl0Z/CGaAWkkJERk0JBxTe9J/MrYY68NYCCAusRoUoRbvcA2 +GQ8vKae5V6/md+npZpKQznmUk+nZMzNKr+QoMY/nvtLA5tZ85fsU40H4nbvb23EY +3o6IiEbLD0G3FbIFtkgQ7+j9w6bvvMwd+3Ed0jSjRl4wKi4cXgD1UIyce/MVzAzT +bXW2SLWjfRzcTsrAnrbno5S3g+KMD1w9zGNppYiFH0zydpatKjJW18r/6XgJqoim +Rl5OGVnMNgIAw+S8/G3+/IIPXWHFE29R3j3oXjm/F8570TpQTYyx8wOmEr+zJPv0 +8gU= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDMjCCAhqgAwIBAgIBCzANBgkqhkiG9w0BAQ0FADAcMRowGAYDVQQDDBFjYS5w +dW1hLmxvY2FsaG9zdDAeFw0yNDA4MDEwMDAwMDBaFw0yODA4MDEwMDAwMDBaMCYx +JDAiBgNVBAMMG2ludGVybWVkaWF0ZS5wdW1hLmxvY2FsaG9zdDCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKe0Vb0OyhNyWi4/u1jwLOkDYGx7+aXOJJyN +5ExJNS0goWy8+OQKVBwFgZZ5jqqfrXlLheOpAEpRm7TIJVTIwD1/gi/zrNN1ec3D +uJMBvlTzNVozfBncQ9EJ62JRC3I/56W1xYMekCIG2SjzSnAfNSojyN6SLxGmtT89 +xN6parFq/GcvEtssDRVVUm6pyIOgpDOgTuUcqT3qUgOkN9w+5ZgjKWAc1BuMipR9 +f8wwjuhXx2XlH2lodVWl94xo4ipY7sd2frF/oil9SXDC3ZrwGdkt9TQaCMYlt74V +yNxPAX5XwRH8b2OAsIGkPNh7TvIkKR33JkKcXXpZbxlRP6PDWR0CAwEAAaN1MHMw +HQYDVR0OBBYEFNVfAg8SLxqTN54wYWiWfxVic+iHMA4GA1UdDwEB/wQEAwICBDAT 
+BgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMEBTADAQH/MB8GA1UdIwQYMBaAFC4S +mXgLro3UDdm+hU5w7yTobbWdMA0GCSqGSIb3DQEBDQUAA4IBAQAOdOtD3py2k4OO +0Q5KvmvyybaeSlFPM6/iXNakmqJY3lELnJgjWYgBXlOK91M7F2xG8YRFoBrUipXd +rMAburoWjnKvMoPaGZqbpligfkPIueo+ON/Q27UhvH2EYFzTOGjfnCiSCzpekesC +32MhK7mB9Uzu9yxDgtRu0IRqCk9WbU35qDzx7mUEZwCHe11T6tV4k9ZHAvhqEvaq +sqAgA+OoXBev6wx1tWgiqf40vuKZBRutWUSTHgGIXqain2l5b7Jv6TkCNI4HttHN +43oME4M6P1/iH+l99Y7fZBkLGnkGYPiWXQH9NyydQVHOBm3dN26QJzAmV06ZLxEm +iX6YKGNd +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOTCCAiGgAwIBAgIBATANBgkqhkiG9w0BAQ0FADAcMRowGAYDVQQDDBFjYS5w +dW1hLmxvY2FsaG9zdDAeFw0yNDA4MDEwMDAwMDBaFw0yODA4MDEwMDAwMDBaMBwx +GjAYBgNVBAMMEWNhLnB1bWEubG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAuKZ43p+68AFTSVno4p8rxk2Ixngw9CR9K5/11LCS+nM9jPph +E0aWluwRAdWNs0CLN9A9ZKqJq+G67Y6CAw87g4RfUaZUkBkClwH76WUYzMTo9P3e +UMpwzVtdrijW2Y/9MnxMCyt14WVfoY0itjEyfeGT3FbP8yKuk2qYoz3YecrhiRGu +xv5gFZOd0hxIL0HkRe0lrpgWDj4MrvsBqUnb4aeqDgjz4lPUWo/GSvgU7xcV5Q6c +Ti8FB9k3HZfphb2JHydZfxIqCCeflZlAQBBKNevWbSxhfsfjzqHmC64M9ELoJ/1J +JFoX+zdlv9DqOn2k67N4rZcfptoEKPQThPmX+wIDAQABo4GFMIGCMB0GA1UdDgQW +BBQuEpl4C66N1A3ZvoVOcO8k6G21nTAOBgNVHQ8BAf8EBAMCAgQwEwYDVR0lBAww +CgYIKwYBBQUHAwEwDAYDVR0TBAUwAwEB/zAuBgNVHSMEJzAloSCkHjAcMRowGAYD +VQQDDBFjYS5wdW1hLmxvY2FsaG9zdIIBATANBgkqhkiG9w0BAQ0FAAOCAQEAOy3A +9GPAQvTeqbmvMlkJh8jtTRUOKmcGUNPFQqJ4aTx/BXh8x415zjW9ZSjLN7ZSNxjJ +nFm1XeD9I1EB1JjXwnkNTgC7zUn+kyuIMBxEvfsM0Pfdumr8VOrlidIJeTLBs029 +tYd/kVlaZfoDS/cSu8ezS7urwY/usHRuku6MUCs4L1WZlDptweGUCWsESpIEGF7d +ZJC8f6C8rwQimOyhhouVSlOh1NxC338TvQSflhRtWiP4lIgDVNMmb4MRDAUzbkrg +SH4+Oho7EeoK2VSmgqSAyIpOTBBhIcdEgib7Hz+Uq+Y6KekuD8Y8SslbXitBqgS3 +8uByucqDNovFjYNMKw== +-----END CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/intermediate.crt b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/intermediate.crt new file mode 100644 index 000000000..cc9895e45 --- /dev/null +++ 
b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/intermediate.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDMjCCAhqgAwIBAgIBCzANBgkqhkiG9w0BAQ0FADAcMRowGAYDVQQDDBFjYS5w +dW1hLmxvY2FsaG9zdDAeFw0yNDA4MDEwMDAwMDBaFw0yODA4MDEwMDAwMDBaMCYx +JDAiBgNVBAMMG2ludGVybWVkaWF0ZS5wdW1hLmxvY2FsaG9zdDCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKe0Vb0OyhNyWi4/u1jwLOkDYGx7+aXOJJyN +5ExJNS0goWy8+OQKVBwFgZZ5jqqfrXlLheOpAEpRm7TIJVTIwD1/gi/zrNN1ec3D +uJMBvlTzNVozfBncQ9EJ62JRC3I/56W1xYMekCIG2SjzSnAfNSojyN6SLxGmtT89 +xN6parFq/GcvEtssDRVVUm6pyIOgpDOgTuUcqT3qUgOkN9w+5ZgjKWAc1BuMipR9 +f8wwjuhXx2XlH2lodVWl94xo4ipY7sd2frF/oil9SXDC3ZrwGdkt9TQaCMYlt74V +yNxPAX5XwRH8b2OAsIGkPNh7TvIkKR33JkKcXXpZbxlRP6PDWR0CAwEAAaN1MHMw +HQYDVR0OBBYEFNVfAg8SLxqTN54wYWiWfxVic+iHMA4GA1UdDwEB/wQEAwICBDAT +BgNVHSUEDDAKBggrBgEFBQcDATAMBgNVHRMEBTADAQH/MB8GA1UdIwQYMBaAFC4S +mXgLro3UDdm+hU5w7yTobbWdMA0GCSqGSIb3DQEBDQUAA4IBAQAOdOtD3py2k4OO +0Q5KvmvyybaeSlFPM6/iXNakmqJY3lELnJgjWYgBXlOK91M7F2xG8YRFoBrUipXd +rMAburoWjnKvMoPaGZqbpligfkPIueo+ON/Q27UhvH2EYFzTOGjfnCiSCzpekesC +32MhK7mB9Uzu9yxDgtRu0IRqCk9WbU35qDzx7mUEZwCHe11T6tV4k9ZHAvhqEvaq +sqAgA+OoXBev6wx1tWgiqf40vuKZBRutWUSTHgGIXqain2l5b7Jv6TkCNI4HttHN +43oME4M6P1/iH+l99Y7fZBkLGnkGYPiWXQH9NyydQVHOBm3dN26QJzAmV06ZLxEm +iX6YKGNd +-----END CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/intermediate.key b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/intermediate.key new file mode 100644 index 000000000..92699491a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/chain_cert/intermediate.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAp7RVvQ7KE3JaLj+7WPAs6QNgbHv5pc4knI3kTEk1LSChbLz4 +5ApUHAWBlnmOqp+teUuF46kASlGbtMglVMjAPX+CL/Os03V5zcO4kwG+VPM1WjN8 +GdxD0QnrYlELcj/npbXFgx6QIgbZKPNKcB81KiPI3pIvEaa1Pz3E3qlqsWr8Zy8S +2ywNFVVSbqnIg6CkM6BO5RypPepSA6Q33D7lmCMpYBzUG4yKlH1/zDCO6FfHZeUf +aWh1VaX3jGjiKljux3Z+sX+iKX1JcMLdmvAZ2S31NBoIxiW3vhXI3E8BflfBEfxv 
+Y4CwgaQ82HtO8iQpHfcmQpxdellvGVE/o8NZHQIDAQABAoIBAACPItwLARR4hKFz +1/GPd27dcoRaE3aAjfGxrWmtss4wNimRJblI6YfgYtK4HAgomSD25f7g0N3ukLjV +E/7I3o817h6vzsszsJDFjsd6sYJB/1MKKedCKz22rUC6KXIbkW+lf/+UagGbCT1Q +NlbSW9CEpwZK8Xo7Fo6b65ycbv0hb0yJgPkATQQ2CIKW8fEp6CV6yJu93HprgMeL +sUhyXtyPQBDdNJ/Nr+VWaM91/c8VUSzPwY7Ycw7LZlBKIqNA4TqYBEzkEUZpqPMU +xdNm88zL3Hr15AKCH7NJfbh/rpa3DrDN5IfJ0Bg40TCXhjUIbOQ5LeYxcP55JrxO +UG8oBFcCgYEA3TqNSE4Dc5xGCQSJfEeai/ztk8/XOGJA7pGjjrhbmOeQ9W71vITV +5WGggrPoLNrbe0Ic53qDwePTkN6e0j5qDdV4eAGO8p09KRMykJdhKxUP17OiBVvK +QlsFzknw+uznqegU0XofPQ3maiNcmqOSVZHTzoQxFKaFaEzl/GZnmu8CgYEAwhAo +hD+Cy0DDdTTeplJ2NQ0v/dYKBCdA4VaAkYtlkxGvWLbW2XtmvcYhWvoNfFOiqIwj +LBkQKhQkScCYWnM8OgUJR9ISd+s6bf78yQBNq7Eux5ijZfNH4d3b1Rd18QOABJRs +yjimIM/RQh5Qu9sCZBGezjo2V5Kh/OPLDfcePLMCgYEAsYm1rBr/Zh45ji8X7qa/ +/ivitUaMn3EqTsqOCZEl4RWlHXIIv1uTFFw+peWfI7AXh1lNPO7zIfyl4pLlvi/c +iHGSFiyZ/VX2qSoA4Ey77AkhB6ffFJHPRWUqyZxJSfYpDA919Y4d1696BB+Qx4ER +hJ5c7lTj1oC0ezmLL/02k/0CgYA9fyFXx0iLYFqQKdDko/0RkrxClDFYX5gP0Hmt +pZ7qnBAU7Nvoz1IfkUnAqrzbSSWig8Yhk0+sBfX56S5ee2X6fN4UBF4V4lUBxmwU +xiPW5K8OUTB2fkGpMcUYIfceCR3oVaCj9I+wliqZ6UFsRlrbmznX4FqYJb9vhEA0 +jLroQQKBgHnopmgOQAtQRkF0c2wnby4xrNjkP86q/DfwCpfyNQ9QMREfz6UwZH4w +qqQ6vWpJWVyjH3650PP+OYRsJmjRYVZ9b22K3+KwRjyErYlhsU7UOkiC4FRiYojB +hN4kzsrk2R9DM8mwYI7+vgOkmV/nwdMA6LdBmenz/YwdqTXAToUy +-----END RSA PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca.crt b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca.crt new file mode 100644 index 000000000..c3d39dc71 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDDjCCAfagAwIBAgIBAzANBgkqhkiG9w0BAQsFADA4MRMwEQYKCZImiZPyLGQB +GRYDbmV0MRQwEgYKCZImiZPyLGQBGRYEcHVtYTELMAkGA1UEAwwCQ0EwHhcNMjQw +ODAxMDAwMDAwWhcNMjgwODAxMDAwMDAwWjA4MRMwEQYKCZImiZPyLGQBGRYDbmV0 +MRQwEgYKCZImiZPyLGQBGRYEcHVtYTELMAkGA1UEAwwCQ0EwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDPVFa7U4cqfq2Nux7VuXIUhYtaiQwxEEZuDvGI 
+tlSVzpENnqFLbqH0QsOuxGgIZofsrSf4BrCzh4AoL3qY+HgVRCmkD09FnoWjFTfT +X6v7QaCtUsrb7RNASrrgyP7rB9y7sp9oc8FNJQA3gtJQxghffO9zGhnEvPt8EGGU +axh29BYVxvQpX1Av0SkYjTgpRus1s7r50tpoDHXhKyAHP6WEEVaN/8x1p9eXqVCT +XIhlJJ+dgwZz/Hfnb6gr97W5G16Ceo7ybS8syKBJf2D20DOL8kLAiNpjWm9spoJi +oxHvFMdDB3nUqZyRO+QU8gSoQBVYYqRlEnOXvouzqslUVdbLAgMBAAGjIzAhMA8G +A1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IB +AQC9iO5jvBDNgDmUq8+NfsAZngWLOG7IR+Ez5h3CzGU9yzcJgn8y6PMjzhgc20le +WYzAHEG/KXFIfubUW/9iA8E6We0hm0LM1s9o7A9JAqsHcZ/pK2QmDW71JupU8o1z +3Qcu7jRzPc7zYgNwaD4/ccpJTAPGg6keV3YGN2Z5RpWykOFzP7WswaMIcKQE869c +Tyi8i71XkVrL+xx2pRiiNDOmfMkmm8TZ9tBP7n6BcRd556TTIxhjONvmamCraBQP +msPnMtb8+H3/IsMsUw02/fQTJ91FVa3t914Y24+q5xmkasyrhinTNmsa/S939P6I +JNNoAM6TUZrskKPtb/pMzaqn +-----END CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca.key b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca.key new file mode 100644 index 000000000..c689bd47d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAz1RWu1OHKn6tjbse1blyFIWLWokMMRBGbg7xiLZUlc6RDZ6h +S26h9ELDrsRoCGaH7K0n+Aaws4eAKC96mPh4FUQppA9PRZ6FoxU301+r+0GgrVLK +2+0TQEq64Mj+6wfcu7KfaHPBTSUAN4LSUMYIX3zvcxoZxLz7fBBhlGsYdvQWFcb0 +KV9QL9EpGI04KUbrNbO6+dLaaAx14SsgBz+lhBFWjf/MdafXl6lQk1yIZSSfnYMG +c/x352+oK/e1uRtegnqO8m0vLMigSX9g9tAzi/JCwIjaY1pvbKaCYqMR7xTHQwd5 +1KmckTvkFPIEqEAVWGKkZRJzl76Ls6rJVFXWywIDAQABAoIBAEBT3K02l2OZ9mNZ +QZHyffDcFoa6quu2LwQK4uoWGFx6wQrGwyjSZLGS3YAmNFkW5umKODJ+cOjjC+Js +CM/Zl/iRM5JRpbjmHHSjKrUMpCHLFP1yCLr/7c8u4JjKBD4MLHKYU1lnQPyclFqk +3nhtWT+3VdwSpxghwq2Vr5ELQYX2WoHBDKj67yb2+yRVewjrmWd0f3Lm3KrPYDy2 +McOOOB6HcZNsyXDihPYviCOmthv0AEb+HUmEwYn5A/TcTL1IVpt8YG51/VrYWysl +A4rD3t6L5yp3hTv05dezm+hU88hSv8w2M0CQDqrxVN6zMyQ7ixj4cCbTqBJwVjmj +IxkWyzECgYEA2My20inP2nai+ANkY56QF+2Mvof1qEpZh9J7oPjzStG3jqfjqVWT +ZkY25SLLGO7SB06XRAgFVrq5Xyb72pXENhV2KUzOHN+osERaAyZroFlRbJxfCL8s 
+p24JKVN7oJPDOJnYYv/cEEKAT1/ocxKHbi6oDZb/tJA7UHfT3NBIuV0CgYEA9NFD +uGuV53XfuyaGh/dHurGBKu8HBjt16COasSqMxH3U7rb4JRF56wB2pVAAE8AFivNd +A2om4b0CQK/F7TJvQY6lBKUcy4lOshwoYR2Ga7x58BJNbS6Da1OTwmk12BKxt10x +NgThGNjRL/A7x1M2r1CPvp9+WiJF9hdWWRy3RkcCgYBX1rGn+wrdD7490FoHRsJI +/5VLyRhOJDmcHzq54MPqJ4hAg2NpQ2PiR0aMlglFw9VS4pPFG57WIfdjRw854/5I +j299kkHcTv4w1LDHn/RxVl74K28Rwytbdui9Vra7O8MYFrUmf1XxPJByGIC8xuAa +U0sBnpFh3iI6GyK6z6T3fQKBgQCWofvNCUVe+UQD3xx/pELaOEWFqTTlPMi7YMzZ +nZinfdT6XFIqXKprUgmuhZ7GiCvg5wSgCTyLQMSWEJDz9UhHSgF27jDde0qUhIbD +2mrO91T/xgMl3qy16OLNh/a9UCNQ82JIHpBxvVZY7LObXTQUgrkvTOttghER7iIP +CoAqGQKBgQC+26VjLWtLGRqJvWnvDW5jS3/lFB/AZPvfeLNsSs4G9Fgrqx6qNoQu +dVI39k/kMLSqLP1iMIR5cbujF5akpWTX0dJReoeOYXJuf9hkm1+kWNiHBbNTbfqN +htxGidymslRrSsOv9zJys5zosfHvbcKbi1dPLlV+gPN1v4eXjtLOLw== +-----END RSA PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca_store.jks b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca_store.jks new file mode 100644 index 000000000..f52292602 Binary files /dev/null and b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca_store.jks differ diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca_store.p12 b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca_store.p12 new file mode 100644 index 000000000..2af10d994 Binary files /dev/null and b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/ca_store.p12 differ diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client.crt b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client.crt new file mode 100644 index 000000000..79e7f6fca --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDBDCCAeygAwIBAgIBCzANBgkqhkiG9w0BAQsFADA4MRMwEQYKCZImiZPyLGQB +GRYDbmV0MRQwEgYKCZImiZPyLGQBGRYEcHVtYTELMAkGA1UEAwwCQ0EwHhcNMjQw +ODAxMDAwMDAwWhcNMjgwODAxMDAwMDAwWjA/MRMwEQYKCZImiZPyLGQBGRYDbmV0 
+MRQwEgYKCZImiZPyLGQBGRYEcHVtYTESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzl14vsZCU7E1qf/a4Yxs/SbIf/Pz +PW3Thbhno3JAbsTUpf9zkdDRB8fCn+R8SnJVZhCRYhUA3lNdZIskQp1h2Ix9TZxj +QSo29ixzik0fTd7nd4KC8WOpZUyDs0U2ugmqomJXP0e/AiFpN+3HmgTWJpDx2oET +Lcdl70+wRBkF2FDRJU/5KvArUnaGXrqW645GH3iXMadQRpEWBxFP/+aiz0FXqd9Z +IWSoobf03dY/tIaxCrFe2+yljBB6eI/oHWs7Y861JN+nRcJrtp4LmeCdDXzQN7/y +xDg+NVQy0sEltIMTgMyoIsb1UPD+IkepT9GKd09M6p63BX0SXr5YPhz9iQIDAQAB +oxIwEDAOBgNVHQ8BAf8EBAMCBLAwDQYJKoZIhvcNAQELBQADggEBAHKYzIAjmllB +n+CRjYjtwPrkVT8azecL9efdBvE90KkGnIvRzNe4vQdH0mT4R+RUOXO4mbhAT6tX +zOpO2gPldBAQ1GAcjs1XJPGxSqKUO8B69Nv3mXqwR3FKR4ZGseC5MXjQb5dZc3An +NJZy6t+7gO0nDfV361JJjXvNdHd0cfx0aCWdTgQWxt9RSnH3QxvVbdXLAPvYyb1g +Ueao9M5PMMx5cAgq4ORM+TxPJxTRj2BuRj5coHsPn7R4dkwWP59+h6Vgdf0AmeUG +Jmdrul5ESeACbs2GixZdtqLT9QUJhTAFaXpJd3IUYyML1V7qRk1FJqRvatYxhBxQ +gjZp0ouJRCY= +-----END CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client.key b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client.key new file mode 100644 index 000000000..371a4a152 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAzl14vsZCU7E1qf/a4Yxs/SbIf/PzPW3Thbhno3JAbsTUpf9z +kdDRB8fCn+R8SnJVZhCRYhUA3lNdZIskQp1h2Ix9TZxjQSo29ixzik0fTd7nd4KC +8WOpZUyDs0U2ugmqomJXP0e/AiFpN+3HmgTWJpDx2oETLcdl70+wRBkF2FDRJU/5 +KvArUnaGXrqW645GH3iXMadQRpEWBxFP/+aiz0FXqd9ZIWSoobf03dY/tIaxCrFe +2+yljBB6eI/oHWs7Y861JN+nRcJrtp4LmeCdDXzQN7/yxDg+NVQy0sEltIMTgMyo +Isb1UPD+IkepT9GKd09M6p63BX0SXr5YPhz9iQIDAQABAoIBAB+oJB0z4bnf/ej5 +uFSQS6nb+Lb0L9S1E9U4ECH5JVw4spxSXyK3w1ErujX4AX8UIfiZwwumSC9S34lM +dQAJD5j+GDy/KFS3Sp4h/IlzQkGaV6y0Jy0rijFhdvSZwgbw862cY2DwOyNzB6aO +QeflJQreglNwKIaFx6PZ+CLCKezgVkSmRDSjYWv/qYkpZmdEjCsI7FrYgxwmkLYr +fO7bisPaW7vYF6S48y9Lv/e9K7qlOz4W7aAHAUUWd72271o5dJQrN5jlj/tKgBww ++rDV1MsKNi1oqxjZnQ7rrJ/AAsc3FkaWPmUluhy9XbaZKSFJIC+TSV86US/I5QVW 
+Kets+9kCgYEA19mtBjbBeZFCvR/iaFtvhlslkLiS0hRcEKO+JaRQhTB7VWTqeIv2 +Y1bEFh1t/8NinxtPCDi2Pl57M5ffKnauxl8QsggLebTFdwnj3kFmc9g0XFlpErYe +xHUcJ4UXpubGZNdnau8GoUGby/zY/yWX7tGsbHBBjwwvS5AvkbYEhi0CgYEA9MAh +473dsUynCMTzdvPC1tkC5MPLTGkJ89zoxnRgQbfAEtdTpBloNnDu+XAajEL0/YsN +cxGohipESzBs+uM68M62m7BJEpDV9eRgRH80/4O2GBTPbutJAYxbJRMazvHzMA0p +ed9Ss0HNcyFzTlgNpUEk6LCPB3Q6ZBCx9b06ak0CgYA2Hq3zUOdRw8ooAUaTQ9nC +pjfVAXJOmiw7P9L9PSWzP2StFJB1iSJN5gNkSBy8dBDWM9KtDQskuCvL09hwnTFV +K6DUApBeA6U/I4uuvlch1KPMQ97EgkVUnMhiYEx93H8B6cBUX3oaFIkrrEn8L0DY +8Fl7FupZI4T4PT6wGiMoQQKBgFZZmxtQl3DATIZJhmRHaEyIz/6KHaXdwUcqf23V +DUjiXX/IPJg+5oeenJGgvZ0S2oDBmIMqjbyYGPQ2J5z0ooznQYH4Vi+YrgRNCLHh +937Wp14jqXPGy0Eild2kueadcmFXo3R+GB1qb+PYv2e6pUXSkAUQlcXZeehoA4Fh +u8RZAoGBAMNlsWXiTAZkXCQnHy7hEt3rONumzHIwBhWrMoNRQAbZSM/WWEtZuMso +QvhR9Fmz/fiaVvBcXzBui6ffkoSsyvyhigfaiF1AwhW1S1JMXttEiGaFO7pgysmw +26goSWo9KKpTxUcAW33qKz1HBb6a51CeucpeO3PmQRUJ5CZmLG2Y +-----END RSA PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_expired.crt b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_expired.crt new file mode 100644 index 000000000..6a6fefa83 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_expired.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDBDCCAeygAwIBAgIBFzANBgkqhkiG9w0BAQsFADA4MRMwEQYKCZImiZPyLGQB +GRYDbmV0MRQwEgYKCZImiZPyLGQBGRYEcHVtYTELMAkGA1UEAwwCQ0EwHhcNMjMw +ODAxMDAwMDAwWhcNMjQwODAxMDAwMDAwWjA/MRMwEQYKCZImiZPyLGQBGRYDbmV0 +MRQwEgYKCZImiZPyLGQBGRYEcHVtYTESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzl14vsZCU7E1qf/a4Yxs/SbIf/Pz +PW3Thbhno3JAbsTUpf9zkdDRB8fCn+R8SnJVZhCRYhUA3lNdZIskQp1h2Ix9TZxj +QSo29ixzik0fTd7nd4KC8WOpZUyDs0U2ugmqomJXP0e/AiFpN+3HmgTWJpDx2oET +Lcdl70+wRBkF2FDRJU/5KvArUnaGXrqW645GH3iXMadQRpEWBxFP/+aiz0FXqd9Z +IWSoobf03dY/tIaxCrFe2+yljBB6eI/oHWs7Y861JN+nRcJrtp4LmeCdDXzQN7/y +xDg+NVQy0sEltIMTgMyoIsb1UPD+IkepT9GKd09M6p63BX0SXr5YPhz9iQIDAQAB 
+oxIwEDAOBgNVHQ8BAf8EBAMCBLAwDQYJKoZIhvcNAQELBQADggEBAHclwiffmiFl +uMlhnVbcw5UILFnZLhrT5Ufxg+dpPVpd0s6vZ/emw0QgQHSUwBfo0l4De3rA4ZfK +wqXv87c38tA9fZ6Schj+0m/pGFbixX3ptw9jm05V79broPgzSflEUKpJiyYJmjnK +3ZfN1JoxYsd3WUrcbxnoOhNEbHuImyFn6sd1Aw7o4+Zs7GcKvKfY5n+anATy42bL +ODnB/XJf8sZtevb57ceiKGIoGbVzrdi62QY3uQS8Jv1CAVzGHogW8mAw1yjE94v0 +vcV7U4/v2KNt2UqWLHjIpeefnN+7scbX4YAz+UXbBZ+1DwAgSZq+oyzrBn2RKggM +LSUeS8vdfds= +-----END CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_expired.key b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_expired.key new file mode 100644 index 000000000..371a4a152 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_expired.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAzl14vsZCU7E1qf/a4Yxs/SbIf/PzPW3Thbhno3JAbsTUpf9z +kdDRB8fCn+R8SnJVZhCRYhUA3lNdZIskQp1h2Ix9TZxjQSo29ixzik0fTd7nd4KC +8WOpZUyDs0U2ugmqomJXP0e/AiFpN+3HmgTWJpDx2oETLcdl70+wRBkF2FDRJU/5 +KvArUnaGXrqW645GH3iXMadQRpEWBxFP/+aiz0FXqd9ZIWSoobf03dY/tIaxCrFe +2+yljBB6eI/oHWs7Y861JN+nRcJrtp4LmeCdDXzQN7/yxDg+NVQy0sEltIMTgMyo +Isb1UPD+IkepT9GKd09M6p63BX0SXr5YPhz9iQIDAQABAoIBAB+oJB0z4bnf/ej5 +uFSQS6nb+Lb0L9S1E9U4ECH5JVw4spxSXyK3w1ErujX4AX8UIfiZwwumSC9S34lM +dQAJD5j+GDy/KFS3Sp4h/IlzQkGaV6y0Jy0rijFhdvSZwgbw862cY2DwOyNzB6aO +QeflJQreglNwKIaFx6PZ+CLCKezgVkSmRDSjYWv/qYkpZmdEjCsI7FrYgxwmkLYr +fO7bisPaW7vYF6S48y9Lv/e9K7qlOz4W7aAHAUUWd72271o5dJQrN5jlj/tKgBww ++rDV1MsKNi1oqxjZnQ7rrJ/AAsc3FkaWPmUluhy9XbaZKSFJIC+TSV86US/I5QVW +Kets+9kCgYEA19mtBjbBeZFCvR/iaFtvhlslkLiS0hRcEKO+JaRQhTB7VWTqeIv2 +Y1bEFh1t/8NinxtPCDi2Pl57M5ffKnauxl8QsggLebTFdwnj3kFmc9g0XFlpErYe +xHUcJ4UXpubGZNdnau8GoUGby/zY/yWX7tGsbHBBjwwvS5AvkbYEhi0CgYEA9MAh +473dsUynCMTzdvPC1tkC5MPLTGkJ89zoxnRgQbfAEtdTpBloNnDu+XAajEL0/YsN +cxGohipESzBs+uM68M62m7BJEpDV9eRgRH80/4O2GBTPbutJAYxbJRMazvHzMA0p +ed9Ss0HNcyFzTlgNpUEk6LCPB3Q6ZBCx9b06ak0CgYA2Hq3zUOdRw8ooAUaTQ9nC +pjfVAXJOmiw7P9L9PSWzP2StFJB1iSJN5gNkSBy8dBDWM9KtDQskuCvL09hwnTFV 
+K6DUApBeA6U/I4uuvlch1KPMQ97EgkVUnMhiYEx93H8B6cBUX3oaFIkrrEn8L0DY +8Fl7FupZI4T4PT6wGiMoQQKBgFZZmxtQl3DATIZJhmRHaEyIz/6KHaXdwUcqf23V +DUjiXX/IPJg+5oeenJGgvZ0S2oDBmIMqjbyYGPQ2J5z0ooznQYH4Vi+YrgRNCLHh +937Wp14jqXPGy0Eild2kueadcmFXo3R+GB1qb+PYv2e6pUXSkAUQlcXZeehoA4Fh +u8RZAoGBAMNlsWXiTAZkXCQnHy7hEt3rONumzHIwBhWrMoNRQAbZSM/WWEtZuMso +QvhR9Fmz/fiaVvBcXzBui6ffkoSsyvyhigfaiF1AwhW1S1JMXttEiGaFO7pgysmw +26goSWo9KKpTxUcAW33qKz1HBb6a51CeucpeO3PmQRUJ5CZmLG2Y +-----END RSA PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_unknown.crt b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_unknown.crt new file mode 100644 index 000000000..ed467f140 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_unknown.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDBTCCAe2gAwIBAgIBEzANBgkqhkiG9w0BAQsFADA5MRMwEQYKCZImiZPyLGQB +GRYDbmV0MRQwEgYKCZImiZPyLGQBGRYEcHVtYTEMMAoGA1UEAwwDQ0FVMB4XDTI0 +MDgwMTAwMDAwMFoXDTI4MDgwMTAwMDAwMFowPzETMBEGCgmSJomT8ixkARkWA25l +dDEUMBIGCgmSJomT8ixkARkWBHB1bWExEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALpCL06kbLE9BNknrrRVfW5YgkoJ +ih7EaPNNNqmUgnIsLBjPcCZ7qlsMaUa2ft0mRFxniFkccdrHKox1oAZQ4WrOV5Ls +pcLktNTESkuMG5oiobsASItz125YMoEs/q+cLhErf24EaUAhXjn3/ebvCwtza1il +7bL1RS0iwfkCS68cQi8MI+tQlBebcSUoWazoitg5YituPKNj493tS1nO5pL3vZuG +6tNmhLr8lW+3n4Fi8SElC/pBWiIvsUhaUZzRjrcyPeMS7H2+zdJXDgTj++RNyP0U +TLZzFigeZJXNmHia3uneHw7o3d4fyZERuBl2d990iboLbtPh/9Pf82bBIU0CAwEA +AaMSMBAwDgYDVR0PAQH/BAQDAgSwMA0GCSqGSIb3DQEBCwUAA4IBAQBO7O5IQJHh +4ENYDw/kJFdye3l204JSDrFMP8uMIWQLd1RiI22CWjoyhUSfoyumrzykbPNhxf4w +kj1E+z097/8Q491wxbbLbE1hfNdTtx35dnCUVGPb9lCP4YLK3v3swXw4OklVdsCb +k/cf8s6BdNKVPdhO0zXRBdCbq2Z2PPpzQtaZ2qH739v2tX6CulB68hUO/hQeQINA +eJGMck7mlR4FOE76IeHKYy4F2V4S9iNtkqvkf7M0wfdRwZFJqbdZBXTjRpOVMXgr +XFGvANl9HByFudJJHk8t+NmkIFP6rj5xQSV4i31KQQHMGNr3hCKgkk7X6Wn95BH5 +FSJcNlLR8BuJ +-----END CERTIFICATE----- diff --git 
a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_unknown.key b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_unknown.key new file mode 100644 index 000000000..efa436f13 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/client_unknown.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAukIvTqRssT0E2SeutFV9bliCSgmKHsRo8002qZSCciwsGM9w +JnuqWwxpRrZ+3SZEXGeIWRxx2scqjHWgBlDhas5XkuylwuS01MRKS4wbmiKhuwBI +i3PXblgygSz+r5wuESt/bgRpQCFeOff95u8LC3NrWKXtsvVFLSLB+QJLrxxCLwwj +61CUF5txJShZrOiK2DliK248o2Pj3e1LWc7mkve9m4bq02aEuvyVb7efgWLxISUL ++kFaIi+xSFpRnNGOtzI94xLsfb7N0lcOBOP75E3I/RRMtnMWKB5klc2YeJre6d4f +Dujd3h/JkRG4GXZ333SJugtu0+H/09/zZsEhTQIDAQABAoIBADYZfjsl6Aos8okO +jSCFCkswOxu9HJimLOOqZDuqTp9R9Db6nZUX61nJx5RJ8pxSfp2mVsSnTeyhV9r3 +WVUsTeUjbO1YficXHe0E3J2H4waIk5hI00lqCsNaq8S4aHtibkD/dOJkNpvxdTh6 +Lw3SFc+sJRxZCasmsYKUK2ojwA7qs72mWAW8k4pZT9p2fn+1O5G0J7I4u77CsKNp +3m2EYkYq70UuAPyESxah97QLynQxczxygyMkJyJjyrr7lnJRotu0UOw3cX4qOmP0 +s2gmTwBqsZ7IpyBSJHyEY4E62yfgweSoUIXtS6wWUj4WBToyAiX0zMC6XTMN27iM +2/mHkAMCgYEA57VRh2Hj48scDoj5tFhijcqSiJyizW2CouYtF47NUi10CG7U+ATP +HGcCJhWxIf+kzxUKaP/sgwO4ynttNz6/KMRkVW8TRazDLZCwHXCb1NEhiDuP/9lZ +HUnIAvvI01HYJr7/4ktb7Ktxt1rA6WKcnM7Gzyrs5NdZPr4XTjiCcx8CgYEAzckR +TG3te1UqzDs6FZba8+cGRMjoJcSahAcRj74H5YY2Oz3g8TTS144ppfkI0R3gAQ8x +oe1DIa8XWswdcCTENnGN/D4C3qc+Cnvv2luE8d6POiO4oToRVYNycP3uCXCl7drw +x84ky5PD4xHK5V9HDapzOz93+Q8nJ4Rq0HdgqhMCgYEA3hlEC9P+ICRZQWl0iLTL +ywPoj4As4WJd/Q1LHbCpCGM/PLIX+6CIcb4fJihWwLMN491UPC9MIo0XvSawGmtP +yqDtV6L/wyKFd5Cf76g4BrdrP+1v6xsIO3W0fX+hEUX7PbCJD/2SIHp5pvobvdnj +Kr9de8i2h+gPyH9nUXgDOpsCgYEAycG+og6M7YScx0uyMQSMeuEgdkP7xzuwuo0r +nKCVoWUqun56kkJWDVymXMGC8XO28ZLx+G5kiRvfa/f/TLnfXfmLWal6qs2RuDm2 ++YENIQ4xorDvDbOe1xZfF4T+4mZ9aGxl8Z+c17NyNKh3Itd9FFrvS0F44MkIfGgJ +SYYhUd0CgYEA1S9NVhD4MVu75UxiFo01cZTMBw/MQpQcEE7YgjW4Af2MmPEnw49g +KeNjinwOS/q2JmXwprHCbewv8AuVOqNENwARCUR+o+1S/X6WwW5xt8ZAYwrdm6HY +G62HYnzYmRwSVpzMCMMTAG0BM5dxL3YalzVBY+NTx5EVjhrzhAERI30= 
+-----END RSA PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/keystore.jks b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/keystore.jks new file mode 100644 index 000000000..17e7d5b54 Binary files /dev/null and b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/keystore.jks differ diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/run_server_with_certs.rb b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/run_server_with_certs.rb new file mode 100644 index 000000000..22d7580d3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/run_server_with_certs.rb @@ -0,0 +1,26 @@ +require "bundler/setup" +require "puma" +require "puma/detect" +require "puma/puma_http11" +require "puma/minissl" + +app = proc {|env| + p env['puma.peercert'] + [200, {}, [ env['puma.peercert'] ]] +} +log_writer = Puma::LogWriter.new($stdout, $stderr) +server = Puma::Server.new(app, log_writer) + +context = Puma::MiniSSL::Context.new +context.key = "certs/server.key" +context.cert = "certs/server.crt" +context.ca = "certs/ca.crt" +#context.verify_mode = Puma::MiniSSL::VERIFY_NONE +#context.verify_mode = Puma::MiniSSL::VERIFY_PEER +context.verify_mode = Puma::MiniSSL::VERIFY_PEER | Puma::MiniSSL::VERIFY_FAIL_IF_NO_PEER_CERT + +server.add_ssl_listener("127.0.0.1", 4000, context) + +server.run +sleep +#server.stop(true) diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/server.crt b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/server.crt new file mode 100644 index 000000000..1335881ba --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/server.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDBDCCAeygAwIBAgIBBzANBgkqhkiG9w0BAQsFADA4MRMwEQYKCZImiZPyLGQB +GRYDbmV0MRQwEgYKCZImiZPyLGQBGRYEcHVtYTELMAkGA1UEAwwCQ0EwHhcNMjQw +ODAxMDAwMDAwWhcNMjgwODAxMDAwMDAwWjA/MRMwEQYKCZImiZPyLGQBGRYDbmV0 
+MRQwEgYKCZImiZPyLGQBGRYEcHVtYTESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxqbRfVxcH5wKCTGfuZTVisUKUIWS +TUOWkIoyIfBKVknREnVSN40aw72bs9GDh55F+R2IJ3y9F7TQXo0Y6PJ9tFXZqJMb +2NHXYTfhQCSN/w5wqKIrWOR7wbA4JHjs6x/vHYnjFVbvbx43M+wceKsMBlQ1eDLz +Ukr5vjLGASmdgzTq8AUd5bNKiNq+ERyAUTcuwXWC6ndXNdr3vdEgcYnzPWtgM7eo +1SpewqCqrlSeA1dzXThB8vCFkCNy7DYrJbCuGIIxm1D2garE2814LXztE/rhwdIG +zonh44/Zc43cFhzJQUhcJ/jOxFovgijHd5lvUfvqj6YoquC5yFVYEfsSwwIDAQAB +oxIwEDAOBgNVHQ8BAf8EBAMCBLAwDQYJKoZIhvcNAQELBQADggEBAK8iNpb7JRQ+ +CNHl/dUV2DW9Js1TndISLs0YqAcAJHgzwtp5OPV8GHUV/BF7sf91sHU8y6ivety8 +QsvG1ToZhWAVJXG+AStgJDM8Iw6g2xZbIY3wP3bBSuxAE5PGdcXM7nRfjwnAJM7i +Q1gdPcntc5J3dZ1Kt5d9iofI7UMBTuf0Ws+LWG73i/NeRj0lef6+VErQ6M93Yi7/ +knXVzvRkpK2664fRmUHlavXP2Alleuuibgg8kgJ9M1yjluwwx66LXUbhMClrS/OD +hPM29fPZmO+XHJ1dtR79DhdmwqX+GOqjD78asYHIg2Yjy3yZsOX6chKvD0MWk6pc +DyFSu5fEHQ0= +-----END CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/server.key b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/server.key new file mode 100644 index 000000000..ea6f4672b --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAxqbRfVxcH5wKCTGfuZTVisUKUIWSTUOWkIoyIfBKVknREnVS +N40aw72bs9GDh55F+R2IJ3y9F7TQXo0Y6PJ9tFXZqJMb2NHXYTfhQCSN/w5wqKIr +WOR7wbA4JHjs6x/vHYnjFVbvbx43M+wceKsMBlQ1eDLzUkr5vjLGASmdgzTq8AUd +5bNKiNq+ERyAUTcuwXWC6ndXNdr3vdEgcYnzPWtgM7eo1SpewqCqrlSeA1dzXThB +8vCFkCNy7DYrJbCuGIIxm1D2garE2814LXztE/rhwdIGzonh44/Zc43cFhzJQUhc +J/jOxFovgijHd5lvUfvqj6YoquC5yFVYEfsSwwIDAQABAoIBAC1UQohLC4BxAD9B +0hMsWZ/tP2HJYO19hz1bM35YEPZoqKsAF28ypRFWUlGxdMR9ArWKHhf4mENxEH0K +0EzTjIiacGzNiLSsiWuXLzCORtKVDnMEckS5+l1joyP7bgBnDI6eeLFVYZRm/iy8 +a3DDgx5sl7jiU3CLSDq05ka8osMd5/WcGSx0VaenhPNUY9m40h0tuTO8NJyz12rO +Zdm/Uq94zEA3XdPuOgnZMUZVctZzy/2TGoqCWGT/MT1ko9PJ/9vwupyTo08gNqGG +YQGDPk/GM4f5GarLD6Ap4l1RD+5RfCdoWRLLN7Q06cQ0Sxo1OQ27cPPBIeIDyv5b 
+3lvfp0kCgYEA/mljvIyGVOlJclIuBiT8Mjb4yuJktPXZdbg+sI6Pm14yYvGR/1yg +Kas8ZSPS4VEl8v/xC5vwKEeeAByVP1+cFW5EUvj1zNFgNvfytQumnyXFyrMA+Mib +RfX2cL00V4Hvsy+kdzRjj/HpFCZDxGB5cwAnA4H24pNaxUDdUBF4OesCgYEAx+RP +l2Hc+JFhv0gjyj46SZVM5D2yx+U33a5FNl/NwB6AdKJ4r7j9jZFKCzniCOjkvh0D +a7AhnnB5fuMCR3qxcWMuTtdinrTydSQb6BFA5XbHgAsQvfp8AGJ+T1A9EAMFPA6R +QKUSd8tEYo/OQZcddvdyELDq5T/eaCVDCXPzPIkCgYAT6BstvbElI7vjfhqr0GkL +ZNHCzkQjBLmnRGtjVURXW9+FbnGGEbFWkVeAcnUlvDvD79LknacE/9WVHKdw9gI4 +ixA9jc1t4VQt+Oi80dSPz1zSp38D+G5t4vkNiUB3OGuREcFEsoy3zq32LahPzVyz +aueilqDCydToalLm2PcXUQKBgH0jTUB0H6XtPchGHUHLWyodsZlhlr7VnPfG1cWI +jIItljVuUSeRJAqwXi5Q+lShmDH8b9vG1AUgXK825oJhKRzW/nKVHERG+1KiXGYY +BwygI5zhUIXefiqvfmugy9luSqSiagSSUtF0C0e4x2MAe42ys1IHr8X9Cfsg+Kq+ +UBQhAoGAEMLXpCq0PnwWRoUZYxwDDvLOoEyul0jghAspec8d31j0MBUNTn19YlmA +jci2NHSY3TMywR/USSCN56aT6LstlMyRzmoJC5DYNJvEEkMHQam/BfFO9VT0rkMG +n6BAyqoComrkP3mg4sn9dI262Rz4I83obXj4DyV0eayOuY2mhHU= +-----END RSA PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/server.p12 b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/server.p12 new file mode 100644 index 000000000..7f7e8ff6c Binary files /dev/null and b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/server.p12 differ diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/unknown_ca.crt b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/unknown_ca.crt new file mode 100644 index 000000000..5c50d8f0b --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/unknown_ca.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDEDCCAfigAwIBAgIBETANBgkqhkiG9w0BAQsFADA5MRMwEQYKCZImiZPyLGQB +GRYDbmV0MRQwEgYKCZImiZPyLGQBGRYEcHVtYTEMMAoGA1UEAwwDQ0FVMB4XDTI0 +MDgwMTAwMDAwMFoXDTI4MDgwMTAwMDAwMFowOTETMBEGCgmSJomT8ixkARkWA25l +dDEUMBIGCgmSJomT8ixkARkWBHB1bWExDDAKBgNVBAMMA0NBVTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKh5VmSGmeN97MEPb9Td++UYcWHhYVj+yxi9 +leDzcLpAgqtd+z0yBYCS2ULRQqomf8uIYODEuWxaepVNjjRN5Y22Hzlodnp+kJ3G 
+CXQva6GtwxbTIxyZ21Yn8APPgb7OTHZ/js1EhZpJUSjum19PnSS8rPfQRkr6m2eg +HKDkzV3jrJXTpNdvUhTi2NVVo0vNzLYV5Kj45vqirlTEesLxdxHtDNx8vanYHaJY +JY7OryIJhPsxKYplSbQ7ydJuHtpx5SiFjRckw9ddhRzGjYsAQFE2eVBPjhiYJnDL +tmEnb2e+tgRNiru9ttffpfzN4EG3/d9qnyZV9nua03yn+tqe0sECAwEAAaMjMCEw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQAD +ggEBAAp+1M6+Z8T/CA+C0288CRF27tXKn5Kd5Iffrx1Dg7I5HrFGJl87tkzLRoZi +MKj+FfpF7fJ/QSyQt+UZa53R2vmJig6JUEQwfzXc5Bp+fusJgmkGPZQ1A5tnbwCZ +TTuyy8Sy9reCWdJkmdDWyK3ah55mjiuOwZ+woA3RAoC9vCRtIT4w08NqrISBYp2i +NeQCv3gooiQrbJuCUuo56Qm73juJ4T7GXElRMF+SupmhC2SHwFVGAbu0/g6Yc+ye +i6/T36t/GTT0fG3kKfGfFMq3NCvY3WI3vcPzYtK9fN8QYVy3LjdnsjcTV2L3SaxG +3fYURa9nn7mrxlMBX88UWLUIPlc= +-----END CERTIFICATE----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/unknown_ca.key b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/unknown_ca.key new file mode 100644 index 000000000..830cd9133 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/unknown_ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAqHlWZIaZ433swQ9v1N375RhxYeFhWP7LGL2V4PNwukCCq137 +PTIFgJLZQtFCqiZ/y4hg4MS5bFp6lU2ONE3ljbYfOWh2en6QncYJdC9roa3DFtMj +HJnbVifwA8+Bvs5Mdn+OzUSFmklRKO6bX0+dJLys99BGSvqbZ6AcoOTNXeOsldOk +129SFOLY1VWjS83MthXkqPjm+qKuVMR6wvF3Ee0M3Hy9qdgdolgljs6vIgmE+zEp +imVJtDvJ0m4e2nHlKIWNFyTD112FHMaNiwBAUTZ5UE+OGJgmcMu2YSdvZ762BE2K +u72219+l/M3gQbf932qfJlX2e5rTfKf62p7SwQIDAQABAoIBAAzm9fnO4Rwt+aKz +ZvIbaf4aiBxh4U2qfa1QX5kRq7Fn3Vn4YZRHL2qwVFkso3laZ3ET6a9tAXJN35Nt +lBVnXcwjhXKlEGXDKZobn3+5XfnMz+MAmnDHS6fmeRghnSMas3pjvHD17XFqaf/D +fjynYTZ+CJUV6zw0wxAFkAu2Wg/1jrHWey9I/sO+xgq+IhxVM6O4JQHHtcR4vrSE +vnjNF5NhxPYHLot+3n2kQ4Pfld5nJ9RHS7E9sZsoK4aQ3J1JFs70W6v+NaYHRrrY +jVNG+uTRcYwCOgUShXsv9jwxxRToxito0/l7e3vlI5V2WVVG69CoKCqJjDFjy22C +lNMAfa0CgYEAueRrHrNZFGnh+hvJ6gmR0KHB+pfQuC6Bub+ewOJco4e9k2rpWV5G +RT7IGuOkYwj4wd/8er58YSIKD4hINkPEOU+Qy2j6VilIJ38wrB0nkrwURW3xGSrl +gzzMOXgBz3JAMreNSNrfIk5y1mhqblUkG8XWMsmh3/zdSDu376zq80cCgYEA6AM2 
+lnmQZezTLek6RjdNlU5VNBSw2fb/sLsfwkP2sB4I5W8ewy7iFCRg52qWFiJL3c+1 +1PkH03ZOJ4HgyTZXxaeFyUMpK+vA9nmPjA2vJno8c8Wc8BfaMKh5N9ckO8yYN0qO +0BFaX7nmb6hIx3sGYyrFMbh6aeSzz6Ms03M1PbcCgYBZDlI3WgqyGevV65w3EDCY +N/Z7fGHU6wrhFykSFR/2hCBRUKXMV7roj6REJYzqmTC7iPgnQbzKuAkD2fFmdd66 +6iBfIsLTpmFX8ro5qpn71TkM7hfUS9W6otTgL1sGrCCPa5o3Lgw6HkUr7MgYZZev +6n6sxCZV7YmBgoKjQz8oHwKBgGxHo6BSl7uZ7W0N3i6K/OBjsIwf4/G1vttCWa4W +ffjv64L4e0johoaT+83FBsrJstKQb/I3Txm3qv3xNXFytT//0QZ/G6Xt3x6o2P+q +G0Y/4hq97WJ4H1J6LiiLddOaxqd3WnNsxdrLLeqKhXzjw7VgbqunxynInMqZAwCk +UgK9AoGAUzFOXlnloBSpVCJYv9yR4BzJndK1ivULYI6J3UXKfgTO7F1J1GyLAL8o +0LaRoB8aUKrh6wd2sB+5y68GxJ+pUJ+xAjY9hxWNMeuYKniBzWaA2vHOpicmkkYq +IL0vHDK0GMuaZR4IaBPsm8Pb/Wbi8Ei0pcpYnQelRTrOdewy/iQ= +-----END RSA PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/client_certs/unknown_ca_store.p12 b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/unknown_ca_store.p12 new file mode 100644 index 000000000..c74c9400b Binary files /dev/null and b/vendor/cache/puma-fba741b91780/examples/puma/client_certs/unknown_ca_store.p12 differ diff --git a/vendor/cache/puma-fba741b91780/examples/puma/csr_puma.pem b/vendor/cache/puma-fba741b91780/examples/puma/csr_puma.pem new file mode 100644 index 000000000..5434d478d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/csr_puma.pem @@ -0,0 +1,11 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIBhzCB8QIBADBIMQswCQYDVQQGEwJVUzEOMAwGA1UECgwFbG9jYWwxDTALBgNV +BAsMBGFlcm8xCzAJBgNVBAsMAkNBMQ0wCwYDVQQDDARwdW1hMIGfMA0GCSqGSIb3 +DQEBAQUAA4GNADCBiQKBgQCvF80yn6D+kqGwMSQHcpHUwCRt+c39Qoy99fCWdenP +thfUscecy62Ij8+rKYCnoE9y766a5baowdDKqq3IBOZn2Ove3zfueGbHAbWehFop +G2xySf0UPjdmWk+DRDlCeFLig6xfAnOKWo+N0MViso3dNK8gYzb6FWqlWgZgAcMp +swIDAQABoAAwDQYJKoZIhvcNAQEEBQADgYEAmRsmIQ0pF9iPOO7V1NeHxrVpFz1B +CZK0yAIGlCWqzpFO/OILN1hJfFnsFl7hZWipoARk15fN1sSXQF3Xb7/sc/8qVhyz +oY38uu/8CE9CTdUutniLzP/4sUomXjslKNVV0qKtmfsFkj2tHtWjJkGAyZUcoKeG +hDJxQlIHhZa7Xvw= +-----END CERTIFICATE REQUEST----- diff --git 
a/vendor/cache/puma-fba741b91780/examples/puma/encrypted_puma_keypair.pem b/vendor/cache/puma-fba741b91780/examples/puma/encrypted_puma_keypair.pem new file mode 100644 index 000000000..d87659440 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/encrypted_puma_keypair.pem @@ -0,0 +1,30 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFLTBXBgkqhkiG9w0BBQ0wSjApBgkqhkiG9w0BBQwwHAQISBM1VRFFvlsCAggA +MAwGCCqGSIb3DQIJBQAwHQYJYIZIAWUDBAEqBBAKCsPjHKrhK1EnlPvxcBeWBIIE +0DwRydTdGeqyWPblEhdZZYpjbCMtTEDucl9Non/a6yXAO+igJCSPRze5OxAp/Smt +iGk4LFcGTj/OlQWghqP0rBN6PPctjWRogh1t6hLfASBkmALg9Rsy5pnm103rwvJ2 +s9hDDwavJ5guGFm6KE9Gd3ijbD0pd6JXftu3qry2jajsLEn0pkBFfTTotj45nMNz +ByONJcs9daHduSyKmn6LimemgzK4ylRwjaycwIqmByNbN9psrLDYvgSndeDCnOk5 +x1IwDzzBJfprjgc/ldQ+XKX/7naVN4KXiQ40cUHCzZciZzzx1M1GPk5zJlJavSmT +BZHbTbj0M+txYArnxExBjaaA4mc/fk2JMhgJ4eWU4UHJMeS2dE7l4rBLDusmRs/r +lG4zLEeK1vlSb8pxMIOkbm699WZQJyzcSF/RmyvVUVUJFt3swm2duWQN/8U1U269 +uqiORRShsPaIXtGbdyklhMEVRb9jAB9nkvx4H9SYwT/YQU4oebxGZGWP8LctXJMi +aAfFAQWs3b1PWBJ0tZee+qjmzevsQ7CdDPfVfvMz2YHGtcNgHljQDnkehFsmrp/w +ulPwSVe3pOh9ce2Y7pIejsm+V8a/AVdpmgPGslWqOjNPdUHwBbg3ckuGV1TpuCK3 +ku6QR65Mpx1+MBBa+P49+7QhCJXQk63F8rAc1ms2ggIDAbvf7K19UCIngiqGGts/ +srlZMRB3dd+2U5CZ9jr3FC6czeb4mqFuDD/TZFnupvQ6f27ho+6Wx8DhOLGjdZYt +kwKMbvI6eHjId5buFApcShQdkkv6hQq9smrgvL9OpkVCjT2diYnxeyH0nznNDPyE +OUwqZ1lcNT0BKtv1EAirt7oRM8uK1xKgJfWCB4FVBaz7yBAE+hW21hKaJxU2hm+w +u6TKBtEg7YVCF7VPadT1tfcPUaO3MtGZ80iesjgi/2+zNC5gr7595KteKSJoC5pj +pI+HM8Yx0YlKu/bYLkxcltQEdLoMqjk/Zk+EruNvS2j71pvNRpIQ1qhVBH9P11G5 +xMebXxiG/oDF3XTdbM54+yX9orElyfIKmAk8lz/YmK+9OpW/SVmFv73owJEvEvxE +fZgEZmyavG3G+WosaJBCBG5blSRY1f68DeaBrMqZdYyyw2YC8q22xZW9u/DyzkOI +IPafwTStUDAl0CJnNMOZIG144QLUcR5YI5RBinNC7qAvcXd+bZPH0mhzP2nWt1A/ +nB6n2ar71PkRh9/uv4j2ySD1L2hmd98QAXBZNDc6CGfgUYqY5LqUG+XfgHKAjKiH +ysRzv5867WwqJoZ98YOVTqVpm895pCaJwg2BWTCX3N3nxuLkPDEUi1Jy5r5BsMRc +cC1LDRZM+9+btOBU3DDMp/37V1EqGbh40waBJBJdRZ7V/yCwEgi4ioLc9Z5AGsJh +gQptVJXGzbfjRrxyjlu7Lim5XJOq82ARTwIIyDwEJaI1B9UREFCH594fze+4AfKe 
+ftFmibsBpqlaWBedYCpqBInK9KcRLjXf1L6cA2yRto3I4JP1jKo1vMSa4uVTMft0 +hHa7L53vf6+jF0nS3EkTk80PcMET9WyC5LeafSGn5MqYa7XepVyu7BF+WlSH61uO +aDJls27YgbLjkGRh9NNETCafXGRclSHag+qHNk92c1vC +-----END ENCRYPTED PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/key_password_command.sh b/vendor/cache/puma-fba741b91780/examples/puma/key_password_command.sh new file mode 100755 index 000000000..aa6cfe19f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/key_password_command.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "hello world" diff --git a/vendor/cache/puma-fba741b91780/examples/puma/keystore.jks b/vendor/cache/puma-fba741b91780/examples/puma/keystore.jks new file mode 100644 index 000000000..e36f31473 Binary files /dev/null and b/vendor/cache/puma-fba741b91780/examples/puma/keystore.jks differ diff --git a/vendor/cache/puma-fba741b91780/examples/puma/puma_keypair.pem b/vendor/cache/puma-fba741b91780/examples/puma/puma_keypair.pem new file mode 100644 index 000000000..1aded7879 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/examples/puma/puma_keypair.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA85XNk0Bi4pevyTjdBc/4VfokVnnx/tg8bSQmMJ8DHwWVGSW6 +wAeTu3/JrXC2IgjH/6lizNUz3JHq5NrbD1KVh37nU/jpdfTQYrTc6hnsrRQx48GR +smKaZKuV3kfMQ+iIQG3eIS00QZIVlGIhW6HziMhCFLgRZ31EY2aNxjyQinZIpB27 +qA4ZV0XMApGNexBNrnK4DNuTCOIXcH5imzDgrXea5IQg2I3HQKP2svkPJ91glsmp +9tb0yxpCZ3I2hHojEloQcX/AJDJqrmJh5WYOz08+YFKNrgT3rq0z8AJ0wPfyukB8 +dKifmsSkCBmT7crg9HX8c8fcH9v/lZvgzOgSHwIDAQABAoIBABlpfvrGRFVkbXk+ +M/IidMOE0bFYjoe/nRTmfmTN1LWx+jtB5ZOZbBmrEWoam1MTUcjZLeQLDW/iTzxU +nXb3RCn7HNIfYpJ6RG/lHjOpbw4i0GOfnajGba9j++zRK70dRMhdTfJtygkEibjN +wLIpGVmjSXWU5cfuAypQQtPex1Y9IGbFvoJgmSC+zHUqs1Y/SWGikdG3FavNaBCg +I1GnjcbK1uAhGG3HG/G/7Gx76qxTfjdBGEYvF7w+ixaeQaL+F/wypjf1fTIe1x3b +LMOCUMycRlhiHMrVGw6sMP8jaQiw4xaot3oL7nwP9Kk8krroD5Skofc1fDoMgNDV +Y/p8XpkCgYEA9ZJIGoDo69fK3M3M17XY0LdiTcLtyMk1jX5FOd8mJ7zPdRdUhZMK 
+k/cgpMhR/THy3++BidW2/0KnFFqAhYYBEjZ8iJSc0wEG3rAFidbZuDUK/YP3DaGY +0vNACl79uqsD+3zZYMLZx944g+whrhmEymm8ti8j564U3xJ1hqihwGkCgYEA/e3t +kdJqTwb7Ff9s/qw2jBnbL6/eku6KI/be+yzF9K3PwWUFszbm/RA38rqetvEGCJws +t3Ld2fAUgCMSeAqCHYJ8O8abTkCt+yi0ksNEaylIB8yCJYvJX3s42vADaoxMhnyO +guBAoqGEJE7eaC+KjufFyeWIRlxlVrdfzuxHbUcCgYEAjWkWIkT3W9Ag3nOYMONj +nhTsA9FrMkNDll4PKRsqEpn0cP3lSyH4ZmpZnupH5GCd9nwqASBTR7yvD9Zpyfn3 +pfBAWDoT8KHXveZo2g4hw6Sv1nrmii/zHA69Yvt+ar7OBUTIvKKX3DNIIheY8XN6 +d0DaTaLpryEcnbQ4fzmeI3kCgYB4JLri48uo2IS0Q8YuOQ0V/KXYSj3lBzgVDBk0 +flqrChHiTwoDYVapJaNTFTlDFYblS/9cTNzq4MzSU8XJRi+KBtMA9VdGDxRCYWAO +kJUunrmV9w0c9qawpnygGi+Dmh4bA5PqGHB8gi8jfq18XgIQ21iZziBMKitIfDq4 +jNK+wQKBgQCwPHbmE7zCJmceJs6j2GSerJVwMAq8nrEmIy9PIdXUh6kguLhwxktq +E5haUwSECH1cXtQRhLokhjGa0FKkmmYL8BhCXDEKGb6tEfcLj9oUctTEcXvAkPeV +flF+AXAjGlpma918q7EUQFNOvsnEXADnChAUoBOkn6MFAJV7PokkWg== +-----END RSA PRIVATE KEY----- diff --git a/vendor/cache/puma-fba741b91780/examples/puma/server.p12 b/vendor/cache/puma-fba741b91780/examples/puma/server.p12 new file mode 100644 index 000000000..d928a1980 Binary files /dev/null and b/vendor/cache/puma-fba741b91780/examples/puma/server.p12 differ diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/.sitearchdir.-.puma.time b/vendor/cache/puma-fba741b91780/ext/puma_http11/.sitearchdir.-.puma.time new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/Makefile b/vendor/cache/puma-fba741b91780/ext/puma_http11/Makefile new file mode 100644 index 000000000..bc830025c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/Makefile @@ -0,0 +1,267 @@ + +SHELL = /bin/sh + +# V=0 quiet, V=1 verbose. other values don't work. +V = 0 +V0 = $(V:0=) +Q1 = $(V:1=) +Q = $(Q1:0=@) +ECHO1 = $(V:1=@ :) +ECHO = $(ECHO1:0=@ echo) +NULLCMD = : + +#### Start of system configuration section. #### + +srcdir = . 
+topdir = /usr/local/include/ruby-3.1.0 +hdrdir = $(topdir) +arch_hdrdir = /usr/local/include/ruby-3.1.0/x86_64-linux +PATH_SEPARATOR = : +VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby +prefix = $(DESTDIR)/usr/local +rubysitearchprefix = $(rubylibprefix)/$(sitearch) +rubyarchprefix = $(rubylibprefix)/$(arch) +rubylibprefix = $(libdir)/$(RUBY_BASE_NAME) +exec_prefix = $(prefix) +vendorarchhdrdir = $(vendorhdrdir)/$(sitearch) +sitearchhdrdir = $(sitehdrdir)/$(sitearch) +rubyarchhdrdir = $(rubyhdrdir)/$(arch) +vendorhdrdir = $(rubyhdrdir)/vendor_ruby +sitehdrdir = $(rubyhdrdir)/site_ruby +rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME) +vendorarchdir = $(vendorlibdir)/$(sitearch) +vendorlibdir = $(vendordir)/$(ruby_version) +vendordir = $(rubylibprefix)/vendor_ruby +sitearchdir = $(DESTDIR)./.gem.20241122-8-zg6b3h +sitelibdir = $(DESTDIR)./.gem.20241122-8-zg6b3h +sitedir = $(rubylibprefix)/site_ruby +rubyarchdir = $(rubylibdir)/$(arch) +rubylibdir = $(rubylibprefix)/$(ruby_version) +sitearchincludedir = $(includedir)/$(sitearch) +archincludedir = $(includedir)/$(arch) +sitearchlibdir = $(libdir)/$(sitearch) +archlibdir = $(libdir)/$(arch) +ridir = $(datarootdir)/$(RI_BASE_NAME) +mandir = $(datarootdir)/man +localedir = $(datarootdir)/locale +libdir = $(exec_prefix)/lib +psdir = $(docdir) +pdfdir = $(docdir) +dvidir = $(docdir) +htmldir = $(docdir) +infodir = $(datarootdir)/info +docdir = $(datarootdir)/doc/$(PACKAGE) +oldincludedir = $(DESTDIR)/usr/include +includedir = $(prefix)/include +runstatedir = $(localstatedir)/run +localstatedir = $(prefix)/var +sharedstatedir = $(prefix)/com +sysconfdir = $(prefix)/etc +datadir = $(datarootdir) +datarootdir = $(prefix)/share +libexecdir = $(exec_prefix)/libexec +sbindir = $(exec_prefix)/sbin +bindir = $(exec_prefix)/bin +archdir = $(rubyarchdir) + + +CC_WRAPPER = +CC = gcc +CXX = g++ +LIBRUBY = $(LIBRUBY_SO) +LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a +LIBRUBYARG_SHARED = -Wl,-rpath,$(libdir) -L$(libdir) 
-l$(RUBY_SO_NAME) +LIBRUBYARG_STATIC = -Wl,-rpath,$(libdir) -L$(libdir) -l$(RUBY_SO_NAME)-static $(MAINLIBS) +empty = +OUTFLAG = -o $(empty) +COUTFLAG = -o $(empty) +CSRCFLAG = $(empty) + +RUBY_EXTCONF_H = +cflags = $(optflags) $(debugflags) $(warnflags) +cxxflags = +optflags = -O3 -fno-fast-math +debugflags = -ggdb3 +warnflags = -Wall -Wextra -Wdeprecated-declarations -Wduplicated-cond -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wwrite-strings -Wimplicit-fallthrough=0 -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-packed-bitfield-compat -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wsuggest-attribute=format -Wsuggest-attribute=noreturn -Wunused-variable -Wundef +cppflags = +CCDLFLAGS = -fPIC +CFLAGS = $(CCDLFLAGS) $(cflags) -fPIC $(ARCH_FLAG) +INCFLAGS = -I. -I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir) +DEFS = +CPPFLAGS = -DHAVE_OPENSSL_BIO_H -DHAVE_DTLS_METHOD -DHAVE_SSL_CTX_SET_SESSION_CACHE_MODE -DHAVE_TLS_SERVER_METHOD -DHAVE_SSL_CTX_SET_MIN_PROTO_VERSION -DHAVE_SSL_CTX_SET_DH_AUTO -DHAVE_SSL_CTX_SET_CIPHERSUITES -DHAVE_RANDOM_BYTES $(DEFS) $(cppflags) +CXXFLAGS = $(CCDLFLAGS) $(ARCH_FLAG) +ldflags = -L. 
-fstack-protector-strong -rdynamic -Wl,-export-dynamic -Wl,--no-as-needed +dldflags = -Wl,--compress-debug-sections=zlib +ARCH_FLAG = +DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG) +LDSHARED = $(CC) -shared +LDSHAREDXX = $(CXX) -shared +AR = gcc-ar +EXEEXT = + +RUBY_INSTALL_NAME = $(RUBY_BASE_NAME) +RUBY_SO_NAME = ruby +RUBYW_INSTALL_NAME = +RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version) +RUBYW_BASE_NAME = rubyw +RUBY_BASE_NAME = ruby + +arch = x86_64-linux +sitearch = $(arch) +ruby_version = 3.1.0 +ruby = $(bindir)/$(RUBY_BASE_NAME) +RUBY = $(ruby) +ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h + +RM = rm -f +RM_RF = rm -fr +RMDIRS = rmdir --ignore-fail-on-non-empty -p +MAKEDIRS = /bin/mkdir -p +INSTALL = /usr/bin/install -c +INSTALL_PROG = $(INSTALL) -m 0755 +INSTALL_DATA = $(INSTALL) -m 644 +COPY = cp +TOUCH = exit > + +#### End of system configuration section. #### + +preload = +libpath = . $(libdir) +LIBPATH = -L. -L$(libdir) -Wl,-rpath,$(libdir) +DEFFILE = + +CLEANFILES = mkmf.log +DISTCLEANFILES = +DISTCLEANDIRS = + +extout = +extout_prefix = +target_prefix = /puma +LOCAL_LIBS = +LIBS = $(LIBRUBYARG_SHARED) -lssl -lcrypto -lm -lc +ORIG_SRCS = http11_parser.c mini_ssl.c puma_http11.c +SRCS = $(ORIG_SRCS) +OBJS = http11_parser.o mini_ssl.o puma_http11.o +HDRS = $(srcdir)/ext_help.h $(srcdir)/http11_parser.h +LOCAL_HDRS = +TARGET = puma_http11 +TARGET_NAME = puma_http11 +TARGET_ENTRY = Init_$(TARGET_NAME) +DLLIB = $(TARGET).so +EXTSTATIC = +STATIC_LIB = + +TIMESTAMP_DIR = . 
+BINDIR = $(bindir) +RUBYCOMMONDIR = $(sitedir)$(target_prefix) +RUBYLIBDIR = $(sitelibdir)$(target_prefix) +RUBYARCHDIR = $(sitearchdir)$(target_prefix) +HDRDIR = $(sitehdrdir)$(target_prefix) +ARCHHDRDIR = $(sitearchhdrdir)$(target_prefix) +TARGET_SO_DIR = +TARGET_SO = $(TARGET_SO_DIR)$(DLLIB) +CLEANLIBS = $(TARGET_SO) false +CLEANOBJS = *.o *.bak + +all: $(DLLIB) +static: $(STATIC_LIB) +.PHONY: all install static install-so install-rb +.PHONY: clean clean-so clean-static clean-rb + +clean-static:: +clean-rb-default:: +clean-rb:: +clean-so:: +clean: clean-so clean-static clean-rb-default clean-rb + -$(Q)$(RM_RF) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time + +distclean-rb-default:: +distclean-rb:: +distclean-so:: +distclean-static:: +distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb + -$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log + -$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES) + -$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true + +realclean: distclean +install: install-so install-rb + +install-so: $(DLLIB) $(TIMESTAMP_DIR)/.sitearchdir.-.puma.time + $(INSTALL_PROG) $(DLLIB) $(RUBYARCHDIR) +clean-static:: + -$(Q)$(RM) $(STATIC_LIB) +install-rb: pre-install-rb do-install-rb install-rb-default +install-rb-default: pre-install-rb-default do-install-rb-default +pre-install-rb: Makefile +pre-install-rb-default: Makefile +do-install-rb: +do-install-rb-default: +pre-install-rb-default: + @$(NULLCMD) +$(TIMESTAMP_DIR)/.sitearchdir.-.puma.time: + $(Q) $(MAKEDIRS) $(@D) $(RUBYARCHDIR) + $(Q) $(TOUCH) $@ + +site-install: site-install-so site-install-rb +site-install-so: install-so +site-install-rb: install-rb + +.SUFFIXES: .c .m .cc .mm .cxx .cpp .o .S + +.cc.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cc.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.mm.o: + $(ECHO) compiling $(<) + 
$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.mm.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cxx.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cxx.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.cpp.o: + $(ECHO) compiling $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.cpp.S: + $(ECHO) translating $(<) + $(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.c.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.c.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +.m.o: + $(ECHO) compiling $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$< + +.m.S: + $(ECHO) translating $(<) + $(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$< + +$(TARGET_SO): $(OBJS) Makefile + $(ECHO) linking shared-object puma/$(DLLIB) + -$(Q)$(RM) $(@) + $(Q) $(LDSHARED) -o $@ $(OBJS) $(LIBPATH) $(DLDFLAGS) $(LOCAL_LIBS) $(LIBS) + + + +$(OBJS): $(HDRS) $(ruby_headers) diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/PumaHttp11Service.java b/vendor/cache/puma-fba741b91780/ext/puma_http11/PumaHttp11Service.java new file mode 100644 index 000000000..00f63aa2b --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/PumaHttp11Service.java @@ -0,0 +1,17 @@ +package puma; + +import java.io.IOException; + +import org.jruby.Ruby; +import org.jruby.runtime.load.BasicLibraryService; + +import org.jruby.puma.Http11; +import org.jruby.puma.MiniSSL; + +public class PumaHttp11Service implements BasicLibraryService { + public boolean basicLoad(final Ruby runtime) throws IOException { + 
Http11.createHttp11(runtime); + MiniSSL.createMiniSSL(runtime); + return true; + } +} diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/ext_help.h b/vendor/cache/puma-fba741b91780/ext/puma_http11/ext_help.h new file mode 100644 index 000000000..ba09e6dc4 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/ext_help.h @@ -0,0 +1,15 @@ +#ifndef ext_help_h +#define ext_help_h + +#define RAISE_NOT_NULL(T) if(T == NULL) rb_raise(rb_eArgError, "%s", "NULL found for " # T " when shouldn't be."); +#define DATA_GET(from,type,data_type,name) TypedData_Get_Struct(from,type,data_type,name); RAISE_NOT_NULL(name); +#define REQUIRE_TYPE(V, T) if(TYPE(V) != T) rb_raise(rb_eTypeError, "%s", "Wrong argument type for " # V " required " # T); +#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) + +#ifdef DEBUG +#define TRACE() fprintf(stderr, "> %s:%d:%s\n", __FILE__, __LINE__, __FUNCTION__) +#else +#define TRACE() +#endif + +#endif diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/extconf.rb b/vendor/cache/puma-fba741b91780/ext/puma_http11/extconf.rb new file mode 100644 index 000000000..5748647f7 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/extconf.rb @@ -0,0 +1,80 @@ +require 'mkmf' + +dir_config("puma_http11") + +if $mingw + append_cflags '-fstack-protector-strong -D_FORTIFY_SOURCE=2' + append_ldflags '-fstack-protector-strong -l:libssp.a' + have_library 'ssp' +end + +unless ENV["PUMA_DISABLE_SSL"] + # don't use pkg_config('openssl') if '--with-openssl-dir' is used + has_openssl_dir = dir_config('openssl').any? 
|| + RbConfig::CONFIG['configure_args']&.include?('openssl') + + found_pkg_config = !has_openssl_dir && pkg_config('openssl') + + found_ssl = if !$mingw && found_pkg_config + puts '──── Using OpenSSL pkgconfig (openssl.pc) ────' + true + elsif have_library('libcrypto', 'BIO_read') && have_library('libssl', 'SSL_CTX_new') + true + elsif %w'crypto libeay32'.find {|crypto| have_library(crypto, 'BIO_read')} && + %w'ssl ssleay32'.find {|ssl| have_library(ssl, 'SSL_CTX_new')} + true + else + puts '** Puma will be compiled without SSL support' + false + end + + if found_ssl + have_header "openssl/bio.h" + + ssl_h = "openssl/ssl.h".freeze + + puts "\n──── Below are yes for 1.0.2 & later ────" + have_func "DTLS_method" , ssl_h + have_func "SSL_CTX_set_session_cache_mode(NULL, 0)", ssl_h + + puts "\n──── Below are yes for 1.1.0 & later ────" + have_func "TLS_server_method" , ssl_h + have_func "SSL_CTX_set_min_proto_version(NULL, 0)" , ssl_h + + puts "\n──── Below is yes for 1.1.0 and later, but isn't documented until 3.0.0 ────" + # https://github.com/openssl/openssl/blob/OpenSSL_1_1_0/include/openssl/ssl.h#L1159 + have_func "SSL_CTX_set_dh_auto(NULL, 0)" , ssl_h + + puts "\n──── Below is yes for 1.1.1 & later ────" + have_func "SSL_CTX_set_ciphersuites(NULL, \"\")" , ssl_h + + puts "\n──── Below is yes for 3.0.0 & later ────" + have_func "SSL_get1_peer_certificate" , ssl_h + + puts '' + + # Random.bytes available in Ruby 2.5 and later, Random::DEFAULT deprecated in 3.0 + if Random.respond_to?(:bytes) + $defs.push "-DHAVE_RANDOM_BYTES" + puts "checking for Random.bytes... yes" + else + puts "checking for Random.bytes... 
no" + end + end +end + +if ENV["PUMA_MAKE_WARNINGS_INTO_ERRORS"] + # Make all warnings into errors + # Except `implicit-fallthrough` since most failures comes from ragel state machine generated code + if respond_to?(:append_cflags, true) # Ruby 2.5 and later + append_cflags(config_string('WERRORFLAG') || '-Werror') + append_cflags '-Wno-implicit-fallthrough' + else + # flag may not exist on some platforms, -Werror may not be defined on some platforms, but + # works with all in current CI + $CFLAGS << " #{config_string('WERRORFLAG') || '-Werror'}" + $CFLAGS << ' -Wno-implicit-fallthrough' + end +end + +create_makefile("puma/puma_http11") diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.c b/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.c new file mode 100644 index 000000000..388a098fc --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.c @@ -0,0 +1,1057 @@ + +#line 1 "ext/puma_http11/http11_parser.rl" +/** + * Copyright (c) 2005 Zed A. Shaw + * You can redistribute it and/or modify it under the same terms as Ruby. + * License 3-clause BSD + */ +#include "http11_parser.h" +#include +#include +#include +#include +#include + +/* + * capitalizes all lower-case ASCII characters, + * converts dashes to underscores, and underscores to commas. 
+ */ +static void snake_upcase_char(char *c) +{ + if (*c >= 'a' && *c <= 'z') + *c &= ~0x20; + else if (*c == '_') + *c = ','; + else if (*c == '-') + *c = '_'; +} + +#define LEN(AT, FPC) (FPC - buffer - parser->AT) +#define MARK(M,FPC) (parser->M = (FPC) - buffer) +#define PTR_TO(F) (buffer + parser->F) + +/** Machine **/ + + +#line 81 "ext/puma_http11/http11_parser.rl" + + +/** Data **/ + +#line 42 "ext/puma_http11/http11_parser.c" +static const int puma_parser_start = 1; +static const int puma_parser_first_final = 46; +static const int puma_parser_error = 0; + + +#line 85 "ext/puma_http11/http11_parser.rl" + +int puma_parser_init(puma_parser *parser) { + int cs = 0; + +#line 53 "ext/puma_http11/http11_parser.c" + { + cs = puma_parser_start; + } + +#line 89 "ext/puma_http11/http11_parser.rl" + parser->cs = cs; + parser->body_start = 0; + parser->content_len = 0; + parser->mark = 0; + parser->nread = 0; + parser->field_len = 0; + parser->field_start = 0; + parser->request = Qnil; + parser->body = Qnil; + + return 1; +} + + +/** exec **/ +size_t puma_parser_execute(puma_parser *parser, const char *buffer, size_t len, size_t off) { + const char *p, *pe; + int cs = parser->cs; + + assert(off <= len && "offset past end of buffer"); + + p = buffer+off; + pe = buffer+len; + + /* assert(*pe == '\0' && "pointer does not end on NUL"); */ + assert((size_t) (pe - p) == len - off && "pointers aren't same distance"); + + +#line 87 "ext/puma_http11/http11_parser.c" + { + if ( p == pe ) + goto _test_eof; + switch ( cs ) + { +case 1: + switch( (*p) ) { + case 36: goto tr0; + case 95: goto tr0; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto tr0; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto tr0; + } else + goto tr0; + goto st0; +st0: +cs = 0; + goto _out; +tr0: +#line 37 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } + goto st2; +st2: + if ( ++p == pe ) + goto _test_eof2; +case 2: +#line 118 "ext/puma_http11/http11_parser.c" + 
switch( (*p) ) { + case 32: goto tr2; + case 36: goto st27; + case 95: goto st27; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st27; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st27; + } else + goto st27; + goto st0; +tr2: +#line 50 "ext/puma_http11/http11_parser.rl" + { + parser->request_method(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st3; +st3: + if ( ++p == pe ) + goto _test_eof3; +case 3: +#line 143 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 42: goto tr4; + case 43: goto tr5; + case 47: goto tr6; + case 58: goto tr7; + } + if ( (*p) < 65 ) { + if ( 45 <= (*p) && (*p) <= 57 ) + goto tr5; + } else if ( (*p) > 90 ) { + if ( 97 <= (*p) && (*p) <= 122 ) + goto tr5; + } else + goto tr5; + goto st0; +tr4: +#line 37 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } + goto st4; +st4: + if ( ++p == pe ) + goto _test_eof4; +case 4: +#line 167 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 32: goto tr8; + case 35: goto tr9; + } + goto st0; +tr8: +#line 53 "ext/puma_http11/http11_parser.rl" + { + parser->request_uri(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st5; +tr31: +#line 37 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } +#line 56 "ext/puma_http11/http11_parser.rl" + { + parser->fragment(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st5; +tr33: +#line 56 "ext/puma_http11/http11_parser.rl" + { + parser->fragment(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st5; +tr37: +#line 69 "ext/puma_http11/http11_parser.rl" + { + parser->request_path(parser, PTR_TO(mark), LEN(mark,p)); + } +#line 53 "ext/puma_http11/http11_parser.rl" + { + parser->request_uri(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st5; +tr41: +#line 60 "ext/puma_http11/http11_parser.rl" + { MARK(query_start, p); } +#line 61 "ext/puma_http11/http11_parser.rl" + { + parser->query_string(parser, PTR_TO(query_start), LEN(query_start, p)); + } +#line 53 "ext/puma_http11/http11_parser.rl" + { + 
parser->request_uri(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st5; +tr44: +#line 61 "ext/puma_http11/http11_parser.rl" + { + parser->query_string(parser, PTR_TO(query_start), LEN(query_start, p)); + } +#line 53 "ext/puma_http11/http11_parser.rl" + { + parser->request_uri(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st5; +st5: + if ( ++p == pe ) + goto _test_eof5; +case 5: +#line 229 "ext/puma_http11/http11_parser.c" + if ( (*p) == 72 ) + goto tr10; + goto st0; +tr10: +#line 37 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } + goto st6; +st6: + if ( ++p == pe ) + goto _test_eof6; +case 6: +#line 241 "ext/puma_http11/http11_parser.c" + if ( (*p) == 84 ) + goto st7; + goto st0; +st7: + if ( ++p == pe ) + goto _test_eof7; +case 7: + if ( (*p) == 84 ) + goto st8; + goto st0; +st8: + if ( ++p == pe ) + goto _test_eof8; +case 8: + if ( (*p) == 80 ) + goto st9; + goto st0; +st9: + if ( ++p == pe ) + goto _test_eof9; +case 9: + if ( (*p) == 47 ) + goto st10; + goto st0; +st10: + if ( ++p == pe ) + goto _test_eof10; +case 10: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st11; + goto st0; +st11: + if ( ++p == pe ) + goto _test_eof11; +case 11: + if ( (*p) == 46 ) + goto st12; + if ( 48 <= (*p) && (*p) <= 57 ) + goto st11; + goto st0; +st12: + if ( ++p == pe ) + goto _test_eof12; +case 12: + if ( 48 <= (*p) && (*p) <= 57 ) + goto st13; + goto st0; +st13: + if ( ++p == pe ) + goto _test_eof13; +case 13: + if ( (*p) == 13 ) + goto tr18; + if ( 48 <= (*p) && (*p) <= 57 ) + goto st13; + goto st0; +tr18: +#line 65 "ext/puma_http11/http11_parser.rl" + { + parser->server_protocol(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st14; +tr26: +#line 46 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } +#line 47 "ext/puma_http11/http11_parser.rl" + { + parser->http_field(parser, PTR_TO(field_start), parser->field_len, PTR_TO(mark), LEN(mark, p)); + } + goto st14; +tr29: +#line 47 "ext/puma_http11/http11_parser.rl" + { + parser->http_field(parser, PTR_TO(field_start), 
parser->field_len, PTR_TO(mark), LEN(mark, p)); + } + goto st14; +st14: + if ( ++p == pe ) + goto _test_eof14; +case 14: +#line 322 "ext/puma_http11/http11_parser.c" + if ( (*p) == 10 ) + goto st15; + goto st0; +st15: + if ( ++p == pe ) + goto _test_eof15; +case 15: + switch( (*p) ) { + case 13: goto st16; + case 33: goto tr21; + case 124: goto tr21; + case 126: goto tr21; + } + if ( (*p) < 45 ) { + if ( (*p) > 39 ) { + if ( 42 <= (*p) && (*p) <= 43 ) + goto tr21; + } else if ( (*p) >= 35 ) + goto tr21; + } else if ( (*p) > 46 ) { + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr21; + } else if ( (*p) > 90 ) { + if ( 94 <= (*p) && (*p) <= 122 ) + goto tr21; + } else + goto tr21; + } else + goto tr21; + goto st0; +st16: + if ( ++p == pe ) + goto _test_eof16; +case 16: + if ( (*p) == 10 ) + goto tr22; + goto st0; +tr22: +#line 73 "ext/puma_http11/http11_parser.rl" + { + parser->body_start = p - buffer + 1; + parser->header_done(parser, p + 1, pe - p - 1); + {p++; cs = 46; goto _out;} + } + goto st46; +st46: + if ( ++p == pe ) + goto _test_eof46; +case 46: +#line 373 "ext/puma_http11/http11_parser.c" + goto st0; +tr21: +#line 40 "ext/puma_http11/http11_parser.rl" + { MARK(field_start, p); } +#line 41 "ext/puma_http11/http11_parser.rl" + { snake_upcase_char((char *)p); } + goto st17; +tr23: +#line 41 "ext/puma_http11/http11_parser.rl" + { snake_upcase_char((char *)p); } + goto st17; +st17: + if ( ++p == pe ) + goto _test_eof17; +case 17: +#line 389 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 33: goto tr23; + case 58: goto tr24; + case 124: goto tr23; + case 126: goto tr23; + } + if ( (*p) < 45 ) { + if ( (*p) > 39 ) { + if ( 42 <= (*p) && (*p) <= 43 ) + goto tr23; + } else if ( (*p) >= 35 ) + goto tr23; + } else if ( (*p) > 46 ) { + if ( (*p) < 65 ) { + if ( 48 <= (*p) && (*p) <= 57 ) + goto tr23; + } else if ( (*p) > 90 ) { + if ( 94 <= (*p) && (*p) <= 122 ) + goto tr23; + } else + goto tr23; + } else + goto tr23; + goto st0; +tr24: 
+#line 42 "ext/puma_http11/http11_parser.rl" + { + parser->field_len = LEN(field_start, p); + } + goto st18; +tr27: +#line 46 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } + goto st18; +st18: + if ( ++p == pe ) + goto _test_eof18; +case 18: +#line 428 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 13: goto tr26; + case 32: goto tr27; + case 127: goto st0; + } + if ( (*p) > 8 ) { + if ( 10 <= (*p) && (*p) <= 31 ) + goto st0; + } else if ( (*p) >= 0 ) + goto st0; + goto tr25; +tr25: +#line 46 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } + goto st19; +st19: + if ( ++p == pe ) + goto _test_eof19; +case 19: +#line 448 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 13: goto tr29; + case 127: goto st0; + } + if ( (*p) > 8 ) { + if ( 10 <= (*p) && (*p) <= 31 ) + goto st0; + } else if ( (*p) >= 0 ) + goto st0; + goto st19; +tr9: +#line 53 "ext/puma_http11/http11_parser.rl" + { + parser->request_uri(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st20; +tr38: +#line 69 "ext/puma_http11/http11_parser.rl" + { + parser->request_path(parser, PTR_TO(mark), LEN(mark,p)); + } +#line 53 "ext/puma_http11/http11_parser.rl" + { + parser->request_uri(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st20; +tr42: +#line 60 "ext/puma_http11/http11_parser.rl" + { MARK(query_start, p); } +#line 61 "ext/puma_http11/http11_parser.rl" + { + parser->query_string(parser, PTR_TO(query_start), LEN(query_start, p)); + } +#line 53 "ext/puma_http11/http11_parser.rl" + { + parser->request_uri(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st20; +tr45: +#line 61 "ext/puma_http11/http11_parser.rl" + { + parser->query_string(parser, PTR_TO(query_start), LEN(query_start, p)); + } +#line 53 "ext/puma_http11/http11_parser.rl" + { + parser->request_uri(parser, PTR_TO(mark), LEN(mark, p)); + } + goto st20; +st20: + if ( ++p == pe ) + goto _test_eof20; +case 20: +#line 501 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 32: goto tr31; + case 60: goto 
st0; + case 62: goto st0; + case 127: goto st0; + } + if ( (*p) > 31 ) { + if ( 34 <= (*p) && (*p) <= 35 ) + goto st0; + } else if ( (*p) >= 0 ) + goto st0; + goto tr30; +tr30: +#line 37 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } + goto st21; +st21: + if ( ++p == pe ) + goto _test_eof21; +case 21: +#line 522 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 32: goto tr33; + case 60: goto st0; + case 62: goto st0; + case 127: goto st0; + } + if ( (*p) > 31 ) { + if ( 34 <= (*p) && (*p) <= 35 ) + goto st0; + } else if ( (*p) >= 0 ) + goto st0; + goto st21; +tr5: +#line 37 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } + goto st22; +st22: + if ( ++p == pe ) + goto _test_eof22; +case 22: +#line 543 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 43: goto st22; + case 58: goto st23; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st22; + } else if ( (*p) > 57 ) { + if ( (*p) > 90 ) { + if ( 97 <= (*p) && (*p) <= 122 ) + goto st22; + } else if ( (*p) >= 65 ) + goto st22; + } else + goto st22; + goto st0; +tr7: +#line 37 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } + goto st23; +st23: + if ( ++p == pe ) + goto _test_eof23; +case 23: +#line 568 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 32: goto tr8; + case 34: goto st0; + case 35: goto tr9; + case 60: goto st0; + case 62: goto st0; + case 127: goto st0; + } + if ( 0 <= (*p) && (*p) <= 31 ) + goto st0; + goto st23; +tr6: +#line 37 "ext/puma_http11/http11_parser.rl" + { MARK(mark, p); } + goto st24; +st24: + if ( ++p == pe ) + goto _test_eof24; +case 24: +#line 588 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 32: goto tr37; + case 34: goto st0; + case 35: goto tr38; + case 60: goto st0; + case 62: goto st0; + case 63: goto tr39; + case 127: goto st0; + } + if ( 0 <= (*p) && (*p) <= 31 ) + goto st0; + goto st24; +tr39: +#line 69 "ext/puma_http11/http11_parser.rl" + { + parser->request_path(parser, PTR_TO(mark), 
LEN(mark,p)); + } + goto st25; +st25: + if ( ++p == pe ) + goto _test_eof25; +case 25: +#line 611 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 32: goto tr41; + case 34: goto st0; + case 35: goto tr42; + case 60: goto st0; + case 62: goto st0; + case 127: goto st0; + } + if ( 0 <= (*p) && (*p) <= 31 ) + goto st0; + goto tr40; +tr40: +#line 60 "ext/puma_http11/http11_parser.rl" + { MARK(query_start, p); } + goto st26; +st26: + if ( ++p == pe ) + goto _test_eof26; +case 26: +#line 631 "ext/puma_http11/http11_parser.c" + switch( (*p) ) { + case 32: goto tr44; + case 34: goto st0; + case 35: goto tr45; + case 60: goto st0; + case 62: goto st0; + case 127: goto st0; + } + if ( 0 <= (*p) && (*p) <= 31 ) + goto st0; + goto st26; +st27: + if ( ++p == pe ) + goto _test_eof27; +case 27: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st28; + case 95: goto st28; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st28; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st28; + } else + goto st28; + goto st0; +st28: + if ( ++p == pe ) + goto _test_eof28; +case 28: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st29; + case 95: goto st29; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st29; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st29; + } else + goto st29; + goto st0; +st29: + if ( ++p == pe ) + goto _test_eof29; +case 29: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st30; + case 95: goto st30; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st30; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st30; + } else + goto st30; + goto st0; +st30: + if ( ++p == pe ) + goto _test_eof30; +case 30: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st31; + case 95: goto st31; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st31; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st31; + } 
else + goto st31; + goto st0; +st31: + if ( ++p == pe ) + goto _test_eof31; +case 31: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st32; + case 95: goto st32; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st32; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st32; + } else + goto st32; + goto st0; +st32: + if ( ++p == pe ) + goto _test_eof32; +case 32: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st33; + case 95: goto st33; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st33; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st33; + } else + goto st33; + goto st0; +st33: + if ( ++p == pe ) + goto _test_eof33; +case 33: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st34; + case 95: goto st34; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st34; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st34; + } else + goto st34; + goto st0; +st34: + if ( ++p == pe ) + goto _test_eof34; +case 34: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st35; + case 95: goto st35; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st35; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st35; + } else + goto st35; + goto st0; +st35: + if ( ++p == pe ) + goto _test_eof35; +case 35: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st36; + case 95: goto st36; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st36; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st36; + } else + goto st36; + goto st0; +st36: + if ( ++p == pe ) + goto _test_eof36; +case 36: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st37; + case 95: goto st37; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st37; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st37; + } else + goto st37; + goto st0; +st37: + if ( ++p == pe ) + goto 
_test_eof37; +case 37: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st38; + case 95: goto st38; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st38; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st38; + } else + goto st38; + goto st0; +st38: + if ( ++p == pe ) + goto _test_eof38; +case 38: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st39; + case 95: goto st39; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st39; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st39; + } else + goto st39; + goto st0; +st39: + if ( ++p == pe ) + goto _test_eof39; +case 39: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st40; + case 95: goto st40; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st40; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st40; + } else + goto st40; + goto st0; +st40: + if ( ++p == pe ) + goto _test_eof40; +case 40: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st41; + case 95: goto st41; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st41; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st41; + } else + goto st41; + goto st0; +st41: + if ( ++p == pe ) + goto _test_eof41; +case 41: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st42; + case 95: goto st42; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st42; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st42; + } else + goto st42; + goto st0; +st42: + if ( ++p == pe ) + goto _test_eof42; +case 42: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st43; + case 95: goto st43; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st43; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st43; + } else + goto st43; + goto st0; +st43: + if ( ++p == pe ) + goto _test_eof43; +case 43: + switch( (*p) ) { + case 32: goto tr2; + case 36: 
goto st44; + case 95: goto st44; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st44; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st44; + } else + goto st44; + goto st0; +st44: + if ( ++p == pe ) + goto _test_eof44; +case 44: + switch( (*p) ) { + case 32: goto tr2; + case 36: goto st45; + case 95: goto st45; + } + if ( (*p) < 48 ) { + if ( 45 <= (*p) && (*p) <= 46 ) + goto st45; + } else if ( (*p) > 57 ) { + if ( 65 <= (*p) && (*p) <= 90 ) + goto st45; + } else + goto st45; + goto st0; +st45: + if ( ++p == pe ) + goto _test_eof45; +case 45: + if ( (*p) == 32 ) + goto tr2; + goto st0; + } + _test_eof2: cs = 2; goto _test_eof; + _test_eof3: cs = 3; goto _test_eof; + _test_eof4: cs = 4; goto _test_eof; + _test_eof5: cs = 5; goto _test_eof; + _test_eof6: cs = 6; goto _test_eof; + _test_eof7: cs = 7; goto _test_eof; + _test_eof8: cs = 8; goto _test_eof; + _test_eof9: cs = 9; goto _test_eof; + _test_eof10: cs = 10; goto _test_eof; + _test_eof11: cs = 11; goto _test_eof; + _test_eof12: cs = 12; goto _test_eof; + _test_eof13: cs = 13; goto _test_eof; + _test_eof14: cs = 14; goto _test_eof; + _test_eof15: cs = 15; goto _test_eof; + _test_eof16: cs = 16; goto _test_eof; + _test_eof46: cs = 46; goto _test_eof; + _test_eof17: cs = 17; goto _test_eof; + _test_eof18: cs = 18; goto _test_eof; + _test_eof19: cs = 19; goto _test_eof; + _test_eof20: cs = 20; goto _test_eof; + _test_eof21: cs = 21; goto _test_eof; + _test_eof22: cs = 22; goto _test_eof; + _test_eof23: cs = 23; goto _test_eof; + _test_eof24: cs = 24; goto _test_eof; + _test_eof25: cs = 25; goto _test_eof; + _test_eof26: cs = 26; goto _test_eof; + _test_eof27: cs = 27; goto _test_eof; + _test_eof28: cs = 28; goto _test_eof; + _test_eof29: cs = 29; goto _test_eof; + _test_eof30: cs = 30; goto _test_eof; + _test_eof31: cs = 31; goto _test_eof; + _test_eof32: cs = 32; goto _test_eof; + _test_eof33: cs = 33; goto _test_eof; + _test_eof34: cs = 34; goto _test_eof; + _test_eof35: 
cs = 35; goto _test_eof; + _test_eof36: cs = 36; goto _test_eof; + _test_eof37: cs = 37; goto _test_eof; + _test_eof38: cs = 38; goto _test_eof; + _test_eof39: cs = 39; goto _test_eof; + _test_eof40: cs = 40; goto _test_eof; + _test_eof41: cs = 41; goto _test_eof; + _test_eof42: cs = 42; goto _test_eof; + _test_eof43: cs = 43; goto _test_eof; + _test_eof44: cs = 44; goto _test_eof; + _test_eof45: cs = 45; goto _test_eof; + + _test_eof: {} + _out: {} + } + +#line 117 "ext/puma_http11/http11_parser.rl" + + if (!puma_parser_has_error(parser)) + parser->cs = cs; + parser->nread += p - (buffer + off); + + assert(p <= pe && "buffer overflow after parsing execute"); + assert(parser->nread <= len && "nread longer than length"); + assert(parser->body_start <= len && "body starts after buffer end"); + assert(parser->mark < len && "mark is after buffer end"); + assert(parser->field_len <= len && "field has length longer than whole buffer"); + assert(parser->field_start < len && "field starts after buffer end"); + + return(parser->nread); +} + +int puma_parser_finish(puma_parser *parser) +{ + if (puma_parser_has_error(parser) ) { + return -1; + } else if (puma_parser_is_finished(parser) ) { + return 1; + } else { + return 0; + } +} + +int puma_parser_has_error(puma_parser *parser) { + return parser->cs == puma_parser_error; +} + +int puma_parser_is_finished(puma_parser *parser) { + return parser->cs >= puma_parser_first_final; +} diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.h b/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.h new file mode 100644 index 000000000..e0545a9bf --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.h @@ -0,0 +1,65 @@ +/** + * Copyright (c) 2005 Zed A. Shaw + * You can redistribute it and/or modify it under the same terms as Ruby. 
+ * License 3-clause BSD + */ + +#ifndef http11_parser_h +#define http11_parser_h + +#define RSTRING_NOT_MODIFIED 1 +#include "ruby.h" + +#include + +#if defined(_WIN32) +#include +#endif + +#define BUFFER_LEN 1024 + +struct puma_parser; + +typedef void (*element_cb)(struct puma_parser* hp, + const char *at, size_t length); + +typedef void (*field_cb)(struct puma_parser* hp, + const char *field, size_t flen, + const char *value, size_t vlen); + +typedef struct puma_parser { + int cs; + int content_len; + size_t body_start; + size_t nread; + size_t mark; + size_t field_start; + size_t field_len; + size_t query_start; + + VALUE request; + VALUE body; + + field_cb http_field; + element_cb request_method; + element_cb request_uri; + element_cb fragment; + element_cb request_path; + element_cb query_string; + element_cb server_protocol; + element_cb header_done; + + char buf[BUFFER_LEN]; + +} puma_parser; + +int puma_parser_init(puma_parser *parser); +int puma_parser_finish(puma_parser *parser); +size_t puma_parser_execute(puma_parser *parser, const char *data, + size_t len, size_t off); +int puma_parser_has_error(puma_parser *parser); +int puma_parser_is_finished(puma_parser *parser); + +#define puma_parser_nread(parser) (parser)->nread + +#endif diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.java.rl b/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.java.rl new file mode 100644 index 000000000..c5cb65830 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.java.rl @@ -0,0 +1,145 @@ +package org.jruby.puma; + +import org.jruby.Ruby; +import org.jruby.RubyHash; +import org.jruby.util.ByteList; + +public class Http11Parser { + +/** Machine **/ + +%%{ + + machine puma_parser; + + action mark {parser.mark = fpc; } + + action start_field { parser.field_start = fpc; } + action snake_upcase_field { /* FIXME stub */ } + action write_field { + parser.field_len = fpc-parser.field_start; + } + + action start_value 
{ parser.mark = fpc; } + action write_value { + Http11.http_field(runtime, parser.data, parser.buffer, parser.field_start, parser.field_len, parser.mark, fpc-parser.mark); + } + action request_method { + Http11.request_method(runtime, parser.data, parser.buffer, parser.mark, fpc-parser.mark); + } + action request_uri { + Http11.request_uri(runtime, parser.data, parser.buffer, parser.mark, fpc-parser.mark); + } + action fragment { + Http11.fragment(runtime, parser.data, parser.buffer, parser.mark, fpc-parser.mark); + } + + action start_query {parser.query_start = fpc; } + action query_string { + Http11.query_string(runtime, parser.data, parser.buffer, parser.query_start, fpc-parser.query_start); + } + + action server_protocol { + Http11.server_protocol(runtime, parser.data, parser.buffer, parser.mark, fpc-parser.mark); + } + + action request_path { + Http11.request_path(runtime, parser.data, parser.buffer, parser.mark, fpc-parser.mark); + } + + action done { + parser.body_start = fpc + 1; + http.header_done(runtime, parser.data, parser.buffer, fpc + 1, pe - fpc - 1); + fbreak; + } + + include puma_parser_common "http11_parser_common.rl"; + +}%% + +/** Data **/ +%% write data noentry; + + public static interface ElementCB { + public void call(Ruby runtime, RubyHash data, ByteList buffer, int at, int length); + } + + public static interface FieldCB { + public void call(Ruby runtime, RubyHash data, ByteList buffer, int field, int flen, int value, int vlen); + } + + public static class HttpParser { + int cs; + int body_start; + int content_len; + int nread; + int mark; + int field_start; + int field_len; + int query_start; + + RubyHash data; + ByteList buffer; + + public void init() { + cs = 0; + + %% write init; + + body_start = 0; + content_len = 0; + mark = 0; + nread = 0; + field_len = 0; + field_start = 0; + } + } + + public final HttpParser parser = new HttpParser(); + + public int execute(Ruby runtime, Http11 http, ByteList buffer, int off) { + int p, pe; + int 
cs = parser.cs; + int len = buffer.length(); + assert off<=len : "offset past end of buffer"; + + p = off; + pe = len; + // get a copy of the bytes, since it may not start at 0 + // FIXME: figure out how to just use the bytes in-place + byte[] data = buffer.bytes(); + parser.buffer = buffer; + + %% write exec; + + parser.cs = cs; + parser.nread += (p - off); + + assert p <= pe : "buffer overflow after parsing execute"; + assert parser.nread <= len : "nread longer than length"; + assert parser.body_start <= len : "body starts after buffer end"; + assert parser.mark < len : "mark is after buffer end"; + assert parser.field_len <= len : "field has length longer than whole buffer"; + assert parser.field_start < len : "field starts after buffer end"; + + return parser.nread; + } + + public int finish() { + if(has_error()) { + return -1; + } else if(is_finished()) { + return 1; + } else { + return 0; + } + } + + public boolean has_error() { + return parser.cs == puma_parser_error; + } + + public boolean is_finished() { + return parser.cs == puma_parser_first_final; + } +} diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.rl b/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.rl new file mode 100644 index 000000000..f1ef5a116 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser.rl @@ -0,0 +1,149 @@ +/** + * Copyright (c) 2005 Zed A. Shaw + * You can redistribute it and/or modify it under the same terms as Ruby. + * License 3-clause BSD + */ +#include "http11_parser.h" +#include +#include +#include +#include +#include + +/* + * capitalizes all lower-case ASCII characters, + * converts dashes to underscores, and underscores to commas. 
+ */ +static void snake_upcase_char(char *c) +{ + if (*c >= 'a' && *c <= 'z') + *c &= ~0x20; + else if (*c == '_') + *c = ','; + else if (*c == '-') + *c = '_'; +} + +#define LEN(AT, FPC) (FPC - buffer - parser->AT) +#define MARK(M,FPC) (parser->M = (FPC) - buffer) +#define PTR_TO(F) (buffer + parser->F) + +/** Machine **/ + +%%{ + + machine puma_parser; + + action mark { MARK(mark, fpc); } + + + action start_field { MARK(field_start, fpc); } + action snake_upcase_field { snake_upcase_char((char *)fpc); } + action write_field { + parser->field_len = LEN(field_start, fpc); + } + + action start_value { MARK(mark, fpc); } + action write_value { + parser->http_field(parser, PTR_TO(field_start), parser->field_len, PTR_TO(mark), LEN(mark, fpc)); + } + action request_method { + parser->request_method(parser, PTR_TO(mark), LEN(mark, fpc)); + } + action request_uri { + parser->request_uri(parser, PTR_TO(mark), LEN(mark, fpc)); + } + action fragment { + parser->fragment(parser, PTR_TO(mark), LEN(mark, fpc)); + } + + action start_query { MARK(query_start, fpc); } + action query_string { + parser->query_string(parser, PTR_TO(query_start), LEN(query_start, fpc)); + } + + action server_protocol { + parser->server_protocol(parser, PTR_TO(mark), LEN(mark, fpc)); + } + + action request_path { + parser->request_path(parser, PTR_TO(mark), LEN(mark,fpc)); + } + + action done { + parser->body_start = fpc - buffer + 1; + parser->header_done(parser, fpc + 1, pe - fpc - 1); + fbreak; + } + + include puma_parser_common "http11_parser_common.rl"; + +}%% + +/** Data **/ +%% write data noentry; + +int puma_parser_init(puma_parser *parser) { + int cs = 0; + %% write init; + parser->cs = cs; + parser->body_start = 0; + parser->content_len = 0; + parser->mark = 0; + parser->nread = 0; + parser->field_len = 0; + parser->field_start = 0; + parser->request = Qnil; + parser->body = Qnil; + + return 1; +} + + +/** exec **/ +size_t puma_parser_execute(puma_parser *parser, const char *buffer, size_t 
len, size_t off) { + const char *p, *pe; + int cs = parser->cs; + + assert(off <= len && "offset past end of buffer"); + + p = buffer+off; + pe = buffer+len; + + /* assert(*pe == '\0' && "pointer does not end on NUL"); */ + assert((size_t) (pe - p) == len - off && "pointers aren't same distance"); + + %% write exec; + + if (!puma_parser_has_error(parser)) + parser->cs = cs; + parser->nread += p - (buffer + off); + + assert(p <= pe && "buffer overflow after parsing execute"); + assert(parser->nread <= len && "nread longer than length"); + assert(parser->body_start <= len && "body starts after buffer end"); + assert(parser->mark < len && "mark is after buffer end"); + assert(parser->field_len <= len && "field has length longer than whole buffer"); + assert(parser->field_start < len && "field starts after buffer end"); + + return(parser->nread); +} + +int puma_parser_finish(puma_parser *parser) +{ + if (puma_parser_has_error(parser) ) { + return -1; + } else if (puma_parser_is_finished(parser) ) { + return 1; + } else { + return 0; + } +} + +int puma_parser_has_error(puma_parser *parser) { + return parser->cs == puma_parser_error; +} + +int puma_parser_is_finished(puma_parser *parser) { + return parser->cs >= puma_parser_first_final; +} diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser_common.rl b/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser_common.rl new file mode 100644 index 000000000..d61aafa4a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/http11_parser_common.rl @@ -0,0 +1,54 @@ +%%{ + + machine puma_parser_common; + +#### HTTP PROTOCOL GRAMMAR +# line endings + CRLF = "\r\n"; + +# character types + CTL = (cntrl | 127); + safe = ("$" | "-" | "_" | "."); + extra = ("!" | "*" | "'" | "(" | ")" | ","); + reserved = (";" | "/" | "?" 
| ":" | "@" | "&" | "=" | "+"); + unsafe = (CTL | " " | "\"" | "#" | "%" | "<" | ">"); + national = any -- (alpha | digit | reserved | extra | safe | unsafe); + unreserved = (alpha | digit | safe | extra | national); + escape = ("%" xdigit xdigit); + uchar = (unreserved | escape | "%"); + pchar = (uchar | ":" | "@" | "&" | "=" | "+" | ";"); + tspecials = ("(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\\" | "\"" | "/" | "[" | "]" | "?" | "=" | "{" | "}" | " " | "\t"); + +# elements + token = (ascii -- (CTL | tspecials)); + +# URI schemes and absolute paths + scheme = ( alpha | digit | "+" | "-" | "." )* ; + absolute_uri = (scheme ":" (uchar | reserved )*); + + path = ( pchar+ ( "/" pchar* )* ) ; + query = ( uchar | reserved )* %query_string ; + param = ( pchar | "/" )* ; + params = ( param ( ";" param )* ) ; + rel_path = ( path? %request_path ) ("?" %start_query query)?; + absolute_path = ( "/"+ rel_path ); + + Request_URI = ( "*" | absolute_uri | absolute_path ) >mark %request_uri; + Fragment = ( uchar | reserved )* >mark %fragment; + Method = ( upper | digit | safe ){1,20} >mark %request_method; + + http_number = ( digit+ "." 
digit+ ) ; + Server_Protocol = ( "HTTP/" http_number ) >mark %server_protocol ; + Request_Line = ( Method " " Request_URI ("#" Fragment){0,1} " " Server_Protocol CRLF ) ; + + field_name = ( token -- ":" )+ >start_field $snake_upcase_field %write_field; + + field_value = ( (any -- CTL) | "\t" )* >start_value %write_value; + + message_header = field_name ":" " "* field_value :> CRLF; + + Request = Request_Line ( message_header )* ( CRLF @done ); + +main := Request; + +}%% diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/mini_ssl.c b/vendor/cache/puma-fba741b91780/ext/puma_http11/mini_ssl.c new file mode 100644 index 000000000..dc5a77fe9 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/mini_ssl.c @@ -0,0 +1,842 @@ +#define RSTRING_NOT_MODIFIED 1 + +#include +#include +#include + +#ifdef HAVE_OPENSSL_BIO_H + +#include +#include +#include +#include +#include + +#ifndef SSL_OP_NO_COMPRESSION +#define SSL_OP_NO_COMPRESSION 0 +#endif + +typedef struct { + BIO* read; + BIO* write; + SSL* ssl; + SSL_CTX* ctx; +} ms_conn; + +typedef struct { + unsigned char* buf; + int bytes; +} ms_cert_buf; + +VALUE eError; + +NORETURN(void raise_file_error(const char* caller, const char *filename)); + +void raise_file_error(const char* caller, const char *filename) { + rb_raise(eError, "%s: error in file '%s': %s", caller, filename, ERR_error_string(ERR_get_error(), NULL)); +} + +NORETURN(void raise_param_error(const char* caller, const char *param)); + +void raise_param_error(const char* caller, const char *param) { + rb_raise(eError, "%s: error with parameter '%s': %s", caller, param, ERR_error_string(ERR_get_error(), NULL)); +} + +void engine_free(void *ptr) { + ms_conn *conn = ptr; + ms_cert_buf* cert_buf = (ms_cert_buf*)SSL_get_app_data(conn->ssl); + if(cert_buf) { + OPENSSL_free(cert_buf->buf); + free(cert_buf); + } + SSL_free(conn->ssl); + SSL_CTX_free(conn->ctx); + + free(conn); +} + +const rb_data_type_t engine_data_type = { + "MiniSSL/ENGINE", + { 0, 
engine_free, 0 }, + 0, 0, RUBY_TYPED_FREE_IMMEDIATELY, +}; + +#ifndef HAVE_SSL_CTX_SET_DH_AUTO +DH *get_dh2048(void) { + /* `openssl dhparam -C 2048` + * -----BEGIN DH PARAMETERS----- + * MIIBCAKCAQEAjmh1uQHdTfxOyxEbKAV30fUfzqMDF/ChPzjfyzl2jcrqQMhrk76o + * 2NPNXqxHwsddMZ1RzvU8/jl+uhRuPWjXCFZbhET4N1vrviZM3VJhV8PPHuiVOACO + * y32jFd+Szx4bo2cXSK83hJ6jRd+0asP1awWjz9/06dFkrILCXMIfQLo0D8rqmppn + * EfDDAwuudCpM9kcDmBRAm9JsKbQ6gzZWjkc5+QWSaQofojIHbjvj3xzguaCJn+oQ + * vHWM+hsAnaOgEwCyeZ3xqs+/5lwSbkE/tqJW98cEZGygBUVo9jxZRZx6KOfjpdrb + * yenO9LJr/qtyrZB31WJbqxI0m0AKTAO8UwIBAg== + * -----END DH PARAMETERS----- + */ + static unsigned char dh2048_p[] = { + 0x8E, 0x68, 0x75, 0xB9, 0x01, 0xDD, 0x4D, 0xFC, 0x4E, 0xCB, + 0x11, 0x1B, 0x28, 0x05, 0x77, 0xD1, 0xF5, 0x1F, 0xCE, 0xA3, + 0x03, 0x17, 0xF0, 0xA1, 0x3F, 0x38, 0xDF, 0xCB, 0x39, 0x76, + 0x8D, 0xCA, 0xEA, 0x40, 0xC8, 0x6B, 0x93, 0xBE, 0xA8, 0xD8, + 0xD3, 0xCD, 0x5E, 0xAC, 0x47, 0xC2, 0xC7, 0x5D, 0x31, 0x9D, + 0x51, 0xCE, 0xF5, 0x3C, 0xFE, 0x39, 0x7E, 0xBA, 0x14, 0x6E, + 0x3D, 0x68, 0xD7, 0x08, 0x56, 0x5B, 0x84, 0x44, 0xF8, 0x37, + 0x5B, 0xEB, 0xBE, 0x26, 0x4C, 0xDD, 0x52, 0x61, 0x57, 0xC3, + 0xCF, 0x1E, 0xE8, 0x95, 0x38, 0x00, 0x8E, 0xCB, 0x7D, 0xA3, + 0x15, 0xDF, 0x92, 0xCF, 0x1E, 0x1B, 0xA3, 0x67, 0x17, 0x48, + 0xAF, 0x37, 0x84, 0x9E, 0xA3, 0x45, 0xDF, 0xB4, 0x6A, 0xC3, + 0xF5, 0x6B, 0x05, 0xA3, 0xCF, 0xDF, 0xF4, 0xE9, 0xD1, 0x64, + 0xAC, 0x82, 0xC2, 0x5C, 0xC2, 0x1F, 0x40, 0xBA, 0x34, 0x0F, + 0xCA, 0xEA, 0x9A, 0x9A, 0x67, 0x11, 0xF0, 0xC3, 0x03, 0x0B, + 0xAE, 0x74, 0x2A, 0x4C, 0xF6, 0x47, 0x03, 0x98, 0x14, 0x40, + 0x9B, 0xD2, 0x6C, 0x29, 0xB4, 0x3A, 0x83, 0x36, 0x56, 0x8E, + 0x47, 0x39, 0xF9, 0x05, 0x92, 0x69, 0x0A, 0x1F, 0xA2, 0x32, + 0x07, 0x6E, 0x3B, 0xE3, 0xDF, 0x1C, 0xE0, 0xB9, 0xA0, 0x89, + 0x9F, 0xEA, 0x10, 0xBC, 0x75, 0x8C, 0xFA, 0x1B, 0x00, 0x9D, + 0xA3, 0xA0, 0x13, 0x00, 0xB2, 0x79, 0x9D, 0xF1, 0xAA, 0xCF, + 0xBF, 0xE6, 0x5C, 0x12, 0x6E, 0x41, 0x3F, 0xB6, 0xA2, 0x56, + 0xF7, 0xC7, 0x04, 0x64, 0x6C, 0xA0, 0x05, 
0x45, 0x68, 0xF6, + 0x3C, 0x59, 0x45, 0x9C, 0x7A, 0x28, 0xE7, 0xE3, 0xA5, 0xDA, + 0xDB, 0xC9, 0xE9, 0xCE, 0xF4, 0xB2, 0x6B, 0xFE, 0xAB, 0x72, + 0xAD, 0x90, 0x77, 0xD5, 0x62, 0x5B, 0xAB, 0x12, 0x34, 0x9B, + 0x40, 0x0A, 0x4C, 0x03, 0xBC, 0x53 + }; + static unsigned char dh2048_g[] = { 0x02 }; + + DH *dh; +#if !(OPENSSL_VERSION_NUMBER < 0x10100005L) + BIGNUM *p, *g; +#endif + + dh = DH_new(); + +#if OPENSSL_VERSION_NUMBER < 0x10100005L + dh->p = BN_bin2bn(dh2048_p, sizeof(dh2048_p), NULL); + dh->g = BN_bin2bn(dh2048_g, sizeof(dh2048_g), NULL); + + if ((dh->p == NULL) || (dh->g == NULL)) { + DH_free(dh); + return NULL; + } +#else + p = BN_bin2bn(dh2048_p, sizeof(dh2048_p), NULL); + g = BN_bin2bn(dh2048_g, sizeof(dh2048_g), NULL); + + if (p == NULL || g == NULL || !DH_set0_pqg(dh, p, NULL, g)) { + DH_free(dh); + BN_free(p); + BN_free(g); + return NULL; + } +#endif + + return dh; +} +#endif + +static void +sslctx_free(void *ptr) { + SSL_CTX *ctx = ptr; + SSL_CTX_free(ctx); +} + +static const rb_data_type_t sslctx_type = { + "MiniSSL/SSLContext", + { + 0, sslctx_free, + }, + 0, 0, RUBY_TYPED_FREE_IMMEDIATELY, +}; + +ms_conn* engine_alloc(VALUE klass, VALUE* obj) { + ms_conn* conn; + + *obj = TypedData_Make_Struct(klass, ms_conn, &engine_data_type, conn); + + conn->read = BIO_new(BIO_s_mem()); + BIO_set_nbio(conn->read, 1); + + conn->write = BIO_new(BIO_s_mem()); + BIO_set_nbio(conn->write, 1); + + conn->ssl = 0; + conn->ctx = 0; + + return conn; +} + +static int engine_verify_callback(int preverify_ok, X509_STORE_CTX* ctx) { + X509* err_cert; + SSL* ssl; + int bytes; + unsigned char* buf = NULL; + + if(!preverify_ok) { + err_cert = X509_STORE_CTX_get_current_cert(ctx); + if(err_cert) { + /* + * Save the failed certificate for inspection/logging. 
+ */ + bytes = i2d_X509(err_cert, &buf); + if(bytes > 0) { + ms_cert_buf* cert_buf = (ms_cert_buf*)malloc(sizeof(ms_cert_buf)); + cert_buf->buf = buf; + cert_buf->bytes = bytes; + ssl = X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx()); + SSL_set_app_data(ssl, cert_buf); + } + } + } + + return preverify_ok; +} + +static int password_callback(char *buf, int size, int rwflag, void *userdata) { + const char *password = (const char *) userdata; + size_t len = strlen(password); + + if (len > (size_t) size) { + return 0; + } + + memcpy(buf, password, len); + return (int) len; +} + +static VALUE +sslctx_alloc(VALUE klass) { + SSL_CTX *ctx; + long mode = 0 | + SSL_MODE_ENABLE_PARTIAL_WRITE | + SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER | + SSL_MODE_RELEASE_BUFFERS; + +#ifdef HAVE_TLS_SERVER_METHOD + ctx = SSL_CTX_new(TLS_method()); + // printf("\nctx using TLS_method security_level %d\n", SSL_CTX_get_security_level(ctx)); +#else + ctx = SSL_CTX_new(SSLv23_method()); +#endif + if (!ctx) { + rb_raise(eError, "SSL_CTX_new"); + } + SSL_CTX_set_mode(ctx, mode); + + return TypedData_Wrap_Struct(klass, &sslctx_type, ctx); +} + +VALUE +sslctx_initialize(VALUE self, VALUE mini_ssl_ctx) { + SSL_CTX* ctx; + int ssl_options; + VALUE key, cert, ca, verify_mode, ssl_cipher_filter, ssl_ciphersuites, no_tlsv1, no_tlsv1_1, + verification_flags, session_id_bytes, cert_pem, key_pem, key_password_command, key_password; + BIO *bio; + X509 *x509 = NULL; + EVP_PKEY *pkey; + pem_password_cb *password_cb = NULL; + const char *password = NULL; +#ifdef HAVE_SSL_CTX_SET_MIN_PROTO_VERSION + int min; +#endif +#ifndef HAVE_SSL_CTX_SET_DH_AUTO + DH *dh; +#endif +#if OPENSSL_VERSION_NUMBER < 0x10002000L + EC_KEY *ecdh; +#endif +#ifdef HAVE_SSL_CTX_SET_SESSION_CACHE_MODE + VALUE reuse, reuse_cache_size, reuse_timeout; + + reuse = rb_funcall(mini_ssl_ctx, rb_intern_const("reuse"), 0); + reuse_cache_size = rb_funcall(mini_ssl_ctx, rb_intern_const("reuse_cache_size"), 0); + reuse_timeout = 
rb_funcall(mini_ssl_ctx, rb_intern_const("reuse_timeout"), 0); +#endif + + key = rb_funcall(mini_ssl_ctx, rb_intern_const("key"), 0); + + key_password_command = rb_funcall(mini_ssl_ctx, rb_intern_const("key_password_command"), 0); + + cert = rb_funcall(mini_ssl_ctx, rb_intern_const("cert"), 0); + + ca = rb_funcall(mini_ssl_ctx, rb_intern_const("ca"), 0); + + cert_pem = rb_funcall(mini_ssl_ctx, rb_intern_const("cert_pem"), 0); + + key_pem = rb_funcall(mini_ssl_ctx, rb_intern_const("key_pem"), 0); + + verify_mode = rb_funcall(mini_ssl_ctx, rb_intern_const("verify_mode"), 0); + + ssl_cipher_filter = rb_funcall(mini_ssl_ctx, rb_intern_const("ssl_cipher_filter"), 0); + + ssl_ciphersuites = rb_funcall(mini_ssl_ctx, rb_intern_const("ssl_ciphersuites"), 0); + + no_tlsv1 = rb_funcall(mini_ssl_ctx, rb_intern_const("no_tlsv1"), 0); + + no_tlsv1_1 = rb_funcall(mini_ssl_ctx, rb_intern_const("no_tlsv1_1"), 0); + + TypedData_Get_Struct(self, SSL_CTX, &sslctx_type, ctx); + + if (!NIL_P(cert)) { + StringValue(cert); + + if (SSL_CTX_use_certificate_chain_file(ctx, RSTRING_PTR(cert)) != 1) { + raise_file_error("SSL_CTX_use_certificate_chain_file", RSTRING_PTR(cert)); + } + } + + if (!NIL_P(key_password_command)) { + key_password = rb_funcall(mini_ssl_ctx, rb_intern_const("key_password"), 0); + + if (!NIL_P(key_password)) { + StringValue(key_password); + password_cb = password_callback; + password = RSTRING_PTR(key_password); + SSL_CTX_set_default_passwd_cb(ctx, password_cb); + SSL_CTX_set_default_passwd_cb_userdata(ctx, (void *) password); + } + } + + if (!NIL_P(key)) { + StringValue(key); + + if (SSL_CTX_use_PrivateKey_file(ctx, RSTRING_PTR(key), SSL_FILETYPE_PEM) != 1) { + raise_file_error("SSL_CTX_use_PrivateKey_file", RSTRING_PTR(key)); + } + } + + if (!NIL_P(cert_pem)) { + X509 *ca = NULL; + unsigned long err; + + bio = BIO_new(BIO_s_mem()); + BIO_puts(bio, RSTRING_PTR(cert_pem)); + + /** + * Much of this pulled as a simplified version of the `use_certificate_chain_file` method 
+ * from openssl's `ssl_rsa.c` file. + */ + + /* first read the cert as the first item in the pem file */ + x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL); + if (NULL == x509) { + BIO_free_all(bio); + raise_param_error("PEM_read_bio_X509", "cert_pem"); + } + + /* Add the cert to the context */ + /* 1 is success - otherwise check the error codes */ + if (1 != SSL_CTX_use_certificate(ctx, x509)) { + BIO_free_all(bio); + raise_param_error("SSL_CTX_use_certificate", "cert_pem"); + } + + X509_free(x509); /* no longer need our reference */ + + /* Now lets load up the rest of the certificate chain */ + /* 1 is success 0 is error */ + if (0 == SSL_CTX_clear_chain_certs(ctx)) { + BIO_free_all(bio); + raise_param_error("SSL_CTX_clear_chain_certs","cert_pem"); + } + + while (1) { + ca = PEM_read_bio_X509(bio, NULL, NULL, NULL); + + if (NULL == ca) { + break; + } + + if (0 == SSL_CTX_add0_chain_cert(ctx, ca)) { + BIO_free_all(bio); + raise_param_error("SSL_CTX_add0_chain_cert","cert_pem"); + } + /* don't free ca - its now owned by the context */ + } + + /* ca is NULL - so its either the end of the file or an error */ + err = ERR_peek_last_error(); + + /* If its the end of the file - then we are done, in any case free the bio */ + BIO_free_all(bio); + + if ((ERR_GET_LIB(err) == ERR_LIB_PEM) && (ERR_GET_REASON(err) == PEM_R_NO_START_LINE)) { + ERR_clear_error(); + } else { + raise_param_error("PEM_read_bio_X509","cert_pem"); + } + } + + if (!NIL_P(key_pem)) { + bio = BIO_new(BIO_s_mem()); + BIO_puts(bio, RSTRING_PTR(key_pem)); + pkey = PEM_read_bio_PrivateKey(bio, NULL, password_cb, (void *) password); + + if (SSL_CTX_use_PrivateKey(ctx, pkey) != 1) { + BIO_free(bio); + raise_file_error("SSL_CTX_use_PrivateKey", RSTRING_PTR(key_pem)); + } + EVP_PKEY_free(pkey); + BIO_free(bio); + } + + verification_flags = rb_funcall(mini_ssl_ctx, rb_intern_const("verification_flags"), 0); + + if (!NIL_P(verification_flags)) { + X509_VERIFY_PARAM *param = SSL_CTX_get0_param(ctx); + 
X509_VERIFY_PARAM_set_flags(param, NUM2INT(verification_flags)); + SSL_CTX_set1_param(ctx, param); + } + + if (!NIL_P(ca)) { + StringValue(ca); + if (SSL_CTX_load_verify_locations(ctx, RSTRING_PTR(ca), NULL) != 1) { + raise_file_error("SSL_CTX_load_verify_locations", RSTRING_PTR(ca)); + } + } + + ssl_options = SSL_OP_CIPHER_SERVER_PREFERENCE | SSL_OP_SINGLE_ECDH_USE | SSL_OP_NO_COMPRESSION; + +#ifdef HAVE_SSL_CTX_SET_MIN_PROTO_VERSION + if (RTEST(no_tlsv1_1)) { + min = TLS1_2_VERSION; + } + else if (RTEST(no_tlsv1)) { + min = TLS1_1_VERSION; + } + else { + min = TLS1_VERSION; + } + + SSL_CTX_set_min_proto_version(ctx, min); + +#else + /* As of 1.0.2f, SSL_OP_SINGLE_DH_USE key use is always on */ + ssl_options |= SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_SINGLE_DH_USE; + + if (RTEST(no_tlsv1)) { + ssl_options |= SSL_OP_NO_TLSv1; + } + if(RTEST(no_tlsv1_1)) { + ssl_options |= SSL_OP_NO_TLSv1 | SSL_OP_NO_TLSv1_1; + } +#endif + +#ifdef HAVE_SSL_CTX_SET_SESSION_CACHE_MODE + if (!NIL_P(reuse)) { + SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_SERVER); + if (!NIL_P(reuse_cache_size)) { + SSL_CTX_sess_set_cache_size(ctx, NUM2INT(reuse_cache_size)); + } + if (!NIL_P(reuse_timeout)) { + SSL_CTX_set_timeout(ctx, NUM2INT(reuse_timeout)); + } + } else { + SSL_CTX_set_session_cache_mode(ctx, SSL_SESS_CACHE_OFF); + } +#endif + + SSL_CTX_set_options(ctx, ssl_options); + + if (!NIL_P(ssl_cipher_filter)) { + StringValue(ssl_cipher_filter); + SSL_CTX_set_cipher_list(ctx, RSTRING_PTR(ssl_cipher_filter)); + } + else { + SSL_CTX_set_cipher_list(ctx, "HIGH:!aNULL@STRENGTH"); + } + +#if HAVE_SSL_CTX_SET_CIPHERSUITES + // Only override OpenSSL default ciphersuites if config option is supplied. + if (!NIL_P(ssl_ciphersuites)) { + StringValue(ssl_ciphersuites); + SSL_CTX_set_ciphersuites(ctx, RSTRING_PTR(ssl_ciphersuites)); + } +#endif + +#if OPENSSL_VERSION_NUMBER < 0x10002000L + // Remove this case if OpenSSL 1.0.1 (now EOL) support is no longer needed. 
+ ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); + if (ecdh) { + SSL_CTX_set_tmp_ecdh(ctx, ecdh); + EC_KEY_free(ecdh); + } +#elif OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER) + SSL_CTX_set_ecdh_auto(ctx, 1); +#endif + + if (NIL_P(verify_mode)) { + /* SSL_CTX_set_verify(ctx, SSL_VERIFY_NONE, NULL); */ + } else { + SSL_CTX_set_verify(ctx, NUM2INT(verify_mode), engine_verify_callback); + } + + // Random.bytes available in Ruby 2.5 and later, Random::DEFAULT deprecated in 3.0 + session_id_bytes = rb_funcall( +#ifdef HAVE_RANDOM_BYTES + rb_cRandom, +#else + rb_const_get(rb_cRandom, rb_intern_const("DEFAULT")), +#endif + rb_intern_const("bytes"), + 1, ULL2NUM(SSL_MAX_SSL_SESSION_ID_LENGTH)); + + SSL_CTX_set_session_id_context(ctx, + (unsigned char *) RSTRING_PTR(session_id_bytes), + SSL_MAX_SSL_SESSION_ID_LENGTH); + + // printf("\ninitialize end security_level %d\n", SSL_CTX_get_security_level(ctx)); + +#ifdef HAVE_SSL_CTX_SET_DH_AUTO + // https://www.openssl.org/docs/man3.0/man3/SSL_CTX_set_dh_auto.html + SSL_CTX_set_dh_auto(ctx, 1); +#else + dh = get_dh2048(); + SSL_CTX_set_tmp_dh(ctx, dh); +#endif + + rb_obj_freeze(self); + return self; +} + +VALUE engine_init_server(VALUE self, VALUE sslctx) { + ms_conn* conn; + VALUE obj; + SSL_CTX* ctx; + SSL* ssl; + + conn = engine_alloc(self, &obj); + + TypedData_Get_Struct(sslctx, SSL_CTX, &sslctx_type, ctx); + + ssl = SSL_new(ctx); + conn->ssl = ssl; + SSL_set_app_data(ssl, NULL); + SSL_set_bio(ssl, conn->read, conn->write); + SSL_set_accept_state(ssl); + return obj; +} + +VALUE engine_init_client(VALUE klass) { + VALUE obj; + ms_conn* conn = engine_alloc(klass, &obj); +#ifdef HAVE_DTLS_METHOD + conn->ctx = SSL_CTX_new(DTLS_method()); +#else + conn->ctx = SSL_CTX_new(DTLSv1_method()); +#endif + conn->ssl = SSL_new(conn->ctx); + SSL_set_app_data(conn->ssl, NULL); + SSL_set_verify(conn->ssl, SSL_VERIFY_NONE, NULL); + + SSL_set_bio(conn->ssl, conn->read, conn->write); + + 
SSL_set_connect_state(conn->ssl); + return obj; +} + +VALUE engine_inject(VALUE self, VALUE str) { + ms_conn* conn; + long used; + + TypedData_Get_Struct(self, ms_conn, &engine_data_type, conn); + + StringValue(str); + + used = BIO_write(conn->read, RSTRING_PTR(str), (int)RSTRING_LEN(str)); + + if(used == 0 || used == -1) { + return Qfalse; + } + + return INT2FIX(used); +} + +NORETURN(void raise_error(SSL* ssl, int result)); + +void raise_error(SSL* ssl, int result) { + char buf[512]; + char msg[768]; + const char* err_str; + int err = errno; + int mask = 4095; + int ssl_err = SSL_get_error(ssl, result); + int verify_err = (int) SSL_get_verify_result(ssl); + + if(SSL_ERROR_SYSCALL == ssl_err) { + snprintf(msg, sizeof(msg), "System error: %s - %d", strerror(err), err); + + } else if(SSL_ERROR_SSL == ssl_err) { + if(X509_V_OK != verify_err) { + err_str = X509_verify_cert_error_string(verify_err); + snprintf(msg, sizeof(msg), + "OpenSSL certificate verification error: %s - %d", + err_str, verify_err); + + } else { + err = (int) ERR_get_error(); + ERR_error_string_n(err, buf, sizeof(buf)); + snprintf(msg, sizeof(msg), "OpenSSL error: %s - %d", buf, err & mask); + } + } else { + snprintf(msg, sizeof(msg), "Unknown OpenSSL error: %d", ssl_err); + } + + ERR_clear_error(); + rb_raise(eError, "%s", msg); +} + +VALUE engine_read(VALUE self) { + ms_conn* conn; + char buf[512]; + int bytes, error; + + TypedData_Get_Struct(self, ms_conn, &engine_data_type, conn); + + ERR_clear_error(); + + bytes = SSL_read(conn->ssl, (void*)buf, sizeof(buf)); + + if(bytes > 0) { + return rb_str_new(buf, bytes); + } + + if(SSL_want_read(conn->ssl)) return Qnil; + + error = SSL_get_error(conn->ssl, bytes); + + if(error == SSL_ERROR_ZERO_RETURN) { + rb_eof_error(); + } else { + raise_error(conn->ssl, bytes); + } + + return Qnil; +} + +VALUE engine_write(VALUE self, VALUE str) { + ms_conn* conn; + int bytes; + + TypedData_Get_Struct(self, ms_conn, &engine_data_type, conn); + + StringValue(str); + + 
ERR_clear_error(); + + bytes = SSL_write(conn->ssl, (void*)RSTRING_PTR(str), (int)RSTRING_LEN(str)); + if(bytes > 0) { + return INT2FIX(bytes); + } + + if(SSL_want_write(conn->ssl)) return Qnil; + + raise_error(conn->ssl, bytes); + + return Qnil; +} + +VALUE engine_extract(VALUE self) { + ms_conn* conn; + int bytes; + size_t pending; + // https://www.openssl.org/docs/manmaster/man3/BIO_f_buffer.html + // crypto/bio/bf_buff.c DEFAULT_BUFFER_SIZE + char buf[4096]; + + TypedData_Get_Struct(self, ms_conn, &engine_data_type, conn); + + pending = BIO_pending(conn->write); + if(pending > 0) { + bytes = BIO_read(conn->write, buf, sizeof(buf)); + if(bytes > 0) { + return rb_str_new(buf, bytes); + } else if(!BIO_should_retry(conn->write)) { + raise_error(conn->ssl, bytes); + } + } + + return Qnil; +} + +VALUE engine_shutdown(VALUE self) { + ms_conn* conn; + int ok; + + TypedData_Get_Struct(self, ms_conn, &engine_data_type, conn); + + ERR_clear_error(); + + ok = SSL_shutdown(conn->ssl); + if (ok == 0) { + return Qfalse; + } + + return Qtrue; +} + +VALUE engine_init(VALUE self) { + ms_conn* conn; + + TypedData_Get_Struct(self, ms_conn, &engine_data_type, conn); + + return SSL_in_init(conn->ssl) ? Qtrue : Qfalse; +} + +VALUE engine_peercert(VALUE self) { + ms_conn* conn; + X509* cert; + int bytes; + unsigned char* buf = NULL; + ms_cert_buf* cert_buf = NULL; + VALUE rb_cert_buf; + + TypedData_Get_Struct(self, ms_conn, &engine_data_type, conn); + +#ifdef HAVE_SSL_GET1_PEER_CERTIFICATE + cert = SSL_get1_peer_certificate(conn->ssl); +#else + cert = SSL_get_peer_certificate(conn->ssl); +#endif + if(!cert) { + /* + * See if there was a failed certificate associated with this client. 
+ */ + cert_buf = (ms_cert_buf*)SSL_get_app_data(conn->ssl); + if(!cert_buf) { + return Qnil; + } + buf = cert_buf->buf; + bytes = cert_buf->bytes; + + } else { + bytes = i2d_X509(cert, &buf); + X509_free(cert); + + if(bytes < 0) { + return Qnil; + } + } + + rb_cert_buf = rb_str_new((const char*)(buf), bytes); + if(!cert_buf) { + OPENSSL_free(buf); + } + + return rb_cert_buf; +} + +/* @see Puma::MiniSSL::Socket#ssl_version_state + * @version 5.0.0 + */ +static VALUE +engine_ssl_vers_st(VALUE self) { + ms_conn* conn; + TypedData_Get_Struct(self, ms_conn, &engine_data_type, conn); + return rb_ary_new3(2, rb_str_new2(SSL_get_version(conn->ssl)), rb_str_new2(SSL_state_string(conn->ssl))); +} + +VALUE noop(VALUE self) { + return Qnil; +} + +void Init_mini_ssl(VALUE puma) { + VALUE mod, eng, sslctx; + +/* Fake operation for documentation (RDoc, YARD) */ +#if 0 == 1 + puma = rb_define_module("Puma"); +#endif + + SSL_library_init(); + OpenSSL_add_ssl_algorithms(); + SSL_load_error_strings(); + ERR_load_crypto_strings(); + + mod = rb_define_module_under(puma, "MiniSSL"); + + eng = rb_define_class_under(mod, "Engine", rb_cObject); + rb_undef_alloc_func(eng); + + sslctx = rb_define_class_under(mod, "SSLContext", rb_cObject); + rb_define_alloc_func(sslctx, sslctx_alloc); + rb_define_method(sslctx, "initialize", sslctx_initialize, 1); + rb_undef_method(sslctx, "initialize_copy"); + + + // OpenSSL Build / Runtime/Load versions + + /* Version of OpenSSL that Puma was compiled with */ + rb_define_const(mod, "OPENSSL_VERSION", rb_str_new2(OPENSSL_VERSION_TEXT)); + +#if !defined(LIBRESSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x10100000 + /* Version of OpenSSL that Puma loaded with */ + rb_define_const(mod, "OPENSSL_LIBRARY_VERSION", rb_str_new2(OpenSSL_version(OPENSSL_VERSION))); +#else + rb_define_const(mod, "OPENSSL_LIBRARY_VERSION", rb_str_new2(SSLeay_version(SSLEAY_VERSION))); +#endif + +#if defined(OPENSSL_NO_SSL3) || defined(OPENSSL_NO_SSL3_METHOD) + /* True if SSL3 is 
not available */ + rb_define_const(mod, "OPENSSL_NO_SSL3", Qtrue); +#else + rb_define_const(mod, "OPENSSL_NO_SSL3", Qfalse); +#endif + +#if defined(OPENSSL_NO_TLS1) || defined(OPENSSL_NO_TLS1_METHOD) + /* True if TLS1 is not available */ + rb_define_const(mod, "OPENSSL_NO_TLS1", Qtrue); +#else + rb_define_const(mod, "OPENSSL_NO_TLS1", Qfalse); +#endif + +#if defined(OPENSSL_NO_TLS1_1) || defined(OPENSSL_NO_TLS1_1_METHOD) + /* True if TLS1_1 is not available */ + rb_define_const(mod, "OPENSSL_NO_TLS1_1", Qtrue); +#else + rb_define_const(mod, "OPENSSL_NO_TLS1_1", Qfalse); +#endif + + rb_define_singleton_method(mod, "check", noop, 0); + + eError = rb_define_class_under(mod, "SSLError", rb_eStandardError); + + rb_define_singleton_method(eng, "server", engine_init_server, 1); + rb_define_singleton_method(eng, "client", engine_init_client, 0); + + rb_define_method(eng, "inject", engine_inject, 1); + rb_define_method(eng, "read", engine_read, 0); + + rb_define_method(eng, "write", engine_write, 1); + rb_define_method(eng, "extract", engine_extract, 0); + + rb_define_method(eng, "shutdown", engine_shutdown, 0); + + rb_define_method(eng, "init?", engine_init, 0); + + /* @!attribute [r] peercert + * Returns `nil` when `MiniSSL::Context#verify_mode` is set to `VERIFY_NONE`. 
+ * @return [String, nil] DER encoded cert + */ + rb_define_method(eng, "peercert", engine_peercert, 0); + + rb_define_method(eng, "ssl_vers_st", engine_ssl_vers_st, 0); +} + +#else + +NORETURN(VALUE raise_error(VALUE self)); + +VALUE raise_error(VALUE self) { + rb_raise(rb_eStandardError, "SSL not available in this build"); +} + +void Init_mini_ssl(VALUE puma) { + VALUE mod; + + mod = rb_define_module_under(puma, "MiniSSL"); + rb_define_class_under(mod, "SSLError", rb_eStandardError); + + rb_define_singleton_method(mod, "check", raise_error, 0); +} +#endif diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/no_ssl/PumaHttp11Service.java b/vendor/cache/puma-fba741b91780/ext/puma_http11/no_ssl/PumaHttp11Service.java new file mode 100644 index 000000000..5701e83f6 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/no_ssl/PumaHttp11Service.java @@ -0,0 +1,15 @@ +package puma; + +import java.io.IOException; + +import org.jruby.Ruby; +import org.jruby.runtime.load.BasicLibraryService; + +import org.jruby.puma.Http11; + +public class PumaHttp11Service implements BasicLibraryService { + public boolean basicLoad(final Ruby runtime) throws IOException { + Http11.createHttp11(runtime); + return true; + } +} diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/org/jruby/puma/Http11.java b/vendor/cache/puma-fba741b91780/ext/puma_http11/org/jruby/puma/Http11.java new file mode 100644 index 000000000..1521e8439 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/org/jruby/puma/Http11.java @@ -0,0 +1,249 @@ +package org.jruby.puma; + +import org.jruby.Ruby; +import org.jruby.RubyClass; +import org.jruby.RubyHash; +import org.jruby.RubyModule; +import org.jruby.RubyNumeric; +import org.jruby.RubyObject; +import org.jruby.RubyString; + +import org.jruby.anno.JRubyMethod; + +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.builtin.IRubyObject; + +import org.jruby.exceptions.RaiseException; + +import 
org.jruby.util.ByteList; + +/** + * @author Ola Bini + * @author Charles Oliver Nutter + */ +public class Http11 extends RubyObject { + public final static int MAX_FIELD_NAME_LENGTH = 256; + public final static String MAX_FIELD_NAME_LENGTH_ERR = "HTTP element FIELD_NAME is longer than the 256 allowed length."; + public final static int MAX_FIELD_VALUE_LENGTH = 80 * 1024; + public final static String MAX_FIELD_VALUE_LENGTH_ERR = "HTTP element FIELD_VALUE is longer than the 81920 allowed length."; + public final static int MAX_REQUEST_URI_LENGTH = getConstLength("PUMA_REQUEST_URI_MAX_LENGTH", 1024 * 12); + public final static String MAX_REQUEST_URI_LENGTH_ERR = "HTTP element REQUEST_URI is longer than the " + MAX_REQUEST_URI_LENGTH + " allowed length."; + public final static int MAX_FRAGMENT_LENGTH = 1024; + public final static String MAX_FRAGMENT_LENGTH_ERR = "HTTP element REQUEST_PATH is longer than the 1024 allowed length."; + public final static int MAX_REQUEST_PATH_LENGTH = getConstLength("PUMA_REQUEST_PATH_MAX_LENGTH", 8192); + public final static String MAX_REQUEST_PATH_LENGTH_ERR = "HTTP element REQUEST_PATH is longer than the " + MAX_REQUEST_PATH_LENGTH + " allowed length."; + public final static int MAX_QUERY_STRING_LENGTH = getConstLength("PUMA_QUERY_STRING_MAX_LENGTH", 10 * 1024); + public final static String MAX_QUERY_STRING_LENGTH_ERR = "HTTP element QUERY_STRING is longer than the " + MAX_QUERY_STRING_LENGTH +" allowed length."; + public final static int MAX_HEADER_LENGTH = 1024 * (80 + 32); + public final static String MAX_HEADER_LENGTH_ERR = "HTTP element HEADER is longer than the 114688 allowed length."; + + public static final ByteList CONTENT_TYPE_BYTELIST = new ByteList(ByteList.plain("CONTENT_TYPE")); + public static final ByteList CONTENT_LENGTH_BYTELIST = new ByteList(ByteList.plain("CONTENT_LENGTH")); + public static final ByteList HTTP_PREFIX_BYTELIST = new ByteList(ByteList.plain("HTTP_")); + public static final ByteList 
COMMA_SPACE_BYTELIST = new ByteList(ByteList.plain(", ")); + public static final ByteList REQUEST_METHOD_BYTELIST = new ByteList(ByteList.plain("REQUEST_METHOD")); + public static final ByteList REQUEST_URI_BYTELIST = new ByteList(ByteList.plain("REQUEST_URI")); + public static final ByteList FRAGMENT_BYTELIST = new ByteList(ByteList.plain("FRAGMENT")); + public static final ByteList REQUEST_PATH_BYTELIST = new ByteList(ByteList.plain("REQUEST_PATH")); + public static final ByteList QUERY_STRING_BYTELIST = new ByteList(ByteList.plain("QUERY_STRING")); + public static final ByteList SERVER_PROTOCOL_BYTELIST = new ByteList(ByteList.plain("SERVER_PROTOCOL")); + + public static String getEnvOrProperty(String name) { + String envValue = System.getenv(name); + return (envValue != null) ? envValue : System.getProperty(name); + } + + public static int getConstLength(String name, Integer defaultValue) { + String stringValue = getEnvOrProperty(name); + if (stringValue == null || stringValue.isEmpty()) return defaultValue; + + try { + int value = Integer.parseUnsignedInt(stringValue); + if (value <= 0) { + throw new NumberFormatException("The number is not positive."); + } + return value; + } catch (NumberFormatException e) { + System.err.println(String.format("The value %s for %s is invalid. 
Using default value %d instead.", stringValue, name, defaultValue)); + return defaultValue; + } + } + + private static ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klass) { + return new Http11(runtime, klass); + } + }; + + public static void createHttp11(Ruby runtime) { + RubyModule mPuma = runtime.defineModule("Puma"); + mPuma.defineClassUnder("HttpParserError",runtime.getClass("IOError"),runtime.getClass("IOError").getAllocator()); + + RubyClass cHttpParser = mPuma.defineClassUnder("HttpParser",runtime.getObject(),ALLOCATOR); + cHttpParser.defineAnnotatedMethods(Http11.class); + } + + private Ruby runtime; + private Http11Parser hp; + private RubyString body; + + public Http11(Ruby runtime, RubyClass clazz) { + super(runtime,clazz); + this.runtime = runtime; + this.hp = new Http11Parser(); + this.hp.parser.init(); + } + + public static void validateMaxLength(Ruby runtime, int len, int max, String msg) { + if(len>max) { + throw newHTTPParserError(runtime, msg); + } + } + + private static RaiseException newHTTPParserError(Ruby runtime, String msg) { + return runtime.newRaiseException(getHTTPParserError(runtime), msg); + } + + private static RubyClass getHTTPParserError(Ruby runtime) { + // Cheaper to look this up lazily than cache eagerly and consume a field, since it's rarely encountered + return (RubyClass)runtime.getModule("Puma").getConstant("HttpParserError"); + } + + public static void http_field(Ruby runtime, RubyHash req, ByteList buffer, int field, int flen, int value, int vlen) { + RubyString f; + IRubyObject v; + validateMaxLength(runtime, flen, MAX_FIELD_NAME_LENGTH, MAX_FIELD_NAME_LENGTH_ERR); + validateMaxLength(runtime, vlen, MAX_FIELD_VALUE_LENGTH, MAX_FIELD_VALUE_LENGTH_ERR); + + ByteList b = new ByteList(buffer,field,flen); + for(int i = 0,j = b.length();i 0 && Character.isWhitespace(buffer.get(value + vlen - 1))) vlen--; + + if (b.equals(CONTENT_LENGTH_BYTELIST) || 
b.equals(CONTENT_TYPE_BYTELIST)) { + f = RubyString.newString(runtime, b); + } else { + f = RubyString.newStringShared(runtime, HTTP_PREFIX_BYTELIST); + f.cat(b); + } + + b = new ByteList(buffer, value, vlen); + v = req.fastARef(f); + if (v == null || v.isNil()) { + req.fastASet(f, RubyString.newString(runtime, b)); + } else { + RubyString vs = v.convertToString(); + vs.cat(COMMA_SPACE_BYTELIST); + vs.cat(b); + } + } + + public static void request_method(Ruby runtime, RubyHash req, ByteList buffer, int at, int length) { + RubyString val = RubyString.newString(runtime,new ByteList(buffer,at,length)); + req.fastASet(RubyString.newStringShared(runtime, REQUEST_METHOD_BYTELIST),val); + } + + public static void request_uri(Ruby runtime, RubyHash req, ByteList buffer, int at, int length) { + validateMaxLength(runtime, length, MAX_REQUEST_URI_LENGTH, MAX_REQUEST_URI_LENGTH_ERR); + RubyString val = RubyString.newString(runtime,new ByteList(buffer,at,length)); + req.fastASet(RubyString.newStringShared(runtime, REQUEST_URI_BYTELIST),val); + } + + public static void fragment(Ruby runtime, RubyHash req, ByteList buffer, int at, int length) { + validateMaxLength(runtime, length, MAX_FRAGMENT_LENGTH, MAX_FRAGMENT_LENGTH_ERR); + RubyString val = RubyString.newString(runtime,new ByteList(buffer,at,length)); + req.fastASet(RubyString.newStringShared(runtime, FRAGMENT_BYTELIST),val); + } + + public static void request_path(Ruby runtime, RubyHash req, ByteList buffer, int at, int length) { + validateMaxLength(runtime, length, MAX_REQUEST_PATH_LENGTH, MAX_REQUEST_PATH_LENGTH_ERR); + RubyString val = RubyString.newString(runtime,new ByteList(buffer,at,length)); + req.fastASet(RubyString.newStringShared(runtime, REQUEST_PATH_BYTELIST),val); + } + + public static void query_string(Ruby runtime, RubyHash req, ByteList buffer, int at, int length) { + validateMaxLength(runtime, length, MAX_QUERY_STRING_LENGTH, MAX_QUERY_STRING_LENGTH_ERR); + RubyString val = RubyString.newString(runtime,new 
ByteList(buffer,at,length)); + req.fastASet(RubyString.newStringShared(runtime, QUERY_STRING_BYTELIST),val); + } + + public static void server_protocol(Ruby runtime, RubyHash req, ByteList buffer, int at, int length) { + RubyString val = RubyString.newString(runtime,new ByteList(buffer,at,length)); + req.fastASet(RubyString.newStringShared(runtime, SERVER_PROTOCOL_BYTELIST),val); + } + + public void header_done(Ruby runtime, RubyHash req, ByteList buffer, int at, int length) { + body = RubyString.newStringShared(runtime, new ByteList(buffer, at, length)); + } + + @JRubyMethod + public IRubyObject initialize() { + this.hp.parser.init(); + return this; + } + + @JRubyMethod + public IRubyObject reset() { + this.hp.parser.init(); + return runtime.getNil(); + } + + @JRubyMethod + public IRubyObject finish() { + this.hp.finish(); + return this.hp.is_finished() ? runtime.getTrue() : runtime.getFalse(); + } + + @JRubyMethod + public IRubyObject execute(IRubyObject req_hash, IRubyObject data, IRubyObject start) { + int from = RubyNumeric.fix2int(start); + ByteList d = ((RubyString)data).getByteList(); + if(from >= d.length()) { + throw newHTTPParserError(runtime, "Requested start is after data buffer end."); + } else { + Http11Parser hp = this.hp; + Http11Parser.HttpParser parser = hp.parser; + + parser.data = (RubyHash) req_hash; + + hp.execute(runtime, this, d,from); + + validateMaxLength(runtime, parser.nread,MAX_HEADER_LENGTH, MAX_HEADER_LENGTH_ERR); + + if(hp.has_error()) { + throw newHTTPParserError(runtime, "Invalid HTTP format, parsing fails. Are you trying to open an SSL connection to a non-SSL Puma?"); + } else { + return runtime.newFixnum(parser.nread); + } + } + } + + @JRubyMethod(name = "error?") + public IRubyObject has_error() { + return this.hp.has_error() ? runtime.getTrue() : runtime.getFalse(); + } + + @JRubyMethod(name = "finished?") + public IRubyObject is_finished() { + return this.hp.is_finished() ? 
runtime.getTrue() : runtime.getFalse(); + } + + @JRubyMethod + public IRubyObject nread() { + return runtime.newFixnum(this.hp.parser.nread); + } + + @JRubyMethod + public IRubyObject body() { + return body; + } +}// Http11 diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/org/jruby/puma/Http11Parser.java b/vendor/cache/puma-fba741b91780/ext/puma_http11/org/jruby/puma/Http11Parser.java new file mode 100644 index 000000000..d5a681583 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/org/jruby/puma/Http11Parser.java @@ -0,0 +1,455 @@ + +// line 1 "ext/puma_http11/http11_parser.java.rl" +package org.jruby.puma; + +import org.jruby.Ruby; +import org.jruby.RubyHash; +import org.jruby.util.ByteList; + +public class Http11Parser { + +/** Machine **/ + + +// line 58 "ext/puma_http11/http11_parser.java.rl" + + +/** Data **/ + +// line 20 "ext/puma_http11/org/jruby/puma/Http11Parser.java" +private static byte[] init__puma_parser_actions_0() +{ + return new byte [] { + 0, 1, 0, 1, 2, 1, 3, 1, 4, 1, 5, 1, + 6, 1, 7, 1, 8, 1, 9, 1, 11, 1, 12, 1, + 13, 2, 0, 8, 2, 1, 2, 2, 4, 5, 2, 10, + 7, 2, 12, 7, 3, 9, 10, 7 + }; +} + +private static final byte _puma_parser_actions[] = init__puma_parser_actions_0(); + + +private static short[] init__puma_parser_key_offsets_0() +{ + return new short [] { + 0, 0, 8, 17, 27, 29, 30, 31, 32, 33, 34, 36, + 39, 41, 44, 45, 61, 62, 78, 85, 91, 99, 107, 117, + 125, 134, 142, 150, 159, 168, 177, 186, 195, 204, 213, 222, + 231, 240, 249, 258, 267, 276, 285, 294, 303, 312, 313 + }; +} + +private static final short _puma_parser_key_offsets[] = init__puma_parser_key_offsets_0(); + + +private static char[] init__puma_parser_trans_keys_0() +{ + return new char [] { + 36, 95, 45, 46, 48, 57, 65, 90, 32, 36, 95, 45, + 46, 48, 57, 65, 90, 42, 43, 47, 58, 45, 57, 65, + 90, 97, 122, 32, 35, 72, 84, 84, 80, 47, 48, 57, + 46, 48, 57, 48, 57, 13, 48, 57, 10, 13, 33, 124, + 126, 35, 39, 42, 43, 45, 46, 48, 57, 65, 90, 94, + 122, 10, 33, 
58, 124, 126, 35, 39, 42, 43, 45, 46, + 48, 57, 65, 90, 94, 122, 13, 32, 127, 0, 8, 10, + 31, 13, 127, 0, 8, 10, 31, 32, 60, 62, 127, 0, + 31, 34, 35, 32, 60, 62, 127, 0, 31, 34, 35, 43, + 58, 45, 46, 48, 57, 65, 90, 97, 122, 32, 34, 35, + 60, 62, 127, 0, 31, 32, 34, 35, 60, 62, 63, 127, + 0, 31, 32, 34, 35, 60, 62, 127, 0, 31, 32, 34, + 35, 60, 62, 127, 0, 31, 32, 36, 95, 45, 46, 48, + 57, 65, 90, 32, 36, 95, 45, 46, 48, 57, 65, 90, + 32, 36, 95, 45, 46, 48, 57, 65, 90, 32, 36, 95, + 45, 46, 48, 57, 65, 90, 32, 36, 95, 45, 46, 48, + 57, 65, 90, 32, 36, 95, 45, 46, 48, 57, 65, 90, + 32, 36, 95, 45, 46, 48, 57, 65, 90, 32, 36, 95, + 45, 46, 48, 57, 65, 90, 32, 36, 95, 45, 46, 48, + 57, 65, 90, 32, 36, 95, 45, 46, 48, 57, 65, 90, + 32, 36, 95, 45, 46, 48, 57, 65, 90, 32, 36, 95, + 45, 46, 48, 57, 65, 90, 32, 36, 95, 45, 46, 48, + 57, 65, 90, 32, 36, 95, 45, 46, 48, 57, 65, 90, + 32, 36, 95, 45, 46, 48, 57, 65, 90, 32, 36, 95, + 45, 46, 48, 57, 65, 90, 32, 36, 95, 45, 46, 48, + 57, 65, 90, 32, 36, 95, 45, 46, 48, 57, 65, 90, + 32, 0 + }; +} + +private static final char _puma_parser_trans_keys[] = init__puma_parser_trans_keys_0(); + + +private static byte[] init__puma_parser_single_lengths_0() +{ + return new byte [] { + 0, 2, 3, 4, 2, 1, 1, 1, 1, 1, 0, 1, + 0, 1, 1, 4, 1, 4, 3, 2, 4, 4, 2, 6, + 7, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 0 + }; +} + +private static final byte _puma_parser_single_lengths[] = init__puma_parser_single_lengths_0(); + + +private static byte[] init__puma_parser_range_lengths_0() +{ + return new byte [] { + 0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 6, 0, 6, 2, 2, 2, 2, 4, 1, + 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0 + }; +} + +private static final byte _puma_parser_range_lengths[] = init__puma_parser_range_lengths_0(); + + +private static short[] init__puma_parser_index_offsets_0() +{ + return new short [] { + 0, 0, 6, 13, 21, 24, 26, 28, 30, 32, 34, 36, + 39, 41, 44, 46, 57, 59, 70, 
76, 81, 88, 95, 102, + 110, 119, 127, 135, 142, 149, 156, 163, 170, 177, 184, 191, + 198, 205, 212, 219, 226, 233, 240, 247, 254, 261, 263 + }; +} + +private static final short _puma_parser_index_offsets[] = init__puma_parser_index_offsets_0(); + + +private static byte[] init__puma_parser_indicies_0() +{ + return new byte [] { + 0, 0, 0, 0, 0, 1, 2, 3, 3, 3, 3, 3, + 1, 4, 5, 6, 7, 5, 5, 5, 1, 8, 9, 1, + 10, 1, 11, 1, 12, 1, 13, 1, 14, 1, 15, 1, + 16, 15, 1, 17, 1, 18, 17, 1, 19, 1, 20, 21, + 21, 21, 21, 21, 21, 21, 21, 21, 1, 22, 1, 23, + 24, 23, 23, 23, 23, 23, 23, 23, 23, 1, 26, 27, + 1, 1, 1, 25, 29, 1, 1, 1, 28, 30, 1, 1, + 1, 1, 1, 31, 32, 1, 1, 1, 1, 1, 33, 34, + 35, 34, 34, 34, 34, 1, 8, 1, 9, 1, 1, 1, + 1, 35, 36, 1, 38, 1, 1, 39, 1, 1, 37, 40, + 1, 42, 1, 1, 1, 1, 41, 43, 1, 45, 1, 1, + 1, 1, 44, 2, 46, 46, 46, 46, 46, 1, 2, 47, + 47, 47, 47, 47, 1, 2, 48, 48, 48, 48, 48, 1, + 2, 49, 49, 49, 49, 49, 1, 2, 50, 50, 50, 50, + 50, 1, 2, 51, 51, 51, 51, 51, 1, 2, 52, 52, + 52, 52, 52, 1, 2, 53, 53, 53, 53, 53, 1, 2, + 54, 54, 54, 54, 54, 1, 2, 55, 55, 55, 55, 55, + 1, 2, 56, 56, 56, 56, 56, 1, 2, 57, 57, 57, + 57, 57, 1, 2, 58, 58, 58, 58, 58, 1, 2, 59, + 59, 59, 59, 59, 1, 2, 60, 60, 60, 60, 60, 1, + 2, 61, 61, 61, 61, 61, 1, 2, 62, 62, 62, 62, + 62, 1, 2, 63, 63, 63, 63, 63, 1, 2, 1, 1, + 0 + }; +} + +private static final byte _puma_parser_indicies[] = init__puma_parser_indicies_0(); + + +private static byte[] init__puma_parser_trans_targs_0() +{ + return new byte [] { + 2, 0, 3, 27, 4, 22, 24, 23, 5, 20, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 46, 17, + 18, 19, 14, 18, 19, 14, 5, 21, 5, 21, 22, 23, + 5, 24, 20, 25, 5, 26, 20, 5, 26, 20, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45 + }; +} + +private static final byte _puma_parser_trans_targs[] = init__puma_parser_trans_targs_0(); + + +private static byte[] init__puma_parser_trans_actions_0() +{ + return new byte [] { + 1, 0, 11, 0, 1, 1, 1, 1, 13, 13, 1, 0, + 0, 0, 0, 0, 
0, 0, 19, 0, 0, 28, 23, 3, + 5, 7, 31, 7, 0, 9, 25, 1, 15, 0, 0, 0, + 37, 0, 37, 21, 40, 17, 40, 34, 0, 34, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0 + }; +} + +private static final byte _puma_parser_trans_actions[] = init__puma_parser_trans_actions_0(); + + +static final int puma_parser_start = 1; +static final int puma_parser_first_final = 46; +static final int puma_parser_error = 0; + + +// line 62 "ext/puma_http11/http11_parser.java.rl" + + public static interface ElementCB { + public void call(Ruby runtime, RubyHash data, ByteList buffer, int at, int length); + } + + public static interface FieldCB { + public void call(Ruby runtime, RubyHash data, ByteList buffer, int field, int flen, int value, int vlen); + } + + public static class HttpParser { + int cs; + int body_start; + int content_len; + int nread; + int mark; + int field_start; + int field_len; + int query_start; + + RubyHash data; + ByteList buffer; + + public void init() { + cs = 0; + + +// line 216 "ext/puma_http11/org/jruby/puma/Http11Parser.java" + { + cs = puma_parser_start; + } + +// line 88 "ext/puma_http11/http11_parser.java.rl" + + body_start = 0; + content_len = 0; + mark = 0; + nread = 0; + field_len = 0; + field_start = 0; + } + } + + public final HttpParser parser = new HttpParser(); + + public int execute(Ruby runtime, Http11 http, ByteList buffer, int off) { + int p, pe; + int cs = parser.cs; + int len = buffer.length(); + assert off<=len : "offset past end of buffer"; + + p = off; + pe = len; + // get a copy of the bytes, since it may not start at 0 + // FIXME: figure out how to just use the bytes in-place + byte[] data = buffer.bytes(); + parser.buffer = buffer; + + +// line 248 "ext/puma_http11/org/jruby/puma/Http11Parser.java" + { + int _klen; + int _trans = 0; + int _acts; + int _nacts; + int _keys; + int _goto_targ = 0; + + _goto: while (true) { + switch ( _goto_targ ) { + case 0: + if ( p == pe ) { + _goto_targ = 4; + continue _goto; + } + if ( cs == 0 ) { + 
_goto_targ = 5; + continue _goto; + } +case 1: + _match: do { + _keys = _puma_parser_key_offsets[cs]; + _trans = _puma_parser_index_offsets[cs]; + _klen = _puma_parser_single_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + _klen - 1; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + ((_upper-_lower) >> 1); + if ( data[p] < _puma_parser_trans_keys[_mid] ) + _upper = _mid - 1; + else if ( data[p] > _puma_parser_trans_keys[_mid] ) + _lower = _mid + 1; + else { + _trans += (_mid - _keys); + break _match; + } + } + _keys += _klen; + _trans += _klen; + } + + _klen = _puma_parser_range_lengths[cs]; + if ( _klen > 0 ) { + int _lower = _keys; + int _mid; + int _upper = _keys + (_klen<<1) - 2; + while (true) { + if ( _upper < _lower ) + break; + + _mid = _lower + (((_upper-_lower) >> 1) & ~1); + if ( data[p] < _puma_parser_trans_keys[_mid] ) + _upper = _mid - 2; + else if ( data[p] > _puma_parser_trans_keys[_mid+1] ) + _lower = _mid + 2; + else { + _trans += ((_mid - _keys)>>1); + break _match; + } + } + _trans += _klen; + } + } while (false); + + _trans = _puma_parser_indicies[_trans]; + cs = _puma_parser_trans_targs[_trans]; + + if ( _puma_parser_trans_actions[_trans] != 0 ) { + _acts = _puma_parser_trans_actions[_trans]; + _nacts = (int) _puma_parser_actions[_acts++]; + while ( _nacts-- > 0 ) + { + switch ( _puma_parser_actions[_acts++] ) + { + case 0: +// line 15 "ext/puma_http11/http11_parser.java.rl" + {parser.mark = p; } + break; + case 1: +// line 17 "ext/puma_http11/http11_parser.java.rl" + { parser.field_start = p; } + break; + case 2: +// line 18 "ext/puma_http11/http11_parser.java.rl" + { /* FIXME stub */ } + break; + case 3: +// line 19 "ext/puma_http11/http11_parser.java.rl" + { + parser.field_len = p-parser.field_start; + } + break; + case 4: +// line 23 "ext/puma_http11/http11_parser.java.rl" + { parser.mark = p; } + break; + case 5: +// line 24 "ext/puma_http11/http11_parser.java.rl" + { + 
Http11.http_field(runtime, parser.data, parser.buffer, parser.field_start, parser.field_len, parser.mark, p-parser.mark); + } + break; + case 6: +// line 27 "ext/puma_http11/http11_parser.java.rl" + { + Http11.request_method(runtime, parser.data, parser.buffer, parser.mark, p-parser.mark); + } + break; + case 7: +// line 30 "ext/puma_http11/http11_parser.java.rl" + { + Http11.request_uri(runtime, parser.data, parser.buffer, parser.mark, p-parser.mark); + } + break; + case 8: +// line 33 "ext/puma_http11/http11_parser.java.rl" + { + Http11.fragment(runtime, parser.data, parser.buffer, parser.mark, p-parser.mark); + } + break; + case 9: +// line 37 "ext/puma_http11/http11_parser.java.rl" + {parser.query_start = p; } + break; + case 10: +// line 38 "ext/puma_http11/http11_parser.java.rl" + { + Http11.query_string(runtime, parser.data, parser.buffer, parser.query_start, p-parser.query_start); + } + break; + case 11: +// line 42 "ext/puma_http11/http11_parser.java.rl" + { + Http11.server_protocol(runtime, parser.data, parser.buffer, parser.mark, p-parser.mark); + } + break; + case 12: +// line 46 "ext/puma_http11/http11_parser.java.rl" + { + Http11.request_path(runtime, parser.data, parser.buffer, parser.mark, p-parser.mark); + } + break; + case 13: +// line 50 "ext/puma_http11/http11_parser.java.rl" + { + parser.body_start = p + 1; + http.header_done(runtime, parser.data, parser.buffer, p + 1, pe - p - 1); + { p += 1; _goto_targ = 5; if (true) continue _goto;} + } + break; +// line 404 "ext/puma_http11/org/jruby/puma/Http11Parser.java" + } + } + } + +case 2: + if ( cs == 0 ) { + _goto_targ = 5; + continue _goto; + } + if ( ++p != pe ) { + _goto_targ = 1; + continue _goto; + } +case 4: +case 5: + } + break; } + } + +// line 114 "ext/puma_http11/http11_parser.java.rl" + + parser.cs = cs; + parser.nread += (p - off); + + assert p <= pe : "buffer overflow after parsing execute"; + assert parser.nread <= len : "nread longer than length"; + assert parser.body_start <= len : 
"body starts after buffer end"; + assert parser.mark < len : "mark is after buffer end"; + assert parser.field_len <= len : "field has length longer than whole buffer"; + assert parser.field_start < len : "field starts after buffer end"; + + return parser.nread; + } + + public int finish() { + if(has_error()) { + return -1; + } else if(is_finished()) { + return 1; + } else { + return 0; + } + } + + public boolean has_error() { + return parser.cs == puma_parser_error; + } + + public boolean is_finished() { + return parser.cs == puma_parser_first_final; + } +} diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/org/jruby/puma/MiniSSL.java b/vendor/cache/puma-fba741b91780/ext/puma_http11/org/jruby/puma/MiniSSL.java new file mode 100644 index 000000000..36ad801ec --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/org/jruby/puma/MiniSSL.java @@ -0,0 +1,509 @@ +package org.jruby.puma; + +import org.jruby.Ruby; +import org.jruby.RubyArray; +import org.jruby.RubyClass; +import org.jruby.RubyModule; +import org.jruby.RubyObject; +import org.jruby.RubyString; +import org.jruby.anno.JRubyMethod; +import org.jruby.exceptions.RaiseException; +import org.jruby.javasupport.JavaEmbedUtils; +import org.jruby.runtime.Block; +import org.jruby.runtime.ObjectAllocator; +import org.jruby.runtime.ThreadContext; +import org.jruby.runtime.builtin.IRubyObject; +import org.jruby.util.ByteList; + +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLEngineResult; +import javax.net.ssl.SSLException; +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLSession; +import javax.net.ssl.X509TrustManager; +import java.io.FileInputStream; +import java.io.InputStream; +import java.io.IOException; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.security.KeyManagementException; 
+import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.Certificate; +import java.security.cert.CertificateEncodingException; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.concurrent.ConcurrentHashMap; +import java.util.Map; +import java.util.function.Supplier; + +import static javax.net.ssl.SSLEngineResult.Status; +import static javax.net.ssl.SSLEngineResult.HandshakeStatus; + +public class MiniSSL extends RubyObject { // MiniSSL::Engine + private static final long serialVersionUID = -6903439483039141234L; + private static ObjectAllocator ALLOCATOR = new ObjectAllocator() { + public IRubyObject allocate(Ruby runtime, RubyClass klass) { + return new MiniSSL(runtime, klass); + } + }; + + public static void createMiniSSL(Ruby runtime) { + RubyModule mPuma = runtime.defineModule("Puma"); + RubyModule ssl = mPuma.defineModuleUnder("MiniSSL"); + + // Puma::MiniSSL::SSLError + ssl.defineClassUnder("SSLError", runtime.getStandardError(), runtime.getStandardError().getAllocator()); + + RubyClass eng = ssl.defineClassUnder("Engine", runtime.getObject(), ALLOCATOR); + eng.defineAnnotatedMethods(MiniSSL.class); + } + + /** + * Fairly transparent wrapper around {@link java.nio.ByteBuffer} which adds the enhancements we need + */ + private static class MiniSSLBuffer { + ByteBuffer buffer; + + private MiniSSLBuffer(int capacity) { buffer = ByteBuffer.allocate(capacity); } + private MiniSSLBuffer(byte[] initialContents) { buffer = ByteBuffer.wrap(initialContents); } + + public void clear() { buffer.clear(); } + public void compact() { buffer.compact(); } + public void flip() { ((Buffer) buffer).flip(); } + public boolean hasRemaining() { return buffer.hasRemaining(); } + public int position() { return buffer.position(); } + + public ByteBuffer getRawBuffer() { + return buffer; 
+ } + + /** + * Writes bytes to the buffer after ensuring there's room + */ + private void put(byte[] bytes, final int offset, final int length) { + if (buffer.remaining() < length) { + resize(buffer.limit() + length); + } + buffer.put(bytes, offset, length); + } + + /** + * Ensures that newCapacity bytes can be written to this buffer, only re-allocating if necessary + */ + public void resize(int newCapacity) { + if (newCapacity > buffer.capacity()) { + ByteBuffer dstTmp = ByteBuffer.allocate(newCapacity); + flip(); + dstTmp.put(buffer); + buffer = dstTmp; + } else { + buffer.limit(newCapacity); + } + } + + /** + * Drains the buffer to a ByteList, or returns null for an empty buffer + */ + public ByteList asByteList() { + flip(); + if (!buffer.hasRemaining()) { + buffer.clear(); + return null; + } + + byte[] bss = new byte[buffer.limit()]; + + buffer.get(bss); + buffer.clear(); + return new ByteList(bss, false); + } + + @Override + public String toString() { return buffer.toString(); } + } + + private SSLEngine engine; + private boolean closed; + private boolean handshake; + private MiniSSLBuffer inboundNetData; + private MiniSSLBuffer outboundAppData; + private MiniSSLBuffer outboundNetData; + + public MiniSSL(Ruby runtime, RubyClass klass) { + super(runtime, klass); + } + + private static Map keyManagerFactoryMap = new ConcurrentHashMap(); + private static Map trustManagerFactoryMap = new ConcurrentHashMap(); + + @JRubyMethod(meta = true) // Engine.server + public static synchronized IRubyObject server(ThreadContext context, IRubyObject recv, IRubyObject miniSSLContext) + throws KeyStoreException, IOException, CertificateException, NoSuchAlgorithmException, UnrecoverableKeyException { + // Create the KeyManagerFactory and TrustManagerFactory for this server + String keystoreFile = asStringValue(miniSSLContext.callMethod(context, "keystore"), null); + char[] keystorePass = asStringValue(miniSSLContext.callMethod(context, "keystore_pass"), null).toCharArray(); + 
String keystoreType = asStringValue(miniSSLContext.callMethod(context, "keystore_type"), KeyStore::getDefaultType); + + String truststoreFile; + char[] truststorePass; + String truststoreType; + IRubyObject truststore = miniSSLContext.callMethod(context, "truststore"); + if (truststore.isNil()) { + truststoreFile = keystoreFile; + truststorePass = keystorePass; + truststoreType = keystoreType; + } else if (!isDefaultSymbol(context, truststore)) { + truststoreFile = truststore.convertToString().asJavaString(); + IRubyObject pass = miniSSLContext.callMethod(context, "truststore_pass"); + if (pass.isNil()) { + truststorePass = null; + } else { + truststorePass = asStringValue(pass, null).toCharArray(); + } + truststoreType = asStringValue(miniSSLContext.callMethod(context, "truststore_type"), KeyStore::getDefaultType); + } else { // self.truststore = :default + truststoreFile = null; + truststorePass = null; + truststoreType = null; + } + + KeyStore ks = KeyStore.getInstance(keystoreType); + InputStream is = new FileInputStream(keystoreFile); + try { + ks.load(is, keystorePass); + } finally { + is.close(); + } + KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509"); + kmf.init(ks, keystorePass); + keyManagerFactoryMap.put(keystoreFile, kmf); + + if (truststoreFile != null) { + KeyStore ts = KeyStore.getInstance(truststoreType); + is = new FileInputStream(truststoreFile); + try { + ts.load(is, truststorePass); + } finally { + is.close(); + } + TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509"); + tmf.init(ts); + trustManagerFactoryMap.put(truststoreFile, tmf); + } + + RubyClass klass = (RubyClass) recv; + return klass.newInstance(context, miniSSLContext, Block.NULL_BLOCK); + } + + private static String asStringValue(IRubyObject value, Supplier defaultValue) { + if (defaultValue != null && value.isNil()) return defaultValue.get(); + return value.convertToString().asJavaString(); + } + + private static boolean isDefaultSymbol(ThreadContext 
context, IRubyObject truststore) { + return context.runtime.newSymbol("default").equals(truststore); + } + + @JRubyMethod + public IRubyObject initialize(ThreadContext context, IRubyObject miniSSLContext) + throws KeyStoreException, NoSuchAlgorithmException, KeyManagementException { + + String keystoreFile = miniSSLContext.callMethod(context, "keystore").convertToString().asJavaString(); + KeyManagerFactory kmf = keyManagerFactoryMap.get(keystoreFile); + IRubyObject truststore = miniSSLContext.callMethod(context, "truststore"); + String truststoreFile = isDefaultSymbol(context, truststore) ? "" : asStringValue(truststore, () -> keystoreFile); + TrustManagerFactory tmf = trustManagerFactoryMap.get(truststoreFile); // null if self.truststore = :default + if (kmf == null) { + throw new KeyStoreException("Could not find KeyManagerFactory for keystore: " + keystoreFile + " truststore: " + truststoreFile); + } + + SSLContext sslCtx = SSLContext.getInstance("TLS"); + + sslCtx.init(kmf.getKeyManagers(), getTrustManagers(tmf), null); + closed = false; + handshake = false; + engine = sslCtx.createSSLEngine(); + + String[] enabledProtocols; + IRubyObject protocols = miniSSLContext.callMethod(context, "protocols"); + if (protocols.isNil()) { + if (miniSSLContext.callMethod(context, "no_tlsv1").isTrue()) { + enabledProtocols = new String[] { "TLSv1.1", "TLSv1.2", "TLSv1.3" }; + } else { + enabledProtocols = new String[] { "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" }; + } + + if (miniSSLContext.callMethod(context, "no_tlsv1_1").isTrue()) { + enabledProtocols = new String[] { "TLSv1.2", "TLSv1.3" }; + } + } else if (protocols instanceof RubyArray) { + enabledProtocols = (String[]) ((RubyArray) protocols).toArray(new String[0]); + } else { + throw context.runtime.newTypeError(protocols, context.runtime.getArray()); + } + engine.setEnabledProtocols(enabledProtocols); + + engine.setUseClientMode(false); + + long verify_mode = miniSSLContext.callMethod(context, 
"verify_mode").convertToInteger("to_i").getLongValue(); + if ((verify_mode & 0x1) != 0) { // 'peer' + engine.setWantClientAuth(true); + } + if ((verify_mode & 0x2) != 0) { // 'force_peer' + engine.setNeedClientAuth(true); + } + + IRubyObject cipher_suites = miniSSLContext.callMethod(context, "cipher_suites"); + if (cipher_suites instanceof RubyArray) { + engine.setEnabledCipherSuites((String[]) ((RubyArray) cipher_suites).toArray(new String[0])); + } else if (!cipher_suites.isNil()) { + throw context.runtime.newTypeError(cipher_suites, context.runtime.getArray()); + } + + SSLSession session = engine.getSession(); + inboundNetData = new MiniSSLBuffer(session.getPacketBufferSize()); + outboundAppData = new MiniSSLBuffer(session.getApplicationBufferSize()); + outboundAppData.flip(); + outboundNetData = new MiniSSLBuffer(session.getPacketBufferSize()); + + return this; + } + + private TrustManager[] getTrustManagers(TrustManagerFactory factory) { + if (factory == null) return null; // use JDK trust defaults + final TrustManager[] tms = factory.getTrustManagers(); + if (tms != null) { + for (int i=0; i 0 ? 
chain[0] : null; + delegate.checkClientTrusted(chain, authType); + } + + @Override + public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException { + delegate.checkServerTrusted(chain, authType); + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + return delegate.getAcceptedIssuers(); + } + + } + + @JRubyMethod + public IRubyObject inject(IRubyObject arg) { + ByteList bytes = arg.convertToString().getByteList(); + inboundNetData.put(bytes.unsafeBytes(), bytes.getBegin(), bytes.getRealSize()); + return this; + } + + private enum SSLOperation { + WRAP, + UNWRAP + } + + private SSLEngineResult doOp(SSLOperation sslOp, MiniSSLBuffer src, MiniSSLBuffer dst) throws SSLException { + SSLEngineResult res = null; + boolean retryOp = true; + while (retryOp) { + switch (sslOp) { + case WRAP: + res = engine.wrap(src.getRawBuffer(), dst.getRawBuffer()); + break; + case UNWRAP: + res = engine.unwrap(src.getRawBuffer(), dst.getRawBuffer()); + break; + default: + throw new AssertionError("Unknown SSLOperation: " + sslOp); + } + + switch (res.getStatus()) { + case BUFFER_OVERFLOW: + // increase the buffer size to accommodate the overflowing data + int newSize = Math.max(engine.getSession().getPacketBufferSize(), engine.getSession().getApplicationBufferSize()); + dst.resize(newSize + dst.position()); + // retry the operation + retryOp = true; + break; + case BUFFER_UNDERFLOW: + // need to wait for more data to come in before we retry + retryOp = false; + break; + case CLOSED: + closed = true; + retryOp = false; + break; + default: + // other case is OK. We're done here. 
+ retryOp = false; + } + if (res.getHandshakeStatus() == HandshakeStatus.FINISHED) { + handshake = true; + } + } + + return res; + } + + @JRubyMethod + public IRubyObject read() { + try { + inboundNetData.flip(); + + if(!inboundNetData.hasRemaining()) { + return getRuntime().getNil(); + } + + MiniSSLBuffer inboundAppData = new MiniSSLBuffer(engine.getSession().getApplicationBufferSize()); + doOp(SSLOperation.UNWRAP, inboundNetData, inboundAppData); + + HandshakeStatus handshakeStatus = engine.getHandshakeStatus(); + boolean done = false; + while (!done) { + SSLEngineResult res; + switch (handshakeStatus) { + case NEED_WRAP: + res = doOp(SSLOperation.WRAP, inboundAppData, outboundNetData); + handshakeStatus = res.getHandshakeStatus(); + break; + case NEED_UNWRAP: + res = doOp(SSLOperation.UNWRAP, inboundNetData, inboundAppData); + if (res.getStatus() == Status.BUFFER_UNDERFLOW) { + // need more data before we can shake more hands + done = true; + } + handshakeStatus = res.getHandshakeStatus(); + break; + case NEED_TASK: + Runnable runnable; + while ((runnable = engine.getDelegatedTask()) != null) { + runnable.run(); + } + handshakeStatus = engine.getHandshakeStatus(); + break; + default: + done = true; + } + } + + if (inboundNetData.hasRemaining()) { + inboundNetData.compact(); + } else { + inboundNetData.clear(); + } + + ByteList appDataByteList = inboundAppData.asByteList(); + if (appDataByteList == null) { + return getRuntime().getNil(); + } + + return RubyString.newString(getRuntime(), appDataByteList); + } catch (SSLException e) { + throw newSSLError(getRuntime(), e); + } + } + + @JRubyMethod + public IRubyObject write(IRubyObject arg) { + byte[] bls = arg.convertToString().getBytes(); + outboundAppData = new MiniSSLBuffer(bls); + + return getRuntime().newFixnum(bls.length); + } + + @JRubyMethod + public IRubyObject extract(ThreadContext context) { + try { + ByteList dataByteList = outboundNetData.asByteList(); + if (dataByteList != null) { + return 
RubyString.newString(context.runtime, dataByteList); + } + + if (!outboundAppData.hasRemaining()) { + return context.nil; + } + + outboundNetData.clear(); + doOp(SSLOperation.WRAP, outboundAppData, outboundNetData); + dataByteList = outboundNetData.asByteList(); + if (dataByteList == null) { + return context.nil; + } + + return RubyString.newString(context.runtime, dataByteList); + } catch (SSLException e) { + throw newSSLError(getRuntime(), e); + } + } + + @JRubyMethod + public IRubyObject peercert(ThreadContext context) throws CertificateEncodingException { + Certificate peerCert; + try { + peerCert = engine.getSession().getPeerCertificates()[0]; + } catch (SSLPeerUnverifiedException e) { + peerCert = lastCheckedCert0; // null if trust check did not happen + } + return peerCert == null ? context.nil : JavaEmbedUtils.javaToRuby(context.runtime, peerCert.getEncoded()); + } + + @JRubyMethod(name = "init?") + public IRubyObject isInit(ThreadContext context) { + return handshake ? getRuntime().getFalse() : getRuntime().getTrue(); + } + + @JRubyMethod + public IRubyObject shutdown() { + if (closed || engine.isInboundDone() && engine.isOutboundDone()) { + if (engine.isOutboundDone()) { + engine.closeOutbound(); + } + return getRuntime().getTrue(); + } else { + return getRuntime().getFalse(); + } + } + + private static RubyClass getSSLError(Ruby runtime) { + return (RubyClass) ((RubyModule) runtime.getModule("Puma").getConstantAt("MiniSSL")).getConstantAt("SSLError"); + } + + private static RaiseException newSSLError(Ruby runtime, SSLException cause) { + return newError(runtime, getSSLError(runtime), cause.toString(), cause); + } + + private static RaiseException newError(Ruby runtime, RubyClass errorClass, String message, Throwable cause) { + RaiseException ex = RaiseException.from(runtime, errorClass, message); + ex.initCause(cause); + return ex; + } + +} diff --git a/vendor/cache/puma-fba741b91780/ext/puma_http11/puma_http11.c 
b/vendor/cache/puma-fba741b91780/ext/puma_http11/puma_http11.c new file mode 100644 index 000000000..4b8501f10 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/ext/puma_http11/puma_http11.c @@ -0,0 +1,495 @@ +/** + * Copyright (c) 2005 Zed A. Shaw + * You can redistribute it and/or modify it under the same terms as Ruby. + * License 3-clause BSD + */ + +#define RSTRING_NOT_MODIFIED 1 + +#include "ruby.h" +#include "ext_help.h" +#include +#include +#include +#include "http11_parser.h" + +#ifndef MANAGED_STRINGS + +#ifndef RSTRING_PTR +#define RSTRING_PTR(s) (RSTRING(s)->ptr) +#endif +#ifndef RSTRING_LEN +#define RSTRING_LEN(s) (RSTRING(s)->len) +#endif + +#define rb_extract_chars(e, sz) (*sz = RSTRING_LEN(e), RSTRING_PTR(e)) +#define rb_free_chars(e) /* nothing */ + +#endif + +static VALUE eHttpParserError; + +#define HTTP_PREFIX "HTTP_" +#define HTTP_PREFIX_LEN (sizeof(HTTP_PREFIX) - 1) + +static VALUE global_request_method; +static VALUE global_request_uri; +static VALUE global_fragment; +static VALUE global_query_string; +static VALUE global_server_protocol; +static VALUE global_request_path; + +/** Defines common length and error messages for input length validation. */ +#define QUOTE(s) #s +#define EXPAND_MAX_LENGTH_VALUE(s) QUOTE(s) +#define DEF_MAX_LENGTH(N,length) const size_t MAX_##N##_LENGTH = length; const char *MAX_##N##_LENGTH_ERR = "HTTP element " # N " is longer than the " EXPAND_MAX_LENGTH_VALUE(length) " allowed length (was %d)" + +/** Validates the max length of given input and throws an HttpParserError exception if over. */ +#define VALIDATE_MAX_LENGTH(len, N) if(len > MAX_##N##_LENGTH) { rb_raise(eHttpParserError, MAX_##N##_LENGTH_ERR, len); } + +/** Defines global strings in the init method. 
*/ +#define DEF_GLOBAL(N, val) global_##N = rb_str_new2(val); rb_global_variable(&global_##N) + + +/* Defines the maximum allowed lengths for various input elements.*/ +#ifndef PUMA_REQUEST_URI_MAX_LENGTH +#define PUMA_REQUEST_URI_MAX_LENGTH (1024 * 12) +#endif + +#ifndef PUMA_REQUEST_PATH_MAX_LENGTH +#define PUMA_REQUEST_PATH_MAX_LENGTH (8192) +#endif + +#ifndef PUMA_QUERY_STRING_MAX_LENGTH +#define PUMA_QUERY_STRING_MAX_LENGTH (1024 * 10) +#endif + +DEF_MAX_LENGTH(FIELD_NAME, 256); +DEF_MAX_LENGTH(FIELD_VALUE, 80 * 1024); +DEF_MAX_LENGTH(REQUEST_URI, PUMA_REQUEST_URI_MAX_LENGTH); +DEF_MAX_LENGTH(FRAGMENT, 1024); /* Don't know if this length is specified somewhere or not */ +DEF_MAX_LENGTH(REQUEST_PATH, PUMA_REQUEST_PATH_MAX_LENGTH); +DEF_MAX_LENGTH(QUERY_STRING, PUMA_QUERY_STRING_MAX_LENGTH); +DEF_MAX_LENGTH(HEADER, (1024 * (80 + 32))); + +struct common_field { + const size_t len; + const char *name; + int raw; + VALUE value; +}; + +/* + * A list of common HTTP headers we expect to receive. + * This allows us to avoid repeatedly creating identical string + * objects to be used with rb_hash_aset(). 
+ */ +static struct common_field common_http_fields[] = { +# define f(N) { (sizeof(N) - 1), N, 0, Qnil } +# define fr(N) { (sizeof(N) - 1), N, 1, Qnil } + f("ACCEPT"), + f("ACCEPT_CHARSET"), + f("ACCEPT_ENCODING"), + f("ACCEPT_LANGUAGE"), + f("ALLOW"), + f("AUTHORIZATION"), + f("CACHE_CONTROL"), + f("CONNECTION"), + f("CONTENT_ENCODING"), + fr("CONTENT_LENGTH"), + fr("CONTENT_TYPE"), + f("COOKIE"), + f("DATE"), + f("EXPECT"), + f("FROM"), + f("HOST"), + f("IF_MATCH"), + f("IF_MODIFIED_SINCE"), + f("IF_NONE_MATCH"), + f("IF_RANGE"), + f("IF_UNMODIFIED_SINCE"), + f("KEEP_ALIVE"), /* Firefox sends this */ + f("MAX_FORWARDS"), + f("PRAGMA"), + f("PROXY_AUTHORIZATION"), + f("RANGE"), + f("REFERER"), + f("TE"), + f("TRAILER"), + f("TRANSFER_ENCODING"), + f("UPGRADE"), + f("USER_AGENT"), + f("VIA"), + f("X_FORWARDED_FOR"), /* common for proxies */ + f("X_REAL_IP"), /* common for proxies */ + f("WARNING") +# undef f +}; + +static void init_common_fields(void) +{ + unsigned i; + struct common_field *cf = common_http_fields; + char tmp[256]; /* MAX_FIELD_NAME_LENGTH */ + memcpy(tmp, HTTP_PREFIX, HTTP_PREFIX_LEN); + + for(i = 0; i < ARRAY_SIZE(common_http_fields); cf++, i++) { + if(cf->raw) { + cf->value = rb_str_new(cf->name, cf->len); + } else { + memcpy(tmp + HTTP_PREFIX_LEN, cf->name, cf->len + 1); + cf->value = rb_str_new(tmp, HTTP_PREFIX_LEN + cf->len); + } + rb_global_variable(&cf->value); + } +} + +static VALUE find_common_field_value(const char *field, size_t flen) +{ + unsigned i; + struct common_field *cf = common_http_fields; + for(i = 0; i < ARRAY_SIZE(common_http_fields); i++, cf++) { + if (cf->len == flen && !memcmp(cf->name, field, flen)) + return cf->value; + } + return Qnil; +} + +void http_field(puma_parser* hp, const char *field, size_t flen, + const char *value, size_t vlen) +{ + VALUE f = Qnil; + VALUE v; + + VALIDATE_MAX_LENGTH(flen, FIELD_NAME); + VALIDATE_MAX_LENGTH(vlen, FIELD_VALUE); + + f = find_common_field_value(field, flen); + + if (f == Qnil) { 
+ /* + * We got a strange header that we don't have a memoized value for. + * Fallback to creating a new string to use as a hash key. + */ + + size_t new_size = HTTP_PREFIX_LEN + flen; + assert(new_size < BUFFER_LEN); + + memcpy(hp->buf, HTTP_PREFIX, HTTP_PREFIX_LEN); + memcpy(hp->buf + HTTP_PREFIX_LEN, field, flen); + + f = rb_str_new(hp->buf, new_size); + } + + while (vlen > 0 && isspace(value[vlen - 1])) vlen--; + + /* check for duplicate header */ + v = rb_hash_aref(hp->request, f); + + if (v == Qnil) { + v = rb_str_new(value, vlen); + rb_hash_aset(hp->request, f, v); + } else { + /* if duplicate header, normalize to comma-separated values */ + rb_str_cat2(v, ", "); + rb_str_cat(v, value, vlen); + } +} + +void request_method(puma_parser* hp, const char *at, size_t length) +{ + VALUE val = Qnil; + + val = rb_str_new(at, length); + rb_hash_aset(hp->request, global_request_method, val); +} + +void request_uri(puma_parser* hp, const char *at, size_t length) +{ + VALUE val = Qnil; + + VALIDATE_MAX_LENGTH(length, REQUEST_URI); + + val = rb_str_new(at, length); + rb_hash_aset(hp->request, global_request_uri, val); +} + +void fragment(puma_parser* hp, const char *at, size_t length) +{ + VALUE val = Qnil; + + VALIDATE_MAX_LENGTH(length, FRAGMENT); + + val = rb_str_new(at, length); + rb_hash_aset(hp->request, global_fragment, val); +} + +void request_path(puma_parser* hp, const char *at, size_t length) +{ + VALUE val = Qnil; + + VALIDATE_MAX_LENGTH(length, REQUEST_PATH); + + val = rb_str_new(at, length); + rb_hash_aset(hp->request, global_request_path, val); +} + +void query_string(puma_parser* hp, const char *at, size_t length) +{ + VALUE val = Qnil; + + VALIDATE_MAX_LENGTH(length, QUERY_STRING); + + val = rb_str_new(at, length); + rb_hash_aset(hp->request, global_query_string, val); +} + +void server_protocol(puma_parser* hp, const char *at, size_t length) +{ + VALUE val = rb_str_new(at, length); + rb_hash_aset(hp->request, global_server_protocol, val); +} + +/** 
Finalizes the request header to have a bunch of stuff that's + needed. */ + +void header_done(puma_parser* hp, const char *at, size_t length) +{ + hp->body = rb_str_new(at, length); +} + + +void HttpParser_free(void *data) { + TRACE(); + + if(data) { + xfree(data); + } +} + +void HttpParser_mark(void *ptr) { + puma_parser *hp = ptr; + if(hp->request) rb_gc_mark(hp->request); + if(hp->body) rb_gc_mark(hp->body); +} + +const rb_data_type_t HttpParser_data_type = { + "HttpParser", + { HttpParser_mark, HttpParser_free, 0 }, + 0, 0, RUBY_TYPED_FREE_IMMEDIATELY, +}; + +VALUE HttpParser_alloc(VALUE klass) +{ + puma_parser *hp = ALLOC_N(puma_parser, 1); + TRACE(); + hp->http_field = http_field; + hp->request_method = request_method; + hp->request_uri = request_uri; + hp->fragment = fragment; + hp->request_path = request_path; + hp->query_string = query_string; + hp->server_protocol = server_protocol; + hp->header_done = header_done; + hp->request = Qnil; + + puma_parser_init(hp); + + return TypedData_Wrap_Struct(klass, &HttpParser_data_type, hp); +} + +/** + * call-seq: + * parser.new -> parser + * + * Creates a new parser. + */ +VALUE HttpParser_init(VALUE self) +{ + puma_parser *http = NULL; + DATA_GET(self, puma_parser, &HttpParser_data_type, http); + puma_parser_init(http); + + return self; +} + + +/** + * call-seq: + * parser.reset -> nil + * + * Resets the parser to it's initial state so that you can reuse it + * rather than making new ones. + */ +VALUE HttpParser_reset(VALUE self) +{ + puma_parser *http = NULL; + DATA_GET(self, puma_parser, &HttpParser_data_type, http); + puma_parser_init(http); + + return Qnil; +} + + +/** + * call-seq: + * parser.finish -> true/false + * + * Finishes a parser early which could put in a "good" or bad state. + * You should call reset after finish it or bad things will happen. 
+ */ +VALUE HttpParser_finish(VALUE self) +{ + puma_parser *http = NULL; + DATA_GET(self, puma_parser, &HttpParser_data_type, http); + puma_parser_finish(http); + + return puma_parser_is_finished(http) ? Qtrue : Qfalse; +} + + +/** + * call-seq: + * parser.execute(req_hash, data, start) -> Integer + * + * Takes a Hash and a String of data, parses the String of data filling in the Hash + * returning an Integer to indicate how much of the data has been read. No matter + * what the return value, you should call HttpParser#finished? and HttpParser#error? + * to figure out if it's done parsing or there was an error. + * + * This function now throws an exception when there is a parsing error. This makes + * the logic for working with the parser much easier. You can still test for an + * error, but now you need to wrap the parser with an exception handling block. + * + * The third argument allows for parsing a partial request and then continuing + * the parsing from that position. It needs all of the original data as well + * so you have to append to the data buffer as you read. + */ +VALUE HttpParser_execute(VALUE self, VALUE req_hash, VALUE data, VALUE start) +{ + puma_parser *http = NULL; + int from = 0; + char *dptr = NULL; + long dlen = 0; + + DATA_GET(self, puma_parser, &HttpParser_data_type, http); + + from = FIX2INT(start); + dptr = rb_extract_chars(data, &dlen); + + if(from >= dlen) { + rb_free_chars(dptr); + rb_raise(eHttpParserError, "%s", "Requested start is after data buffer end."); + } else { + http->request = req_hash; + puma_parser_execute(http, dptr, dlen, from); + + rb_free_chars(dptr); + VALIDATE_MAX_LENGTH(puma_parser_nread(http), HEADER); + + if(puma_parser_has_error(http)) { + rb_raise(eHttpParserError, "%s", "Invalid HTTP format, parsing fails. Are you trying to open an SSL connection to a non-SSL Puma?"); + } else { + return INT2FIX(puma_parser_nread(http)); + } + } +} + + + +/** + * call-seq: + * parser.error? 
-> true/false + * + * Tells you whether the parser is in an error state. + */ +VALUE HttpParser_has_error(VALUE self) +{ + puma_parser *http = NULL; + DATA_GET(self, puma_parser, &HttpParser_data_type, http); + + return puma_parser_has_error(http) ? Qtrue : Qfalse; +} + + +/** + * call-seq: + * parser.finished? -> true/false + * + * Tells you whether the parser is finished or not and in a good state. + */ +VALUE HttpParser_is_finished(VALUE self) +{ + puma_parser *http = NULL; + DATA_GET(self, puma_parser, &HttpParser_data_type, http); + + return puma_parser_is_finished(http) ? Qtrue : Qfalse; +} + + +/** + * call-seq: + * parser.nread -> Integer + * + * Returns the amount of data processed so far during this processing cycle. It is + * set to 0 on initialize or reset calls and is incremented each time execute is called. + */ +VALUE HttpParser_nread(VALUE self) +{ + puma_parser *http = NULL; + DATA_GET(self, puma_parser, &HttpParser_data_type, http); + + return INT2FIX(http->nread); +} + +/** + * call-seq: + * parser.body -> nil or String + * + * If the request included a body, returns it. 
+ */ +VALUE HttpParser_body(VALUE self) { + puma_parser *http = NULL; + DATA_GET(self, puma_parser, &HttpParser_data_type, http); + + return http->body; +} + +#ifdef HAVE_OPENSSL_BIO_H +void Init_mini_ssl(VALUE mod); +#endif + +void Init_puma_http11(void) +{ +#ifdef HAVE_RB_EXT_RACTOR_SAFE + rb_ext_ractor_safe(true); +#endif + + VALUE mPuma = rb_define_module("Puma"); + VALUE cHttpParser = rb_define_class_under(mPuma, "HttpParser", rb_cObject); + + DEF_GLOBAL(request_method, "REQUEST_METHOD"); + DEF_GLOBAL(request_uri, "REQUEST_URI"); + DEF_GLOBAL(fragment, "FRAGMENT"); + DEF_GLOBAL(query_string, "QUERY_STRING"); + DEF_GLOBAL(server_protocol, "SERVER_PROTOCOL"); + DEF_GLOBAL(request_path, "REQUEST_PATH"); + + eHttpParserError = rb_define_class_under(mPuma, "HttpParserError", rb_eIOError); + rb_global_variable(&eHttpParserError); + + rb_define_alloc_func(cHttpParser, HttpParser_alloc); + rb_define_method(cHttpParser, "initialize", HttpParser_init, 0); + rb_define_method(cHttpParser, "reset", HttpParser_reset, 0); + rb_define_method(cHttpParser, "finish", HttpParser_finish, 0); + rb_define_method(cHttpParser, "execute", HttpParser_execute, 3); + rb_define_method(cHttpParser, "error?", HttpParser_has_error, 0); + rb_define_method(cHttpParser, "finished?", HttpParser_is_finished, 0); + rb_define_method(cHttpParser, "nread", HttpParser_nread, 0); + rb_define_method(cHttpParser, "body", HttpParser_body, 0); + init_common_fields(); + +#ifdef HAVE_OPENSSL_BIO_H + Init_mini_ssl(mPuma); +#endif +} diff --git a/vendor/cache/puma-fba741b91780/lib/puma.rb b/vendor/cache/puma-fba741b91780/lib/puma.rb new file mode 100644 index 000000000..ab2b22053 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma.rb @@ -0,0 +1,78 @@ +# frozen_string_literal: true + +# Standard libraries +require 'socket' +require 'tempfile' +require 'uri' +require 'stringio' + +require 'thread' + +# use require, see https://github.com/puma/puma/pull/2381 +require 'puma/puma_http11' + +require_relative 
'puma/detect' +require_relative 'puma/json_serialization' + +module Puma + # when Puma is loaded via `Puma::CLI`, all files are loaded via + # `require_relative`. The below are for non-standard loading + autoload :Const, "#{__dir__}/puma/const" + autoload :Server, "#{__dir__}/puma/server" + autoload :Launcher, "#{__dir__}/puma/launcher" + autoload :LogWriter, "#{__dir__}/puma/log_writer" + + # at present, MiniSSL::Engine is only defined in extension code (puma_http11), + # not in minissl.rb + HAS_SSL = const_defined?(:MiniSSL, false) && MiniSSL.const_defined?(:Engine, false) + + HAS_UNIX_SOCKET = Object.const_defined?(:UNIXSocket) && !IS_WINDOWS + + if HAS_SSL + require_relative 'puma/minissl' + else + module MiniSSL + # this class is defined so that it exists when Puma is compiled + # without ssl support, as Server and Reactor use it in rescue statements. + class SSLError < StandardError ; end + end + end + + def self.ssl? + HAS_SSL + end + + def self.abstract_unix_socket? + @abstract_unix ||= + if HAS_UNIX_SOCKET + begin + ::UNIXServer.new("\0puma.temp.unix").close + true + rescue ArgumentError # darwin + false + end + else + false + end + end + + # @!attribute [rw] stats_object= + def self.stats_object=(val) + @get_stats = val + end + + # @!attribute [rw] stats_object + def self.stats + Puma::JSONSerialization.generate @get_stats.stats + end + + # @!attribute [r] stats_hash + # @version 5.0.0 + def self.stats_hash + @get_stats.stats + end + + def self.set_thread_name(name) + Thread.current.name = "puma #{name}" + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/app/status.rb b/vendor/cache/puma-fba741b91780/lib/puma/app/status.rb new file mode 100644 index 000000000..f8604874c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/app/status.rb @@ -0,0 +1,96 @@ +# frozen_string_literal: true +require_relative '../json_serialization' + +module Puma + module App + # Check out {#call}'s source code to see what actions this web application + # can 
respond to. + class Status + OK_STATUS = '{ "status": "ok" }'.freeze + + # @param launcher [::Puma::Launcher] + # @param token [String, nil] the token used for authentication + # + def initialize(launcher, token = nil) + @launcher = launcher + @auth_token = token + end + + # most commands call methods in `::Puma::Launcher` based on command in + # `env['PATH_INFO']` + def call(env) + unless authenticate(env) + return rack_response(403, 'Invalid auth token', 'text/plain') + end + + # resp_type is processed by following case statement, return + # is a number (status) or a string used as the body of a 200 response + resp_type = + case env['PATH_INFO'][/\/([^\/]+)$/, 1] + when 'stop' + @launcher.stop ; 200 + + when 'halt' + @launcher.halt ; 200 + + when 'restart' + @launcher.restart ; 200 + + when 'phased-restart' + @launcher.phased_restart ? 200 : 404 + + when 'refork' + @launcher.refork ? 200 : 404 + + when 'reload-worker-directory' + @launcher.send(:reload_worker_directory) ? 200 : 404 + + when 'gc' + GC.start ; 200 + + when 'gc-stats' + Puma::JSONSerialization.generate GC.stat + + when 'stats' + Puma::JSONSerialization.generate @launcher.stats + + when 'thread-backtraces' + backtraces = [] + @launcher.thread_status do |name, backtrace| + backtraces << { name: name, backtrace: backtrace } + end + Puma::JSONSerialization.generate backtraces + + else + return rack_response(404, "Unsupported action", 'text/plain') + end + + case resp_type + when String + rack_response 200, resp_type + when 200 + rack_response 200, OK_STATUS + when 404 + str = env['PATH_INFO'][/\/(\S+)/, 1].tr '-', '_' + rack_response 404, "{ \"error\": \"#{str} not available\" }" + end + end + + private + + def authenticate(env) + return true unless @auth_token + env['QUERY_STRING'].to_s.split('&;').include? 
"token=#{@auth_token}" + end + + def rack_response(status, body, content_type='application/json') + headers = { + 'content-type' => content_type, + 'content-length' => body.bytesize.to_s + } + + [status, headers, [body]] + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/binder.rb b/vendor/cache/puma-fba741b91780/lib/puma/binder.rb new file mode 100644 index 000000000..239a902e3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/binder.rb @@ -0,0 +1,502 @@ +# frozen_string_literal: true + +require 'uri' +require 'socket' + +require_relative 'const' +require_relative 'util' +require_relative 'configuration' + +module Puma + + if HAS_SSL + require_relative 'minissl' + require_relative 'minissl/context_builder' + end + + class Binder + include Puma::Const + + RACK_VERSION = [1,6].freeze + + def initialize(log_writer, conf = Configuration.new, env: ENV) + @log_writer = log_writer + @conf = conf + @listeners = [] + @inherited_fds = {} + @activated_sockets = {} + @unix_paths = [] + @env = env + + @proto_env = { + "rack.version".freeze => RACK_VERSION, + "rack.errors".freeze => log_writer.stderr, + "rack.multithread".freeze => conf.options[:max_threads] > 1, + "rack.multiprocess".freeze => conf.options[:workers] >= 1, + "rack.run_once".freeze => false, + RACK_URL_SCHEME => conf.options[:rack_url_scheme], + "SCRIPT_NAME".freeze => env['SCRIPT_NAME'] || "", + + # I'd like to set a default CONTENT_TYPE here but some things + # depend on their not being a default set and inferring + # it from the content. And so if i set it here, it won't + # infer properly. 
+ + "QUERY_STRING".freeze => "", + SERVER_SOFTWARE => PUMA_SERVER_STRING, + GATEWAY_INTERFACE => CGI_VER + } + + @envs = {} + @ios = [] + end + + attr_reader :ios + + # @version 5.0.0 + attr_reader :activated_sockets, :envs, :inherited_fds, :listeners, :proto_env, :unix_paths + + # @version 5.0.0 + attr_writer :ios, :listeners + + def env(sock) + @envs.fetch(sock, @proto_env) + end + + def close + @ios.each { |i| i.close } + end + + # @!attribute [r] connected_ports + # @version 5.0.0 + def connected_ports + t = ios.map { |io| io.addr[1] }; t.uniq!; t + end + + # @version 5.0.0 + def create_inherited_fds(env_hash) + env_hash.select {|k,v| k =~ /PUMA_INHERIT_\d+/}.each do |_k, v| + fd, url = v.split(":", 2) + @inherited_fds[url] = fd.to_i + end.keys # pass keys back for removal + end + + # systemd socket activation. + # LISTEN_FDS = number of listening sockets. e.g. 2 means accept on 2 sockets w/descriptors 3 and 4. + # LISTEN_PID = PID of the service process, aka us + # @see https://www.freedesktop.org/software/systemd/man/systemd-socket-activate.html + # @version 5.0.0 + # + def create_activated_fds(env_hash) + @log_writer.debug "ENV['LISTEN_FDS'] #{@env['LISTEN_FDS'].inspect} env_hash['LISTEN_PID'] #{env_hash['LISTEN_PID'].inspect}" + return [] unless env_hash['LISTEN_FDS'] && env_hash['LISTEN_PID'].to_i == $$ + env_hash['LISTEN_FDS'].to_i.times do |index| + sock = TCPServer.for_fd(socket_activation_fd(index)) + key = begin # Try to parse as a path + [:unix, Socket.unpack_sockaddr_un(sock.getsockname)] + rescue ArgumentError # Try to parse as a port/ip + port, addr = Socket.unpack_sockaddr_in(sock.getsockname) + addr = "[#{addr}]" if addr&.include? 
':' + [:tcp, addr, port] + end + @activated_sockets[key] = sock + @log_writer.debug "Registered #{key.join ':'} for activation from LISTEN_FDS" + end + ["LISTEN_FDS", "LISTEN_PID"] # Signal to remove these keys from ENV + end + + # Synthesize binds from systemd socket activation + # + # When systemd socket activation is enabled, it can be tedious to keep the + # binds in sync. This method can synthesize any binds based on the received + # activated sockets. Any existing matching binds will be respected. + # + # When only_matching is true in, all binds that do not match an activated + # socket is removed in place. + # + # It's a noop if no activated sockets were received. + def synthesize_binds_from_activated_fs(binds, only_matching) + return binds unless activated_sockets.any? + + activated_binds = [] + + activated_sockets.keys.each do |proto, addr, port| + if port + tcp_url = "#{proto}://#{addr}:#{port}" + ssl_url = "ssl://#{addr}:#{port}" + ssl_url_prefix = "#{ssl_url}?" + + existing = binds.find { |bind| bind == tcp_url || bind == ssl_url || bind.start_with?(ssl_url_prefix) } + + activated_binds << (existing || tcp_url) + else + # TODO: can there be a SSL bind without a port? 
+ activated_binds << "#{proto}://#{addr}" + end + end + + if only_matching + activated_binds + else + binds | activated_binds + end + end + + def parse(binds, log_writer = nil, log_msg = 'Listening') + log_writer ||= @log_writer + binds.each do |str| + uri = URI.parse str + case uri.scheme + when "tcp" + if fd = @inherited_fds.delete(str) + io = inherit_tcp_listener uri.host, uri.port, fd + log_writer.log "* Inherited #{str}" + elsif sock = @activated_sockets.delete([ :tcp, uri.host, uri.port ]) + io = inherit_tcp_listener uri.host, uri.port, sock + log_writer.log "* Activated #{str}" + else + ios_len = @ios.length + params = Util.parse_query uri.query + + low_latency = params.key?('low_latency') && params['low_latency'] != 'false' + backlog = params.fetch('backlog', 1024).to_i + + io = add_tcp_listener uri.host, uri.port, low_latency, backlog + + @ios[ios_len..-1].each do |i| + addr = loc_addr_str i + log_writer.log "* #{log_msg} on http://#{addr}" + end + end + + @listeners << [str, io] if io + when "unix" + path = "#{uri.host}#{uri.path}".gsub("%20", " ") + abstract = false + if str.start_with? 'unix://@' + raise "OS does not support abstract UNIXSockets" unless Puma.abstract_unix_socket? 
+ abstract = true + path = "@#{path}" + end + + if fd = @inherited_fds.delete(str) + @unix_paths << path unless abstract || File.exist?(path) + io = inherit_unix_listener path, fd + log_writer.log "* Inherited #{str}" + elsif sock = @activated_sockets.delete([ :unix, path ]) || + !abstract && @activated_sockets.delete([ :unix, File.realdirpath(path) ]) + @unix_paths << path unless abstract || File.exist?(path) + io = inherit_unix_listener path, sock + log_writer.log "* Activated #{str}" + else + umask = nil + mode = nil + backlog = 1024 + + if uri.query + params = Util.parse_query uri.query + if u = params['umask'] + # Use Integer() to respect the 0 prefix as octal + umask = Integer(u) + end + + if u = params['mode'] + mode = Integer('0'+u) + end + + if u = params['backlog'] + backlog = Integer(u) + end + end + + @unix_paths << path unless abstract || File.exist?(path) + io = add_unix_listener path, umask, mode, backlog + log_writer.log "* #{log_msg} on #{str}" + end + + @listeners << [str, io] + when "ssl" + cert_key = %w[cert key] + + raise "Puma compiled without SSL support" unless HAS_SSL + + params = Util.parse_query uri.query + + # If key and certs are not defined and localhost gem is required. + # localhost gem will be used for self signed + # Load localhost authority if not loaded. + # Ruby 3 `values_at` accepts an array, earlier do not + if params.values_at(*cert_key).all? { |v| v.to_s.empty? 
} + ctx = localhost_authority && localhost_authority_context + end + + ctx ||= + begin + # Extract cert_pem and key_pem from options[:store] if present + cert_key.each do |v| + if params[v]&.start_with?('store:') + index = Integer(params.delete(v).split('store:').last) + params["#{v}_pem"] = @conf.options[:store][index] + end + end + MiniSSL::ContextBuilder.new(params, @log_writer).context + end + + if fd = @inherited_fds.delete(str) + log_writer.log "* Inherited #{str}" + io = inherit_ssl_listener fd, ctx + elsif sock = @activated_sockets.delete([ :tcp, uri.host, uri.port ]) + io = inherit_ssl_listener sock, ctx + log_writer.log "* Activated #{str}" + else + ios_len = @ios.length + backlog = params.fetch('backlog', 1024).to_i + low_latency = params['low_latency'] != 'false' + io = add_ssl_listener uri.host, uri.port, ctx, low_latency, backlog + + @ios[ios_len..-1].each do |i| + addr = loc_addr_str i + log_writer.log "* #{log_msg} on ssl://#{addr}?#{uri.query}" + end + end + + @listeners << [str, io] if io + else + log_writer.error "Invalid URI: #{str}" + end + end + + # If we inherited fds but didn't use them (because of a + # configuration change), then be sure to close them. + @inherited_fds.each do |str, fd| + log_writer.log "* Closing unused inherited connection: #{str}" + + begin + IO.for_fd(fd).close + rescue SystemCallError + end + + # We have to unlink a unix socket path that's not being used + uri = URI.parse str + if uri.scheme == "unix" + path = "#{uri.host}#{uri.path}" + File.unlink path + end + end + + # Also close any unused activated sockets + unless @activated_sockets.empty? + fds = @ios.map(&:to_i) + @activated_sockets.each do |key, sock| + next if fds.include? 
sock.to_i + log_writer.log "* Closing unused activated socket: #{key.first}://#{key[1..-1].join ':'}" + begin + sock.close + rescue SystemCallError + end + # We have to unlink a unix socket path that's not being used + File.unlink key[1] if key.first == :unix + end + end + end + + def localhost_authority + @localhost_authority ||= Localhost::Authority.fetch if defined?(Localhost::Authority) && !Puma::IS_JRUBY + end + + def localhost_authority_context + return unless localhost_authority + + key_path, crt_path = if [:key_path, :certificate_path].all? { |m| localhost_authority.respond_to?(m) } + [localhost_authority.key_path, localhost_authority.certificate_path] + else + local_certificates_path = File.expand_path("~/.localhost") + [File.join(local_certificates_path, "localhost.key"), File.join(local_certificates_path, "localhost.crt")] + end + MiniSSL::ContextBuilder.new({ "key" => key_path, "cert" => crt_path }, @log_writer).context + end + + # Tell the server to listen on host +host+, port +port+. + # If +optimize_for_latency+ is true (the default) then clients connecting + # will be optimized for latency over throughput. + # + # +backlog+ indicates how many unaccepted connections the kernel should + # allow to accumulate before returning connection refused. + # + def add_tcp_listener(host, port, optimize_for_latency=true, backlog=1024) + if host == "localhost" + loopback_addresses.each do |addr| + add_tcp_listener addr, port, optimize_for_latency, backlog + end + return + end + + host = host[1..-2] if host&.start_with? '[' + tcp_server = TCPServer.new(host, port) + + if optimize_for_latency + tcp_server.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) + end + tcp_server.setsockopt(Socket::SOL_SOCKET,Socket::SO_REUSEADDR, true) + tcp_server.listen backlog + + @ios << tcp_server + tcp_server + end + + def inherit_tcp_listener(host, port, fd) + s = fd.kind_of?(::TCPServer) ? 
fd : ::TCPServer.for_fd(fd) + + @ios << s + s + end + + def add_ssl_listener(host, port, ctx, + optimize_for_latency=true, backlog=1024) + + raise "Puma compiled without SSL support" unless HAS_SSL + # Puma will try to use local authority context if context is supplied nil + ctx ||= localhost_authority_context + + if host == "localhost" + loopback_addresses.each do |addr| + add_ssl_listener addr, port, ctx, optimize_for_latency, backlog + end + return + end + + host = host[1..-2] if host&.start_with? '[' + s = TCPServer.new(host, port) + if optimize_for_latency + s.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) + end + s.setsockopt(Socket::SOL_SOCKET,Socket::SO_REUSEADDR, true) + s.listen backlog + + ssl = MiniSSL::Server.new s, ctx + env = @proto_env.dup + env[HTTPS_KEY] = HTTPS + @envs[ssl] = env + + @ios << ssl + s + end + + def inherit_ssl_listener(fd, ctx) + raise "Puma compiled without SSL support" unless HAS_SSL + # Puma will try to use local authority context if context is supplied nil + ctx ||= localhost_authority_context + + s = fd.kind_of?(::TCPServer) ? fd : ::TCPServer.for_fd(fd) + + ssl = MiniSSL::Server.new(s, ctx) + + env = @proto_env.dup + env[HTTPS_KEY] = HTTPS + @envs[ssl] = env + + @ios << ssl + + s + end + + # Tell the server to listen on +path+ as a UNIX domain socket. + # + def add_unix_listener(path, umask=nil, mode=nil, backlog=1024) + # Let anyone connect by default + umask ||= 0 + + begin + old_mask = File.umask(umask) + + if File.exist? 
path + begin + old = UNIXSocket.new path + rescue SystemCallError, IOError + File.unlink path + else + old.close + raise "There is already a server bound to: #{path}" + end + end + s = UNIXServer.new path.sub(/\A@/, "\0") # check for abstract UNIXSocket + s.listen backlog + @ios << s + ensure + File.umask old_mask + end + + if mode + File.chmod mode, path + end + + env = @proto_env.dup + env[REMOTE_ADDR] = "127.0.0.1" + @envs[s] = env + + s + end + + def inherit_unix_listener(path, fd) + s = fd.kind_of?(::TCPServer) ? fd : ::UNIXServer.for_fd(fd) + + @ios << s + + env = @proto_env.dup + env[REMOTE_ADDR] = "127.0.0.1" + @envs[s] = env + + s + end + + def close_listeners + @listeners.each do |l, io| + begin + io.close unless io.closed? + uri = URI.parse l + next unless uri.scheme == 'unix' + unix_path = "#{uri.host}#{uri.path}" + File.unlink unix_path if @unix_paths.include?(unix_path) && File.exist?(unix_path) + rescue Errno::EBADF + end + end + end + + def redirects_for_restart + redirects = @listeners.map { |a| [a[1].to_i, a[1].to_i] }.to_h + redirects[:close_others] = true + redirects + end + + # @version 5.0.0 + def redirects_for_restart_env + @listeners.each_with_object({}).with_index do |(listen, memo), i| + memo["PUMA_INHERIT_#{i}"] = "#{listen[1].to_i}:#{listen[0]}" + end + end + + private + + # @!attribute [r] loopback_addresses + def loopback_addresses + t = Socket.ip_address_list.select do |addrinfo| + addrinfo.ipv6_loopback? || addrinfo.ipv4_loopback? + end + t.map! { |addrinfo| addrinfo.ip_address }; t.uniq!; t + end + + def loc_addr_str(io) + loc_addr = io.to_io.local_address + if loc_addr.ipv6? 
+ "[#{loc_addr.ip_unpack[0]}]:#{loc_addr.ip_unpack[1]}" + else + loc_addr.ip_unpack.join(':') + end + end + + # @version 5.0.0 + def socket_activation_fd(int) + int + 3 # 3 is the magic number you add to follow the SA protocol + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/cli.rb b/vendor/cache/puma-fba741b91780/lib/puma/cli.rb new file mode 100644 index 000000000..8f0c7a8f2 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/cli.rb @@ -0,0 +1,247 @@ +# frozen_string_literal: true + +require 'optparse' +require 'uri' + +require_relative '../puma' +require_relative 'configuration' +require_relative 'launcher' +require_relative 'const' +require_relative 'log_writer' + +module Puma + class << self + # The CLI exports a Puma::Configuration instance here to allow + # apps to pick it up. An app must load this object conditionally + # because it is not set if the app is launched via any mechanism + # other than the CLI class. + attr_accessor :cli_config + end + + # Handles invoke a Puma::Server in a command line style. + # + class CLI + # Create a new CLI object using +argv+ as the command line + # arguments. + # + def initialize(argv, log_writer = LogWriter.stdio, events = Events.new, env: ENV) + @debug = false + @argv = argv.dup + @log_writer = log_writer + @events = events + + @conf = nil + + @stdout = nil + @stderr = nil + @append = false + + @control_url = nil + @control_options = {} + + setup_options env + + begin + @parser.parse! 
@argv + + if file = @argv.shift + @conf.configure do |user_config, file_config| + file_config.rackup file + end + end + rescue UnsupportedOption + exit 1 + end + + @conf.configure do |user_config, file_config| + if @stdout || @stderr + user_config.stdout_redirect @stdout, @stderr, @append + end + + if @control_url + user_config.activate_control_app @control_url, @control_options + end + end + + @launcher = Puma::Launcher.new(@conf, env: ENV, log_writer: @log_writer, events: @events, argv: argv) + end + + attr_reader :launcher + + # Parse the options, load the rackup, start the server and wait + # for it to finish. + # + def run + @launcher.run + end + + private + def unsupported(str) + @log_writer.error(str) + raise UnsupportedOption + end + + def configure_control_url(command_line_arg) + if command_line_arg + @control_url = command_line_arg + elsif Puma.jruby? + unsupported "No default url available on JRuby" + end + end + + # Build the OptionParser object to handle the available options. + # + + def setup_options(env = ENV) + @conf = Configuration.new({}, {events: @events}, env) do |user_config, file_config| + @parser = OptionParser.new do |o| + o.on "-b", "--bind URI", "URI to bind to (tcp://, unix://, ssl://)" do |arg| + user_config.bind arg + end + + o.on "--bind-to-activated-sockets [only]", "Bind to all activated sockets" do |arg| + user_config.bind_to_activated_sockets(arg || true) + end + + o.on "-C", "--config PATH", "Load PATH as a config file" do |arg| + file_config.load arg + end + + # Identical to supplying --config "-", but more semantic + o.on "--no-config", "Prevent Puma from searching for a config file" do |arg| + file_config.load "-" + end + + o.on "--control-url URL", "The bind url to use for the control server. 
Use 'auto' to use temp unix server" do |arg| + configure_control_url(arg) + end + + o.on "--control-token TOKEN", + "The token to use as authentication for the control server" do |arg| + @control_options[:auth_token] = arg + end + + o.on "--debug", "Log lowlevel debugging information" do + user_config.debug + end + + o.on "--dir DIR", "Change to DIR before starting" do |d| + user_config.directory d + end + + o.on "-e", "--environment ENVIRONMENT", + "The environment to run the Rack app on (default development)" do |arg| + user_config.environment arg + end + + o.on "-f", "--fork-worker=[REQUESTS]", OptionParser::DecimalInteger, + "Fork new workers from existing worker. Cluster mode only", + "Auto-refork after REQUESTS (default 1000)" do |*args| + user_config.fork_worker(*args.compact) + end + + o.on "-I", "--include PATH", "Specify $LOAD_PATH directories" do |arg| + $LOAD_PATH.unshift(*arg.split(':')) + end + + o.on "--idle-timeout SECONDS", "Number of seconds until the next request before automatic shutdown" do |arg| + user_config.idle_timeout arg + end + + o.on "-p", "--port PORT", "Define the TCP port to bind to", + "Use -b for more advanced options" do |arg| + user_config.bind "tcp://#{Configuration::DEFAULTS[:tcp_host]}:#{arg}" + end + + o.on "--pidfile PATH", "Use PATH as a pidfile" do |arg| + user_config.pidfile arg + end + + o.on "--plugin PLUGIN", "Load the given PLUGIN. Can be used multiple times to load multiple plugins." do |arg| + user_config.plugin arg + end + + o.on "--preload", "Preload the app. Cluster mode only" do + user_config.preload_app! 
+ end + + o.on "--prune-bundler", "Prune out the bundler env if possible" do + user_config.prune_bundler + end + + o.on "--extra-runtime-dependencies GEM1,GEM2", "Defines any extra needed gems when using --prune-bundler" do |arg| + user_config.extra_runtime_dependencies arg.split(',') + end + + o.on "-q", "--quiet", "Do not log requests internally (default true)" do + user_config.quiet + end + + o.on "-v", "--log-requests", "Log requests as they occur" do + user_config.log_requests + end + + o.on "-R", "--restart-cmd CMD", + "The puma command to run during a hot restart", + "Default: inferred" do |cmd| + user_config.restart_command cmd + end + + o.on "-s", "--silent", "Do not log prompt messages other than errors" do + @log_writer = LogWriter.new(NullIO.new, $stderr) + end + + o.on "-S", "--state PATH", "Where to store the state details" do |arg| + user_config.state_path arg + end + + o.on '-t', '--threads INT', "min:max threads to use (default 0:16)" do |arg| + min, max = arg.split(":") + if max + user_config.threads min, max + else + user_config.threads min, min + end + end + + o.on "--early-hints", "Enable early hints support" do + user_config.early_hints + end + + o.on "-V", "--version", "Print the version information" do + puts "puma version #{Puma::Const::VERSION}" + exit 0 + end + + o.on "-w", "--workers COUNT", + "Activate cluster mode: How many worker processes to create" do |arg| + user_config.workers arg + end + + o.on "--tag NAME", "Additional text to display in process listing" do |arg| + user_config.tag arg + end + + o.on "--redirect-stdout FILE", "Redirect STDOUT to a specific file" do |arg| + @stdout = arg.to_s + end + + o.on "--redirect-stderr FILE", "Redirect STDERR to a specific file" do |arg| + @stderr = arg.to_s + end + + o.on "--[no-]redirect-append", "Append to redirected files" do |val| + @append = val + end + + o.banner = "puma " + + o.on_tail "-h", "--help", "Show help" do + $stdout.puts o + exit 0 + end + end + end + end + end +end diff 
--git a/vendor/cache/puma-fba741b91780/lib/puma/client.rb b/vendor/cache/puma-fba741b91780/lib/puma/client.rb new file mode 100644 index 000000000..67eddc66d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/client.rb @@ -0,0 +1,682 @@ +# frozen_string_literal: true + +class IO + # We need to use this for a jruby work around on both 1.8 and 1.9. + # So this either creates the constant (on 1.8), or harmlessly + # reopens it (on 1.9). + module WaitReadable + end +end + +require_relative 'detect' +require_relative 'io_buffer' +require 'tempfile' + +if Puma::IS_JRUBY + # We have to work around some OpenSSL buffer/io-readiness bugs + # so we pull it in regardless of if the user is binding + # to an SSL socket + require 'openssl' +end + +module Puma + + class ConnectionError < RuntimeError; end + + class HttpParserError501 < IOError; end + + #———————————————————————— DO NOT USE — this class is for internal use only ——— + + + # An instance of this class represents a unique request from a client. + # For example, this could be a web request from a browser or from CURL. + # + # An instance of `Puma::Client` can be used as if it were an IO object + # by the reactor. The reactor is expected to call `#to_io` + # on any non-IO objects it polls. For example, nio4r internally calls + # `IO::try_convert` (which may call `#to_io`) when a new socket is + # registered. + # + # Instances of this class are responsible for knowing if + # the header and body are fully buffered via the `try_to_finish` method. + # They can be used to "time out" a response via the `timeout_at` reader. + # + class Client # :nodoc: + + # this tests all values but the last, which must be chunked + ALLOWED_TRANSFER_ENCODING = %w[compress deflate gzip].freeze + + # chunked body validation + CHUNK_SIZE_INVALID = /[^\h]/.freeze + CHUNK_VALID_ENDING = Const::LINE_END + CHUNK_VALID_ENDING_SIZE = CHUNK_VALID_ENDING.bytesize + + # The maximum number of bytes we'll buffer looking for a valid + # chunk header. 
+ MAX_CHUNK_HEADER_SIZE = 4096 + + # The maximum amount of excess data the client sends + # using chunk size extensions before we abort the connection. + MAX_CHUNK_EXCESS = 16 * 1024 + + # Content-Length header value validation + CONTENT_LENGTH_VALUE_INVALID = /[^\d]/.freeze + + TE_ERR_MSG = 'Invalid Transfer-Encoding' + + # The object used for a request with no body. All requests with + # no body share this one object since it has no state. + EmptyBody = NullIO.new + + include Puma::Const + + def initialize(io, env=nil) + @io = io + @to_io = io.to_io + @io_buffer = IOBuffer.new + @proto_env = env + @env = env&.dup + + @parser = HttpParser.new + @parsed_bytes = 0 + @read_header = true + @read_proxy = false + @ready = false + + @body = nil + @body_read_start = nil + @buffer = nil + @tempfile = nil + + @timeout_at = nil + + @requests_served = 0 + @hijacked = false + + @http_content_length_limit = nil + @http_content_length_limit_exceeded = false + + @peerip = nil + @peer_family = nil + @listener = nil + @remote_addr_header = nil + @expect_proxy_proto = false + + @body_remain = 0 + + @in_last_chunk = false + + # need unfrozen ASCII-8BIT, +'' is UTF-8 + @read_buffer = String.new # rubocop: disable Performance/UnfreezeString + end + + attr_reader :env, :to_io, :body, :io, :timeout_at, :ready, :hijacked, + :tempfile, :io_buffer, :http_content_length_limit_exceeded + + attr_writer :peerip, :http_content_length_limit + + attr_accessor :remote_addr_header, :listener + + # Remove in Puma 7? + def closed? + @to_io.closed? + end + + # Test to see if io meets a bare minimum of functioning, @to_io needs to be + # used for MiniSSL::Socket + def io_ok? + @to_io.is_a?(::BasicSocket) && !closed? 
+ end + + # @!attribute [r] inspect + def inspect + "#" + end + + # For the hijack protocol (allows us to just put the Client object + # into the env) + def call + @hijacked = true + env[HIJACK_IO] ||= @io + end + + # @!attribute [r] in_data_phase + def in_data_phase + !(@read_header || @read_proxy) + end + + def set_timeout(val) + @timeout_at = Process.clock_gettime(Process::CLOCK_MONOTONIC) + val + end + + # Number of seconds until the timeout elapses. + def timeout + [@timeout_at - Process.clock_gettime(Process::CLOCK_MONOTONIC), 0].max + end + + def reset(fast_check=true) + @parser.reset + @io_buffer.reset + @read_header = true + @read_proxy = !!@expect_proxy_proto + @env = @proto_env.dup + @parsed_bytes = 0 + @ready = false + @body_remain = 0 + @peerip = nil if @remote_addr_header + @in_last_chunk = false + @http_content_length_limit_exceeded = false + + if @buffer + return false unless try_to_parse_proxy_protocol + + @parsed_bytes = @parser.execute(@env, @buffer, @parsed_bytes) + + if @parser.finished? + return setup_body + elsif @parsed_bytes >= MAX_HEADER + raise HttpParserError, + "HEADER is longer than allowed, aborting client early." + end + + return false + else + begin + if fast_check && @to_io.wait_readable(FAST_TRACK_KA_TIMEOUT) + return try_to_finish + end + rescue IOError + # swallow it + end + end + end + + def close + tempfile_close + begin + @io.close + rescue IOError, Errno::EBADF + Puma::Util.purge_interrupt_queue + end + end + + def tempfile_close + tf_path = @tempfile&.path + @tempfile&.close + File.unlink(tf_path) if tf_path + @tempfile = nil + @body = nil + rescue Errno::ENOENT, IOError + end + + # If necessary, read the PROXY protocol from the buffer. Returns + # false if more data is needed. + def try_to_parse_proxy_protocol + if @read_proxy + if @expect_proxy_proto == :v1 + if @buffer.include? 
"\r\n" + if md = PROXY_PROTOCOL_V1_REGEX.match(@buffer) + if md[1] + @peerip = md[1].split(" ")[0] + end + @buffer = md.post_match + end + # if the buffer has a \r\n but doesn't have a PROXY protocol + # request, this is just HTTP from a non-PROXY client; move on + @read_proxy = false + return @buffer.size > 0 + else + return false + end + end + end + true + end + + def try_to_finish + if env[CONTENT_LENGTH] && above_http_content_limit(env[CONTENT_LENGTH].to_i) + @http_content_length_limit_exceeded = true + end + + if @http_content_length_limit_exceeded + @buffer = nil + @body = EmptyBody + set_ready + return true + end + + return read_body if in_data_phase + + data = nil + begin + data = @io.read_nonblock(CHUNK_SIZE) + rescue IO::WaitReadable + return false + rescue EOFError + # Swallow error, don't log + rescue SystemCallError, IOError + raise ConnectionError, "Connection error detected during read" + end + + # No data means a closed socket + unless data + @buffer = nil + set_ready + raise EOFError + end + + if @buffer + @buffer << data + else + @buffer = data + end + + return false unless try_to_parse_proxy_protocol + + @parsed_bytes = @parser.execute(@env, @buffer, @parsed_bytes) + + if @parser.finished? && above_http_content_limit(@parser.body.bytesize) + @http_content_length_limit_exceeded = true + end + + if @parser.finished? + return setup_body + elsif @parsed_bytes >= MAX_HEADER + raise HttpParserError, + "HEADER is longer than allowed, aborting client early." + end + + false + end + + def eagerly_finish + return true if @ready + return false unless @to_io.wait_readable(0) + try_to_finish + end + + def finish(timeout) + return if @ready + @to_io.wait_readable(timeout) || timeout! until try_to_finish + end + + def timeout! 
+ write_error(408) if in_data_phase + raise ConnectionError + end + + def write_error(status_code) + begin + @io << ERROR_RESPONSE[status_code] + rescue StandardError + end + end + + def peerip + return @peerip if @peerip + + if @remote_addr_header + hdr = (@env[@remote_addr_header] || @io.peeraddr.last).split(/[\s,]/).first + @peerip = hdr + return hdr + end + + @peerip ||= @io.peeraddr.last + end + + def peer_family + return @peer_family if @peer_family + + @peer_family ||= begin + @io.local_address.afamily + rescue + Socket::AF_INET + end + end + + # Returns true if the persistent connection can be closed immediately + # without waiting for the configured idle/shutdown timeout. + # @version 5.0.0 + # + def can_close? + # Allow connection to close if we're not in the middle of parsing a request. + @parsed_bytes == 0 + end + + def expect_proxy_proto=(val) + if val + if @read_header + @read_proxy = true + end + else + @read_proxy = false + end + @expect_proxy_proto = val + end + + private + + def setup_body + @body_read_start = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond) + + if @env[HTTP_EXPECT] == CONTINUE + # TODO allow a hook here to check the headers before + # going forward + @io << HTTP_11_100 + @io.flush + end + + @read_header = false + + body = @parser.body + + te = @env[TRANSFER_ENCODING2] + if te + te_lwr = te.downcase + if te.include? ',' + te_ary = te_lwr.split ',' + te_count = te_ary.count CHUNKED + te_valid = te_ary[0..-2].all? { |e| ALLOWED_TRANSFER_ENCODING.include? e } + if te_ary.last == CHUNKED && te_count == 1 && te_valid + @env.delete TRANSFER_ENCODING2 + return setup_chunked_body body + elsif te_count >= 1 + raise HttpParserError , "#{TE_ERR_MSG}, multiple chunked: '#{te}'" + elsif !te_valid + raise HttpParserError501, "#{TE_ERR_MSG}, unknown value: '#{te}'" + end + elsif te_lwr == CHUNKED + @env.delete TRANSFER_ENCODING2 + return setup_chunked_body body + elsif ALLOWED_TRANSFER_ENCODING.include? 
te_lwr + raise HttpParserError , "#{TE_ERR_MSG}, single value must be chunked: '#{te}'" + else + raise HttpParserError501 , "#{TE_ERR_MSG}, unknown value: '#{te}'" + end + end + + @chunked_body = false + + cl = @env[CONTENT_LENGTH] + + if cl + # cannot contain characters that are not \d, or be empty + if CONTENT_LENGTH_VALUE_INVALID.match?(cl) || cl.empty? + raise HttpParserError, "Invalid Content-Length: #{cl.inspect}" + end + else + @buffer = body.empty? ? nil : body + @body = EmptyBody + set_ready + return true + end + + content_length = cl.to_i + + remain = content_length - body.bytesize + + if remain <= 0 + # Part of the body is a pipelined request OR garbage. We'll deal with that later. + if content_length == 0 + @body = EmptyBody + if body.empty? + @buffer = nil + else + @buffer = body + end + elsif remain == 0 + @body = StringIO.new body + @buffer = nil + else + @body = StringIO.new(body[0,content_length]) + @buffer = body[content_length..-1] + end + set_ready + return true + end + + if remain > MAX_BODY + @body = Tempfile.create(Const::PUMA_TMP_BASE) + File.unlink @body.path unless IS_WINDOWS + @body.binmode + @tempfile = @body + else + # The body[0,0] trick is to get an empty string in the same + # encoding as body. 
+ @body = StringIO.new body[0,0] + end + + @body.write body + + @body_remain = remain + + false + end + + def read_body + if @chunked_body + return read_chunked_body + end + + # Read an odd sized chunk so we can read even sized ones + # after this + remain = @body_remain + + if remain > CHUNK_SIZE + want = CHUNK_SIZE + else + want = remain + end + + begin + chunk = @io.read_nonblock(want, @read_buffer) + rescue IO::WaitReadable + return false + rescue SystemCallError, IOError + raise ConnectionError, "Connection error detected during read" + end + + # No chunk means a closed socket + unless chunk + @body.close + @buffer = nil + set_ready + raise EOFError + end + + remain -= @body.write(chunk) + + if remain <= 0 + @body.rewind + @buffer = nil + set_ready + return true + end + + @body_remain = remain + + false + end + + def read_chunked_body + while true + begin + chunk = @io.read_nonblock(4096, @read_buffer) + rescue IO::WaitReadable + return false + rescue SystemCallError, IOError + raise ConnectionError, "Connection error detected during read" + end + + # No chunk means a closed socket + unless chunk + @body.close + @buffer = nil + set_ready + raise EOFError + end + + if decode_chunk(chunk) + @env[CONTENT_LENGTH] = @chunked_content_length.to_s + return true + end + end + end + + def setup_chunked_body(body) + @chunked_body = true + @partial_part_left = 0 + @prev_chunk = "" + @excess_cr = 0 + + @body = Tempfile.create(Const::PUMA_TMP_BASE) + File.unlink @body.path unless IS_WINDOWS + @body.binmode + @tempfile = @body + @chunked_content_length = 0 + + if decode_chunk(body) + @env[CONTENT_LENGTH] = @chunked_content_length.to_s + return true + end + end + + # @version 5.0.0 + def write_chunk(str) + @chunked_content_length += @body.write(str) + end + + def decode_chunk(chunk) + if @partial_part_left > 0 + if @partial_part_left <= chunk.size + if @partial_part_left > 2 + write_chunk(chunk[0..(@partial_part_left-3)]) # skip the \r\n + end + chunk = 
chunk[@partial_part_left..-1] + @partial_part_left = 0 + else + if @partial_part_left > 2 + if @partial_part_left == chunk.size + 1 + # Don't include the last \r + write_chunk(chunk[0..(@partial_part_left-3)]) + else + # don't include the last \r\n + write_chunk(chunk) + end + end + @partial_part_left -= chunk.size + return false + end + end + + if @prev_chunk.empty? + io = StringIO.new(chunk) + else + io = StringIO.new(@prev_chunk+chunk) + @prev_chunk = "" + end + + while !io.eof? + line = io.gets + if line.end_with?(CHUNK_VALID_ENDING) + # Puma doesn't process chunk extensions, but should parse if they're + # present, which is the reason for the semicolon regex + chunk_hex = line.strip[/\A[^;]+/] + if CHUNK_SIZE_INVALID.match? chunk_hex + raise HttpParserError, "Invalid chunk size: '#{chunk_hex}'" + end + len = chunk_hex.to_i(16) + if len == 0 + @in_last_chunk = true + @body.rewind + rest = io.read + if rest.bytesize < CHUNK_VALID_ENDING_SIZE + @buffer = nil + @partial_part_left = CHUNK_VALID_ENDING_SIZE - rest.bytesize + return false + else + # if the next character is a CRLF, set buffer to everything after that CRLF + start_of_rest = if rest.start_with?(CHUNK_VALID_ENDING) + CHUNK_VALID_ENDING_SIZE + else # we have started a trailer section, which we do not support. skip it! + rest.index(CHUNK_VALID_ENDING*2) + CHUNK_VALID_ENDING_SIZE*2 + end + + @buffer = rest[start_of_rest..-1] + @buffer = nil if @buffer.empty? + set_ready + return true + end + end + + # Track the excess as a function of the size of the + # header vs the size of the actual data. Excess can + # go negative (and is expected to) when the body is + # significant. + # The additional of chunk_hex.size and 2 compensates + # for a client sending 1 byte in a chunked body over + # a long period of time, making sure that that client + # isn't accidentally eventually punished. 
+ @excess_cr += (line.size - len - chunk_hex.size - 2) + + if @excess_cr >= MAX_CHUNK_EXCESS + raise HttpParserError, "Maximum chunk excess detected" + end + + len += 2 + + part = io.read(len) + + unless part + @partial_part_left = len + next + end + + got = part.size + + case + when got == len + # proper chunked segment must end with "\r\n" + if part.end_with? CHUNK_VALID_ENDING + write_chunk(part[0..-3]) # to skip the ending \r\n + else + raise HttpParserError, "Chunk size mismatch" + end + when got <= len - 2 + write_chunk(part) + @partial_part_left = len - part.size + when got == len - 1 # edge where we get just \r but not \n + write_chunk(part[0..-2]) + @partial_part_left = len - part.size + end + else + if @prev_chunk.size + line.size >= MAX_CHUNK_HEADER_SIZE + raise HttpParserError, "maximum size of chunk header exceeded" + end + + @prev_chunk = line + return false + end + end + + if @in_last_chunk + set_ready + true + else + false + end + end + + def set_ready + if @body_read_start + @env['puma.request_body_wait'] = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond) - @body_read_start + end + @requests_served += 1 + @ready = true + end + + def above_http_content_limit(value) + @http_content_length_limit&.< value + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/cluster.rb b/vendor/cache/puma-fba741b91780/lib/puma/cluster.rb new file mode 100644 index 000000000..e19cf000f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/cluster.rb @@ -0,0 +1,616 @@ +# frozen_string_literal: true + +require_relative 'runner' +require_relative 'util' +require_relative 'plugin' +require_relative 'cluster/worker_handle' +require_relative 'cluster/worker' + +module Puma + # This class is instantiated by the `Puma::Launcher` and used + # to boot and serve a Ruby application when puma "workers" are needed + # i.e. when using multi-processes. 
For example `$ puma -w 5` + # + # An instance of this class will spawn the number of processes passed in + # via the `spawn_workers` method call. Each worker will have it's own + # instance of a `Puma::Server`. + class Cluster < Runner + def initialize(launcher) + super(launcher) + + @phase = 0 + @workers = [] + @next_check = Time.now + + @phased_restart = false + end + + # Returns the list of cluster worker handles. + # @return [Array] + attr_reader :workers + + def stop_workers + log "- Gracefully shutting down workers..." + @workers.each { |x| x.term } + + begin + loop do + wait_workers + break if @workers.reject {|w| w.pid.nil?}.empty? + sleep 0.2 + end + rescue Interrupt + log "! Cancelled waiting for workers" + end + end + + def start_phased_restart + @events.fire_on_restart! + @phase += 1 + log "- Starting phased worker restart, phase: #{@phase}" + + # Be sure to change the directory again before loading + # the app. This way we can pick up new code. + dir = @launcher.restart_dir + log "+ Changing to #{dir}" + Dir.chdir dir + end + + def redirect_io + super + + @workers.each { |x| x.hup } + end + + def spawn_workers + diff = @options[:workers] - @workers.size + return if diff < 1 + + master = Process.pid + if @options[:fork_worker] + @fork_writer << "-1\n" + end + + diff.times do + idx = next_worker_index + + if @options[:fork_worker] && idx != 0 + @fork_writer << "#{idx}\n" + pid = nil + else + pid = spawn_worker(idx, master) + end + + debug "Spawned worker: #{pid}" + @workers << WorkerHandle.new(idx, pid, @phase, @options) + end + + if @options[:fork_worker] && all_workers_in_phase? + @fork_writer << "0\n" + end + end + + # @version 5.0.0 + def spawn_worker(idx, master) + @config.run_hooks(:before_worker_fork, idx, @log_writer) + + pid = fork { worker(idx, master) } + if !pid + log "! Complete inability to spawn new workers detected" + log "! Seppuku is the only choice." + exit! 
1 + end + + @config.run_hooks(:after_worker_fork, idx, @log_writer) + pid + end + + def cull_workers + diff = @workers.size - @options[:workers] + return if diff < 1 + debug "Culling #{diff} workers" + + workers = workers_to_cull(diff) + debug "Workers to cull: #{workers.inspect}" + + workers.each do |worker| + log "- Worker #{worker.index} (PID: #{worker.pid}) terminating" + worker.term + end + end + + def workers_to_cull(diff) + workers = @workers.sort_by(&:started_at) + + # In fork_worker mode, worker 0 acts as our master process. + # We should avoid culling it to preserve copy-on-write memory gains. + workers.reject! { |w| w.index == 0 } if @options[:fork_worker] + + workers[cull_start_index(diff), diff] + end + + def cull_start_index(diff) + case @options[:worker_culling_strategy] + when :oldest + 0 + else # :youngest + -diff + end + end + + # @!attribute [r] next_worker_index + def next_worker_index + occupied_positions = @workers.map(&:index) + idx = 0 + idx += 1 until !occupied_positions.include?(idx) + idx + end + + def worker_at(idx) + @workers.find { |w| w.index == idx } + end + + def all_workers_booted? + @workers.count { |w| !w.booted? } == 0 + end + + def all_workers_in_phase? + @workers.all? { |w| w.phase == @phase } + end + + def all_workers_idle_timed_out? + (@workers.map(&:pid) - idle_timed_out_worker_pids).empty? + end + + def check_workers + return if @next_check >= Time.now + + @next_check = Time.now + @options[:worker_check_interval] + + timeout_workers + wait_workers + cull_workers + spawn_workers + + if all_workers_booted? + # If we're running at proper capacity, check to see if + # we need to phase any workers out (which will restart + # in the right phase). + # + w = @workers.find { |x| x.phase != @phase } + + if w + log "- Stopping #{w.pid} for phased upgrade..." + unless w.term? + w.term + log "- #{w.signal} sent to #{w.pid}..." + end + end + end + + t = @workers.reject(&:term?) 
+ t.map!(&:ping_timeout) + + @next_check = [t.min, @next_check].compact.min + end + + def worker(index, master) + @workers = [] + + @master_read.close + @suicide_pipe.close + @fork_writer.close + + pipes = { check_pipe: @check_pipe, worker_write: @worker_write } + if @options[:fork_worker] + pipes[:fork_pipe] = @fork_pipe + pipes[:wakeup] = @wakeup + end + + server = start_server if preload? + new_worker = Worker.new index: index, + master: master, + launcher: @launcher, + pipes: pipes, + server: server + new_worker.run + end + + def restart + @restart = true + stop + end + + def phased_restart(refork = false) + return false if @options[:preload_app] && !refork + + @phased_restart = true + wakeup! + + true + end + + def stop + @status = :stop + wakeup! + end + + def stop_blocked + @status = :stop if @status == :run + wakeup! + @control&.stop true + Process.waitall + end + + def halt + @status = :halt + wakeup! + end + + def reload_worker_directory + dir = @launcher.restart_dir + log "+ Changing to #{dir}" + Dir.chdir dir + end + + # Inside of a child process, this will return all zeroes, as @workers is only populated in + # the master process. + # @!attribute [r] stats + def stats + old_worker_count = @workers.count { |w| w.phase != @phase } + worker_status = @workers.map do |w| + { + started_at: utc_iso8601(w.started_at), + pid: w.pid, + index: w.index, + phase: w.phase, + booted: w.booted?, + last_checkin: utc_iso8601(w.last_checkin), + last_status: w.last_status, + } + end + + { + started_at: utc_iso8601(@started_at), + workers: @workers.size, + phase: @phase, + booted_workers: worker_status.count { |w| w[:booted] }, + old_workers: old_worker_count, + worker_status: worker_status, + }.merge(super) + end + + def preload? + @options[:preload_app] + end + + # @version 5.0.0 + def fork_worker! 
+ if (worker = worker_at 0) + worker.phase += 1 + end + phased_restart(true) + end + + # We do this in a separate method to keep the lambda scope + # of the signals handlers as small as possible. + def setup_signals + if @options[:fork_worker] + Signal.trap "SIGURG" do + fork_worker! + end + + # Auto-fork after the specified number of requests. + if (fork_requests = @options[:fork_worker].to_i) > 0 + @events.register(:ping!) do |w| + fork_worker! if w.index == 0 && + w.phase == 0 && + w.last_status[:requests_count] >= fork_requests + end + end + end + + Signal.trap "SIGCHLD" do + wakeup! + end + + Signal.trap "TTIN" do + @options[:workers] += 1 + wakeup! + end + + Signal.trap "TTOU" do + @options[:workers] -= 1 if @options[:workers] >= 2 + wakeup! + end + + master_pid = Process.pid + + Signal.trap "SIGTERM" do + # The worker installs their own SIGTERM when booted. + # Until then, this is run by the worker and the worker + # should just exit if they get it. + if Process.pid != master_pid + log "Early termination of worker" + exit! 0 + else + @launcher.close_binder_listeners + + stop_workers + stop + @events.fire_on_stopped! + raise(SignalException, "SIGTERM") if @options[:raise_exception_on_sigterm] + exit 0 # Clean exit, workers were stopped + end + end + end + + def run + @status = :run + + output_header "cluster" + + # This is aligned with the output from Runner, see Runner#output_header + log "* Workers: #{@options[:workers]}" + + if preload? + # Threads explicitly marked as fork safe will be ignored. Used in Rails, + # but may be used by anyone. Note that we need to explicit + # Process::Waiter check here because there's a bug in Ruby 2.6 and below + # where calling thread_variable_get on a Process::Waiter will segfault. + # We can drop that clause once those versions of Ruby are no longer + # supported. 
+ fork_safe = ->(t) { !t.is_a?(Process::Waiter) && t.thread_variable_get(:fork_safe) } + + before = Thread.list.reject(&fork_safe) + + log "* Restarts: (\u2714) hot (\u2716) phased" + log "* Preloading application" + load_and_bind + + after = Thread.list.reject(&fork_safe) + + if after.size > before.size + threads = (after - before) + if threads.first.respond_to? :backtrace + log "! WARNING: Detected #{after.size-before.size} Thread(s) started in app boot:" + threads.each do |t| + log "! #{t.inspect} - #{t.backtrace ? t.backtrace.first : ''}" + end + else + log "! WARNING: Detected #{after.size-before.size} Thread(s) started in app boot" + end + end + else + log "* Restarts: (\u2714) hot (\u2714) phased" + + unless @config.app_configured? + error "No application configured, nothing to run" + exit 1 + end + + @launcher.binder.parse @options[:binds] + end + + read, @wakeup = Puma::Util.pipe + + setup_signals + + # Used by the workers to detect if the master process dies. + # If select says that @check_pipe is ready, it's because the + # master has exited and @suicide_pipe has been automatically + # closed. + # + @check_pipe, @suicide_pipe = Puma::Util.pipe + + # Separate pipe used by worker 0 to receive commands to + # fork new worker processes. + @fork_pipe, @fork_writer = Puma::Util.pipe + + log "Use Ctrl-C to stop" + + single_worker_warning + + redirect_io + + Plugins.fire_background + + @launcher.write_state + + start_control + + @master_read, @worker_write = read, @wakeup + + @options[:worker_write] = @worker_write + + @config.run_hooks(:before_fork, nil, @log_writer) + + spawn_workers + + Signal.trap "SIGINT" do + stop + end + + begin + booted = false + in_phased_restart = false + workers_not_booted = @options[:workers] + + while @status == :run + begin + if @options[:idle_timeout] && all_workers_idle_timed_out? 
+ log "- All workers reached idle timeout" + break + end + + if @phased_restart + start_phased_restart + @phased_restart = false + in_phased_restart = true + workers_not_booted = @options[:workers] + end + + check_workers + + if read.wait_readable([0, @next_check - Time.now].max) + req = read.read_nonblock(1) + next unless req + + if req == Puma::Const::PipeRequest::WAKEUP + @next_check = Time.now + next + end + + result = read.gets + pid = result.to_i + + if req == Puma::Const::PipeRequest::BOOT || req == Puma::Const::PipeRequest::FORK + pid, idx = result.split(':').map(&:to_i) + w = worker_at idx + w.pid = pid if w.pid.nil? + end + + if w = @workers.find { |x| x.pid == pid } + case req + when Puma::Const::PipeRequest::BOOT + w.boot! + log "- Worker #{w.index} (PID: #{pid}) booted in #{w.uptime.round(2)}s, phase: #{w.phase}" + @next_check = Time.now + workers_not_booted -= 1 + when Puma::Const::PipeRequest::EXTERNAL_TERM + # external term, see worker method, Signal.trap "SIGTERM" + w.term! + when Puma::Const::PipeRequest::TERM + w.term unless w.term? + when Puma::Const::PipeRequest::PING + status = result.sub(/^\d+/,'').chomp + w.ping!(status) + @events.fire(:ping!, w) + + if in_phased_restart && workers_not_booted.positive? && w0 = worker_at(0) + w0.ping!(status) + @events.fire(:ping!, w0) + end + + if !booted && @workers.none? {|worker| worker.last_status.empty?} + @events.fire_on_booted! + debug_loaded_extensions("Loaded Extensions - master:") if @log_writer.debug? + booted = true + end + when Puma::Const::PipeRequest::IDLE + if idle_workers[pid] + idle_workers.delete pid + else + idle_workers[pid] = true + end + end + else + log "! Out-of-sync worker list, no #{pid} worker" + end + end + + if in_phased_restart && workers_not_booted.zero? + @events.fire_on_booted! + debug_loaded_extensions("Loaded Extensions - master:") if @log_writer.debug? 
+ in_phased_restart = false + end + rescue Interrupt + @status = :stop + end + end + + stop_workers unless @status == :halt + ensure + @check_pipe.close + @suicide_pipe.close + read.close + @wakeup.close + end + end + + private + + def single_worker_warning + return if @options[:workers] != 1 || @options[:silence_single_worker_warning] + + log "! WARNING: Detected running cluster mode with 1 worker." + log "! Running Puma in cluster mode with a single worker is often a misconfiguration." + log "! Consider running Puma in single-mode (workers = 0) in order to reduce memory overhead." + log "! Set the `silence_single_worker_warning` option to silence this warning message." + end + + # loops thru @workers, removing workers that exited, and calling + # `#term` if needed + def wait_workers + # Reap all children, known workers or otherwise. + # If puma has PID 1, as it's common in containerized environments, + # then it's responsible for reaping orphaned processes, so we must reap + # all our dead children, regardless of whether they are workers we spawned + # or some reattached processes. + reaped_children = {} + loop do + begin + pid, status = Process.wait2(-1, Process::WNOHANG) + break unless pid + reaped_children[pid] = status + rescue Errno::ECHILD + break + end + end + + @workers.reject! do |w| + next false if w.pid.nil? + begin + # We may need to check the PID individually because: + # 1. From Ruby versions 2.6 to 3.2, `Process.detach` can prevent or delay + # `Process.wait2(-1)` from detecting a terminated process: https://bugs.ruby-lang.org/issues/19837. + # 2. When `fork_worker` is enabled, some worker may not be direct children, + # but grand children. Because of this they won't be reaped by `Process.wait2(-1)`. + if reaped_children.delete(w.pid) || Process.wait(w.pid, Process::WNOHANG) + true + else + w.term if w.term? 
+ nil + end + rescue Errno::ECHILD + begin + Process.kill(0, w.pid) + # child still alive but has another parent (e.g., using fork_worker) + w.term if w.term? + false + rescue Errno::ESRCH, Errno::EPERM + true # child is already terminated + end + end + end + + # Log unknown children + reaped_children.each do |pid, status| + log "! reaped unknown child process pid=#{pid} status=#{status}" + end + end + + # @version 5.0.0 + def timeout_workers + @workers.each do |w| + if !w.term? && w.ping_timeout <= Time.now + details = if w.booted? + "(Worker #{w.index} failed to check in within #{@options[:worker_timeout]} seconds)" + else + "(Worker #{w.index} failed to boot within #{@options[:worker_boot_timeout]} seconds)" + end + log "! Terminating timed out worker #{details}: #{w.pid}" + w.kill + end + end + end + + def idle_timed_out_worker_pids + idle_workers.keys + end + + def idle_workers + @idle_workers ||= {} + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/cluster/worker.rb b/vendor/cache/puma-fba741b91780/lib/puma/cluster/worker.rb new file mode 100644 index 000000000..d4e0952d0 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/cluster/worker.rb @@ -0,0 +1,180 @@ +# frozen_string_literal: true + +module Puma + class Cluster < Puma::Runner + #—————————————————————— DO NOT USE — this class is for internal use only ——— + + + # This class is instantiated by the `Puma::Cluster` and represents a single + # worker process. + # + # At the core of this class is running an instance of `Puma::Server` which + # gets created via the `start_server` method from the `Puma::Runner` class + # that this inherits from. 
+ class Worker < Puma::Runner # :nodoc: + attr_reader :index, :master + + def initialize(index:, master:, launcher:, pipes:, server: nil) + super(launcher) + + @index = index + @master = master + @check_pipe = pipes[:check_pipe] + @worker_write = pipes[:worker_write] + @fork_pipe = pipes[:fork_pipe] + @wakeup = pipes[:wakeup] + @server = server + @hook_data = {} + end + + def run + title = "puma: cluster worker #{index}: #{master}" + title += " [#{@options[:tag]}]" if @options[:tag] && !@options[:tag].empty? + $0 = title + + Signal.trap "SIGINT", "IGNORE" + Signal.trap "SIGCHLD", "DEFAULT" + + Thread.new do + Puma.set_thread_name "wrkr check" + @check_pipe.wait_readable + log "! Detected parent died, dying" + exit! 1 + end + + # If we're not running under a Bundler context, then + # report the info about the context we will be using + if !ENV['BUNDLE_GEMFILE'] + if File.exist?("Gemfile") + log "+ Gemfile in context: #{File.expand_path("Gemfile")}" + elsif File.exist?("gems.rb") + log "+ Gemfile in context: #{File.expand_path("gems.rb")}" + end + end + + # Invoke any worker boot hooks so they can get + # things in shape before booting the app. + @config.run_hooks(:before_worker_boot, index, @log_writer, @hook_data) + + begin + server = @server ||= start_server + rescue Exception => e + log "! Unable to start worker" + log e + log e.backtrace.join("\n ") + exit 1 + end + + restart_server = Queue.new << true << false + + fork_worker = @options[:fork_worker] && index == 0 + + if fork_worker + restart_server.clear + worker_pids = [] + Signal.trap "SIGCHLD" do + wakeup! if worker_pids.reject! 
do |p| + Process.wait(p, Process::WNOHANG) rescue true + end + end + + Thread.new do + Puma.set_thread_name "wrkr fork" + while (idx = @fork_pipe.gets) + idx = idx.to_i + if idx == -1 # stop server + if restart_server.length > 0 + restart_server.clear + server.begin_restart(true) + @config.run_hooks(:before_refork, nil, @log_writer, @hook_data) + end + elsif idx == 0 # restart server + restart_server << true << false + else # fork worker + worker_pids << pid = spawn_worker(idx) + @worker_write << "#{Puma::Const::PipeRequest::FORK}#{pid}:#{idx}\n" rescue nil + end + end + end + end + + Signal.trap "SIGTERM" do + @worker_write << "#{Puma::Const::PipeRequest::EXTERNAL_TERM}#{Process.pid}\n" rescue nil + restart_server.clear + server.stop + restart_server << false + end + + begin + @worker_write << "#{Puma::Const::PipeRequest::BOOT}#{Process.pid}:#{index}\n" + rescue SystemCallError, IOError + Puma::Util.purge_interrupt_queue + STDERR.puts "Master seems to have exited, exiting." + return + end + + while restart_server.pop + server_thread = server.run + + if @log_writer.debug? && index == 0 + debug_loaded_extensions "Loaded Extensions - worker 0:" + end + + stat_thread ||= Thread.new(@worker_write) do |io| + Puma.set_thread_name "stat pld" + base_payload = "p#{Process.pid}" + + while true + begin + b = server.backlog || 0 + r = server.running || 0 + t = server.pool_capacity || 0 + m = server.max_threads || 0 + rc = server.requests_count || 0 + payload = %Q!#{base_payload}{ "backlog":#{b}, "running":#{r}, "pool_capacity":#{t}, "max_threads":#{m}, "requests_count":#{rc} }\n! 
+ io << payload + rescue IOError + Puma::Util.purge_interrupt_queue + break + end + sleep @options[:worker_check_interval] + end + end + server_thread.join + end + + # Invoke any worker shutdown hooks so they can prevent the worker + # exiting until any background operations are completed + @config.run_hooks(:before_worker_shutdown, index, @log_writer, @hook_data) + ensure + @worker_write << "#{Puma::Const::PipeRequest::TERM}#{Process.pid}\n" rescue nil + @worker_write.close + end + + private + + def spawn_worker(idx) + @config.run_hooks(:before_worker_fork, idx, @log_writer, @hook_data) + + pid = fork do + new_worker = Worker.new index: idx, + master: master, + launcher: @launcher, + pipes: { check_pipe: @check_pipe, + worker_write: @worker_write }, + server: @server + new_worker.run + end + + if !pid + log "! Complete inability to spawn new workers detected" + log "! Seppuku is the only choice." + exit! 1 + end + + @config.run_hooks(:after_worker_fork, idx, @log_writer, @hook_data) + pid + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/cluster/worker_handle.rb b/vendor/cache/puma-fba741b91780/lib/puma/cluster/worker_handle.rb new file mode 100644 index 000000000..12b94764e --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/cluster/worker_handle.rb @@ -0,0 +1,96 @@ +# frozen_string_literal: true + +module Puma + class Cluster < Runner + #—————————————————————— DO NOT USE — this class is for internal use only ——— + + + # This class represents a worker process from the perspective of the puma + # master process. It contains information about the process and its health + # and it exposes methods to control the process via IPC. It does not + # include the actual logic executed by the worker process itself. For that, + # see Puma::Cluster::Worker. 
+ class WorkerHandle # :nodoc: + def initialize(idx, pid, phase, options) + @index = idx + @pid = pid + @phase = phase + @stage = :started + @signal = "TERM" + @options = options + @first_term_sent = nil + @started_at = Time.now + @last_checkin = Time.now + @last_status = {} + @term = false + end + + attr_reader :index, :pid, :phase, :signal, :last_checkin, :last_status, :started_at + + # @version 5.0.0 + attr_writer :pid, :phase + + def booted? + @stage == :booted + end + + def uptime + Time.now - started_at + end + + def boot! + @last_checkin = Time.now + @stage = :booted + end + + def term! + @term = true + end + + def term? + @term + end + + STATUS_PATTERN = /{ "backlog":(?\d*), "running":(?\d*), "pool_capacity":(?\d*), "max_threads":(?\d*), "requests_count":(?\d*) }/ + private_constant :STATUS_PATTERN + + def ping!(status) + @last_checkin = Time.now + @last_status = status.match(STATUS_PATTERN).named_captures.map { |c_name, c| [c_name.to_sym, c.to_i] }.to_h + end + + # @see Puma::Cluster#check_workers + # @version 5.0.0 + def ping_timeout + @last_checkin + + (booted? ? 
+ @options[:worker_timeout] : + @options[:worker_boot_timeout] + ) + end + + def term + begin + if @first_term_sent && (Time.now - @first_term_sent) > @options[:worker_shutdown_timeout] + @signal = "KILL" + else + @term ||= true + @first_term_sent ||= Time.now + end + Process.kill @signal, @pid if @pid + rescue Errno::ESRCH + end + end + + def kill + @signal = 'KILL' + term + end + + def hup + Process.kill "HUP", @pid + rescue Errno::ESRCH + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/commonlogger.rb b/vendor/cache/puma-fba741b91780/lib/puma/commonlogger.rb new file mode 100644 index 000000000..144e9a9fb --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/commonlogger.rb @@ -0,0 +1,115 @@ +# frozen_string_literal: true + +module Puma + # Rack::CommonLogger forwards every request to the given +app+, and + # logs a line in the + # {Apache common log format}[https://httpd.apache.org/docs/2.4/logs.html#common] + # to the +logger+. + # + # If +logger+ is nil, CommonLogger will fall back +rack.errors+, which is + # an instance of Rack::NullLogger. + # + # +logger+ can be any class, including the standard library Logger, and is + # expected to have either +write+ or +<<+ method, which accepts the CommonLogger::FORMAT. 
+ # According to the SPEC, the error stream must also respond to +puts+ + # (which takes a single argument that responds to +to_s+), and +flush+ + # (which is called without arguments in order to make the error appear for + # sure) + class CommonLogger + # Common Log Format: https://httpd.apache.org/docs/2.4/logs.html#common + # + # lilith.local - - [07/Aug/2006 23:58:02 -0400] "GET / HTTP/1.1" 500 - + # + # %{%s - %s [%s] "%s %s%s %s" %d %s\n} % + FORMAT = %{%s - %s [%s] "%s %s%s %s" %d %s %0.4f\n} + + HIJACK_FORMAT = %{%s - %s [%s] "%s %s%s %s" HIJACKED -1 %0.4f\n} + + LOG_TIME_FORMAT = '%d/%b/%Y:%H:%M:%S %z' + + CONTENT_LENGTH = 'Content-Length' # should be lower case from app, + # Util::HeaderHash allows mixed + HTTP_VERSION = Const::HTTP_VERSION + HTTP_X_FORWARDED_FOR = Const::HTTP_X_FORWARDED_FOR + PATH_INFO = Const::PATH_INFO + QUERY_STRING = Const::QUERY_STRING + REMOTE_ADDR = Const::REMOTE_ADDR + REMOTE_USER = 'REMOTE_USER' + REQUEST_METHOD = Const::REQUEST_METHOD + + def initialize(app, logger=nil) + @app = app + @logger = logger + end + + def call(env) + began_at = Time.now + status, header, body = @app.call(env) + header = Util::HeaderHash.new(header) + + # If we've been hijacked, then output a special line + if env['rack.hijack_io'] + log_hijacking(env, 'HIJACK', header, began_at) + else + ary = env['rack.after_reply'] + ary << lambda { log(env, status, header, began_at) } + end + + [status, header, body] + end + + private + + def log_hijacking(env, status, header, began_at) + now = Time.now + + msg = HIJACK_FORMAT % [ + env[HTTP_X_FORWARDED_FOR] || env[REMOTE_ADDR] || "-", + env[REMOTE_USER] || "-", + now.strftime(LOG_TIME_FORMAT), + env[REQUEST_METHOD], + env[PATH_INFO], + env[QUERY_STRING].empty? ? 
"" : "?#{env[QUERY_STRING]}", + env[HTTP_VERSION], + now - began_at ] + + write(msg) + end + + def log(env, status, header, began_at) + now = Time.now + length = extract_content_length(header) + + msg = FORMAT % [ + env[HTTP_X_FORWARDED_FOR] || env[REMOTE_ADDR] || "-", + env[REMOTE_USER] || "-", + now.strftime(LOG_TIME_FORMAT), + env[REQUEST_METHOD], + env[PATH_INFO], + env[QUERY_STRING].empty? ? "" : "?#{env[QUERY_STRING]}", + env[HTTP_VERSION], + status.to_s[0..3], + length, + now - began_at ] + + write(msg) + end + + def write(msg) + logger = @logger || env['rack.errors'] + + # Standard library logger doesn't support write but it supports << which actually + # calls to write on the log device without formatting + if logger.respond_to?(:write) + logger.write(msg) + else + logger << msg + end + end + + def extract_content_length(headers) + value = headers[CONTENT_LENGTH] or return '-' + value.to_s == '0' ? '-' : value + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/configuration.rb b/vendor/cache/puma-fba741b91780/lib/puma/configuration.rb new file mode 100644 index 000000000..4cd1cd807 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/configuration.rb @@ -0,0 +1,405 @@ +# frozen_string_literal: true + +require_relative 'plugin' +require_relative 'const' +require_relative 'dsl' + +module Puma + # A class used for storing "leveled" configuration options. + # + # In this class any "user" specified options take precedence over any + # "file" specified options, take precedence over any "default" options. + # + # User input is preferred over "defaults": + # user_options = { foo: "bar" } + # default_options = { foo: "zoo" } + # options = UserFileDefaultOptions.new(user_options, default_options) + # puts options[:foo] + # # => "bar" + # + # All values can be accessed via `all_of` + # + # puts options.all_of(:foo) + # # => ["bar", "zoo"] + # + # A "file" option can be set. 
This config will be preferred over "default" options + # but will defer to any available "user" specified options. + # + # user_options = { foo: "bar" } + # default_options = { rackup: "zoo.rb" } + # options = UserFileDefaultOptions.new(user_options, default_options) + # options.file_options[:rackup] = "sup.rb" + # puts options[:rackup] + # # => "sup.rb" + # + # The "default" options can be set via procs. These are resolved during runtime + # via calls to `finalize_values` + class UserFileDefaultOptions + def initialize(user_options, default_options) + @user_options = user_options + @file_options = {} + @default_options = default_options + end + + attr_reader :user_options, :file_options, :default_options + + def [](key) + fetch(key) + end + + def []=(key, value) + user_options[key] = value + end + + def fetch(key, default_value = nil) + return user_options[key] if user_options.key?(key) + return file_options[key] if file_options.key?(key) + return default_options[key] if default_options.key?(key) + + default_value + end + + def all_of(key) + user = user_options[key] + file = file_options[key] + default = default_options[key] + + user = [user] unless user.is_a?(Array) + file = [file] unless file.is_a?(Array) + default = [default] unless default.is_a?(Array) + + user.compact! + file.compact! + default.compact! + + user + file + default + end + + def finalize_values + @default_options.each do |k,v| + if v.respond_to? :call + @default_options[k] = v.call + end + end + end + + def final_options + default_options + .merge(file_options) + .merge(user_options) + end + end + + # The main configuration class of Puma. + # + # It can be initialized with a set of "user" options and "default" options. + # Defaults will be merged with `Configuration.puma_default_options`. 
+ # + # This class works together with 2 main other classes the `UserFileDefaultOptions` + # which stores configuration options in order so the precedence is that user + # set configuration wins over "file" based configuration wins over "default" + # configuration. These configurations are set via the `DSL` class. This + # class powers the Puma config file syntax and does double duty as a configuration + # DSL used by the `Puma::CLI` and Puma rack handler. + # + # It also handles loading plugins. + # + # [Note:] + # `:port` and `:host` are not valid keys. By the time they make it to the + # configuration options they are expected to be incorporated into a `:binds` key. + # Under the hood the DSL maps `port` and `host` calls to `:binds` + # + # config = Configuration.new({}) do |user_config, file_config, default_config| + # user_config.port 3003 + # end + # config.load + # puts config.options[:port] + # # => 3003 + # + # It is expected that `load` is called on the configuration instance after setting + # config. This method expands any values in `config_file` and puts them into the + # correct configuration option hash. + # + # Once all configuration is complete it is expected that `clamp` will be called + # on the instance. This will expand any procs stored under "default" values. This + # is done because an environment variable may have been modified while loading + # configuration files. + class Configuration + DEFAULTS = { + auto_trim_time: 30, + binds: ['tcp://0.0.0.0:9292'.freeze], + clean_thread_locals: false, + debug: false, + enable_keep_alives: true, + early_hints: nil, + environment: 'development'.freeze, + # Number of seconds to wait until we get the first data for the request. + first_data_timeout: 30, + # Number of seconds to wait until the next request before shutting down. 
+ idle_timeout: nil, + io_selector_backend: :auto, + log_requests: false, + logger: STDOUT, + # How many requests to attempt inline before sending a client back to + # the reactor to be subject to normal ordering. The idea here is that + # we amortize the cost of going back to the reactor for a well behaved + # but very "greedy" client across 10 requests. This prevents a not + # well behaved client from monopolizing the thread forever. + max_fast_inline: 10, + max_threads: Puma.mri? ? 5 : 16, + min_threads: 0, + mode: :http, + mutate_stdout_and_stderr_to_sync_on_write: true, + out_of_band: [], + # Number of seconds for another request within a persistent session. + persistent_timeout: 20, + queue_requests: true, + rackup: 'config.ru'.freeze, + raise_exception_on_sigterm: true, + reaping_time: 1, + remote_address: :socket, + silence_single_worker_warning: false, + silence_fork_callback_warning: false, + tag: File.basename(Dir.getwd), + tcp_host: '0.0.0.0'.freeze, + tcp_port: 9292, + wait_for_less_busy_worker: 0.005, + worker_boot_timeout: 60, + worker_check_interval: 5, + worker_culling_strategy: :youngest, + worker_shutdown_timeout: 30, + worker_timeout: 60, + workers: 0, + http_content_length_limit: nil + } + + def initialize(user_options={}, default_options = {}, env = ENV, &block) + default_options = self.puma_default_options(env).merge(default_options) + + @options = UserFileDefaultOptions.new(user_options, default_options) + @plugins = PluginLoader.new + @user_dsl = DSL.new(@options.user_options, self) + @file_dsl = DSL.new(@options.file_options, self) + @default_dsl = DSL.new(@options.default_options, self) + + if !@options[:prune_bundler] + default_options[:preload_app] = (@options[:workers] > 1) && Puma.forkable? + end + + @puma_bundler_pruned = env.key? 
'PUMA_BUNDLER_PRUNED' + + if block + configure(&block) + end + end + + attr_reader :options, :plugins + + def configure + yield @user_dsl, @file_dsl, @default_dsl + ensure + @user_dsl._offer_plugins + @file_dsl._offer_plugins + @default_dsl._offer_plugins + end + + def initialize_copy(other) + @conf = nil + @cli_options = nil + @options = @options.dup + end + + def flatten + dup.flatten! + end + + def flatten! + @options = @options.flatten + self + end + + def puma_default_options(env = ENV) + defaults = DEFAULTS.dup + puma_options_from_env(env).each { |k,v| defaults[k] = v if v } + defaults + end + + def puma_options_from_env(env = ENV) + min = env['PUMA_MIN_THREADS'] || env['MIN_THREADS'] + max = env['PUMA_MAX_THREADS'] || env['MAX_THREADS'] + workers = if env['WEB_CONCURRENCY'] == 'auto' + require_processor_counter + ::Concurrent.available_processor_count + else + env['WEB_CONCURRENCY'] + end + + { + min_threads: min && Integer(min), + max_threads: max && Integer(max), + workers: workers && Integer(workers), + environment: env['APP_ENV'] || env['RACK_ENV'] || env['RAILS_ENV'], + } + end + + def load + config_files.each { |config_file| @file_dsl._load_from(config_file) } + + @options + end + + def config_files + files = @options.all_of(:config_files) + + return [] if files == ['-'] + return files if files.any? + + first_default_file = %W(config/puma/#{@options[:environment]}.rb config/puma.rb).find do |f| + File.exist?(f) + end + + [first_default_file] + end + + # Call once all configuration (included from rackup files) + # is loaded to flesh out any defaults + def clamp + @options.finalize_values + end + + # Injects the Configuration object into the env + class ConfigMiddleware + def initialize(config, app) + @config = config + @app = app + end + + def call(env) + env[Const::PUMA_CONFIG] = @config + @app.call(env) + end + end + + # Indicate if there is a properly configured app + # + def app_configured? 
+ @options[:app] || File.exist?(rackup) + end + + def rackup + @options[:rackup] + end + + # Load the specified rackup file, pull options from + # the rackup file, and set @app. + # + def app + found = options[:app] || load_rackup + + if @options[:log_requests] + require_relative 'commonlogger' + logger = @options[:logger] + found = CommonLogger.new(found, logger) + end + + ConfigMiddleware.new(self, found) + end + + # Return which environment we're running in + def environment + @options[:environment] + end + + def load_plugin(name) + @plugins.create name + end + + # @param key [:Symbol] hook to run + # @param arg [Launcher, Int] `:on_restart` passes Launcher + # + def run_hooks(key, arg, log_writer, hook_data = nil) + @options.all_of(key).each do |b| + begin + if Array === b + hook_data[b[1]] ||= Hash.new + b[0].call arg, hook_data[b[1]] + else + b.call arg + end + rescue => e + log_writer.log "WARNING hook #{key} failed with exception (#{e.class}) #{e.message}" + log_writer.debug e.backtrace.join("\n") + end + end + end + + def final_options + @options.final_options + end + + def self.temp_path + require 'tmpdir' + + t = (Time.now.to_f * 1000).to_i + "#{Dir.tmpdir}/puma-status-#{t}-#{$$}" + end + + private + + def require_processor_counter + require 'concurrent/utility/processor_counter' + rescue LoadError + warn <<~MESSAGE + WEB_CONCURRENCY=auto requires the "concurrent-ruby" gem to be installed. + Please add "concurrent-ruby" to your Gemfile. + MESSAGE + raise + end + + # Load and use the normal Rack builder if we can, otherwise + # fallback to our minimal version. 
+ def rack_builder + # Load bundler now if we can so that we can pickup rack from + # a Gemfile + if @puma_bundler_pruned + begin + require 'bundler/setup' + rescue LoadError + end + end + + begin + require 'rack' + require 'rack/builder' + ::Rack::Builder + rescue LoadError + require_relative 'rack/builder' + Puma::Rack::Builder + end + end + + def load_rackup + raise "Missing rackup file '#{rackup}'" unless File.exist?(rackup) + + rack_app, rack_options = rack_builder.parse_file(rackup) + rack_options = rack_options || {} + + @options.file_options.merge!(rack_options) + + config_ru_binds = [] + rack_options.each do |k, v| + config_ru_binds << v if k.to_s.start_with?("bind") + end + + @options.file_options[:binds] = config_ru_binds unless config_ru_binds.empty? + + rack_app + end + + def self.random_token + require 'securerandom' unless defined?(SecureRandom) + + SecureRandom.hex(16) + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/const.rb b/vendor/cache/puma-fba741b91780/lib/puma/const.rb new file mode 100644 index 000000000..1a26a3de6 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/const.rb @@ -0,0 +1,307 @@ +#encoding: utf-8 +# frozen_string_literal: true + +module Puma + class UnsupportedOption < RuntimeError + end + + # Every standard HTTP code mapped to the appropriate message. These are + # used so frequently that they are placed directly in Puma for easy + # access rather than Puma::Const itself. + + # Every standard HTTP code mapped to the appropriate message. 
+ # Generated with: + # curl -s https://www.iana.org/assignments/http-status-codes/http-status-codes-1.csv | \ + # ruby -ne 'm = /^(\d{3}),(?!Unassigned|\(Unused\))([^,]+)/.match($_) and \ + # puts "#{m[1]} => \x27#{m[2].strip}\x27,"' + HTTP_STATUS_CODES = { + 100 => 'Continue', + 101 => 'Switching Protocols', + 102 => 'Processing', + 103 => 'Early Hints', + 200 => 'OK', + 201 => 'Created', + 202 => 'Accepted', + 203 => 'Non-Authoritative Information', + 204 => 'No Content', + 205 => 'Reset Content', + 206 => 'Partial Content', + 207 => 'Multi-Status', + 208 => 'Already Reported', + 226 => 'IM Used', + 300 => 'Multiple Choices', + 301 => 'Moved Permanently', + 302 => 'Found', + 303 => 'See Other', + 304 => 'Not Modified', + 305 => 'Use Proxy', + 307 => 'Temporary Redirect', + 308 => 'Permanent Redirect', + 400 => 'Bad Request', + 401 => 'Unauthorized', + 402 => 'Payment Required', + 403 => 'Forbidden', + 404 => 'Not Found', + 405 => 'Method Not Allowed', + 406 => 'Not Acceptable', + 407 => 'Proxy Authentication Required', + 408 => 'Request Timeout', + 409 => 'Conflict', + 410 => 'Gone', + 411 => 'Length Required', + 412 => 'Precondition Failed', + 413 => 'Content Too Large', + 414 => 'URI Too Long', + 415 => 'Unsupported Media Type', + 416 => 'Range Not Satisfiable', + 417 => 'Expectation Failed', + 421 => 'Misdirected Request', + 422 => 'Unprocessable Content', + 423 => 'Locked', + 424 => 'Failed Dependency', + 425 => 'Too Early', + 426 => 'Upgrade Required', + 428 => 'Precondition Required', + 429 => 'Too Many Requests', + 431 => 'Request Header Fields Too Large', + 451 => 'Unavailable For Legal Reasons', + 500 => 'Internal Server Error', + 501 => 'Not Implemented', + 502 => 'Bad Gateway', + 503 => 'Service Unavailable', + 504 => 'Gateway Timeout', + 505 => 'HTTP Version Not Supported', + 506 => 'Variant Also Negotiates', + 507 => 'Insufficient Storage', + 508 => 'Loop Detected', + 510 => 'Not Extended (OBSOLETED)', + 511 => 'Network Authentication Required' + 
}.freeze + + # For some HTTP status codes the client only expects headers. + # + + STATUS_WITH_NO_ENTITY_BODY = { + 204 => true, + 205 => true, + 304 => true + }.freeze + + # Frequently used constants when constructing requests or responses. Many times + # the constant just refers to a string with the same contents. Using these constants + # gave about a 3% to 10% performance improvement over using the strings directly. + # + # The constants are frozen because Hash#[]= when called with a String key dups + # the String UNLESS the String is frozen. This saves us therefore 2 object + # allocations when creating the env hash later. + # + # While Puma does try to emulate the CGI/1.2 protocol, it does not use the REMOTE_IDENT, + # REMOTE_USER, or REMOTE_HOST parameters since those are either a security problem or + # too taxing on performance. + module Const + + PUMA_VERSION = VERSION = "6.4.3" + CODE_NAME = "The Eagle of Durango" + + PUMA_SERVER_STRING = ["puma", PUMA_VERSION, CODE_NAME].join(" ").freeze + + FAST_TRACK_KA_TIMEOUT = 0.2 + + # How long to wait when getting some write blocking on the socket when + # sending data back + WRITE_TIMEOUT = 10 + + # The original URI requested by the client. + REQUEST_URI= "REQUEST_URI" + REQUEST_PATH = "REQUEST_PATH" + QUERY_STRING = "QUERY_STRING" + CONTENT_LENGTH = "CONTENT_LENGTH" + + PATH_INFO = "PATH_INFO" + + PUMA_TMP_BASE = "puma" + + ERROR_RESPONSE = { + # Indicate that we couldn't parse the request + 400 => "HTTP/1.1 400 Bad Request\r\n\r\n", + # The standard empty 404 response for bad requests. Use Error4040Handler for custom stuff. + 404 => "HTTP/1.1 404 Not Found\r\nConnection: close\r\n\r\n", + # The standard empty 408 response for requests that timed out. + 408 => "HTTP/1.1 408 Request Timeout\r\nConnection: close\r\n\r\n", + # Indicate that there was an internal error, obviously. 
+ 500 => "HTTP/1.1 500 Internal Server Error\r\n\r\n", + # Incorrect or invalid header value + 501 => "HTTP/1.1 501 Not Implemented\r\n\r\n", + # A common header for indicating the server is too busy. Not used yet. + 503 => "HTTP/1.1 503 Service Unavailable\r\n\r\n" + }.freeze + + # The basic max request size we'll try to read. + CHUNK_SIZE = 16 * 1024 + + # This is the maximum header that is allowed before a client is booted. The parser detects + # this, but we'd also like to do this as well. + MAX_HEADER = 1024 * (80 + 32) + + # Maximum request body size before it is moved out of memory and into a tempfile for reading. + MAX_BODY = MAX_HEADER + + REQUEST_METHOD = "REQUEST_METHOD" + HEAD = "HEAD" + + # based on https://www.rfc-editor.org/rfc/rfc9110.html#name-overview, + # with CONNECT removed, and PATCH added + SUPPORTED_HTTP_METHODS = %w[HEAD GET POST PUT DELETE OPTIONS TRACE PATCH].freeze + + # list from https://www.iana.org/assignments/http-methods/http-methods.xhtml + # as of 04-May-23 + IANA_HTTP_METHODS = %w[ + ACL + BASELINE-CONTROL + BIND + CHECKIN + CHECKOUT + CONNECT + COPY + DELETE + GET + HEAD + LABEL + LINK + LOCK + MERGE + MKACTIVITY + MKCALENDAR + MKCOL + MKREDIRECTREF + MKWORKSPACE + MOVE + OPTIONS + ORDERPATCH + PATCH + POST + PRI + PROPFIND + PROPPATCH + PUT + REBIND + REPORT + SEARCH + TRACE + UNBIND + UNCHECKOUT + UNLINK + UNLOCK + UPDATE + UPDATEREDIRECTREF + VERSION-CONTROL + ].freeze + + # ETag is based on the apache standard of hex mtime-size-inode (inode is 0 on win32) + LINE_END = "\r\n" + REMOTE_ADDR = "REMOTE_ADDR" + HTTP_X_FORWARDED_FOR = "HTTP_X_FORWARDED_FOR" + HTTP_X_FORWARDED_SSL = "HTTP_X_FORWARDED_SSL" + HTTP_X_FORWARDED_SCHEME = "HTTP_X_FORWARDED_SCHEME" + HTTP_X_FORWARDED_PROTO = "HTTP_X_FORWARDED_PROTO" + + SERVER_NAME = "SERVER_NAME" + SERVER_PORT = "SERVER_PORT" + HTTP_HOST = "HTTP_HOST" + PORT_80 = "80" + PORT_443 = "443" + LOCALHOST = "localhost" + LOCALHOST_IPV4 = "127.0.0.1" + LOCALHOST_IPV6 = "::1" + UNSPECIFIED_IPV4 = 
"0.0.0.0" + UNSPECIFIED_IPV6 = "::" + + SERVER_PROTOCOL = "SERVER_PROTOCOL" + HTTP_11 = "HTTP/1.1" + + SERVER_SOFTWARE = "SERVER_SOFTWARE" + GATEWAY_INTERFACE = "GATEWAY_INTERFACE" + CGI_VER = "CGI/1.2" + + STOP_COMMAND = "?" + HALT_COMMAND = "!" + RESTART_COMMAND = "R" + + RACK_INPUT = "rack.input" + RACK_URL_SCHEME = "rack.url_scheme" + RACK_AFTER_REPLY = "rack.after_reply" + PUMA_SOCKET = "puma.socket" + PUMA_CONFIG = "puma.config" + PUMA_PEERCERT = "puma.peercert" + + HTTP = "http" + HTTPS = "https" + + HTTPS_KEY = "HTTPS" + + HTTP_VERSION = "HTTP_VERSION" + HTTP_CONNECTION = "HTTP_CONNECTION" + HTTP_EXPECT = "HTTP_EXPECT" + CONTINUE = "100-continue" + + HTTP_11_100 = "HTTP/1.1 100 Continue\r\n\r\n" + HTTP_11_200 = "HTTP/1.1 200 OK\r\n" + HTTP_10_200 = "HTTP/1.0 200 OK\r\n" + + CLOSE = "close" + KEEP_ALIVE = "keep-alive" + + CONTENT_LENGTH2 = "content-length" + CONTENT_LENGTH_S = "Content-Length: " + TRANSFER_ENCODING = "transfer-encoding" + TRANSFER_ENCODING2 = "HTTP_TRANSFER_ENCODING" + + CONNECTION_CLOSE = "Connection: close\r\n" + CONNECTION_KEEP_ALIVE = "Connection: Keep-Alive\r\n" + + TRANSFER_ENCODING_CHUNKED = "Transfer-Encoding: chunked\r\n" + CLOSE_CHUNKED = "0\r\n\r\n" + + CHUNKED = "chunked" + + COLON = ": " + + NEWLINE = "\n" + + HIJACK_P = "rack.hijack?" + HIJACK = "rack.hijack" + HIJACK_IO = "rack.hijack_io" + + EARLY_HINTS = "rack.early_hints" + + # Illegal character in the key or value of response header + DQUOTE = "\"" + HTTP_HEADER_DELIMITER = Regexp.escape("(),/:;<=>?@[]{}\\").freeze + ILLEGAL_HEADER_KEY_REGEX = /[\x00-\x20#{DQUOTE}#{HTTP_HEADER_DELIMITER}]/.freeze + # header values can contain HTAB? + ILLEGAL_HEADER_VALUE_REGEX = /[\x00-\x08\x0A-\x1F]/.freeze + + # The keys of headers that should not be convert to underscore + # normalized versions. These headers are ignored at the request reading layer, + # but if we normalize them after reading, it's just confusing for the application. 
+ UNMASKABLE_HEADERS = { + "HTTP_TRANSFER,ENCODING" => true, + "HTTP_CONTENT,LENGTH" => true, + } + + # Banned keys of response header + BANNED_HEADER_KEY = /\A(rack\.|status\z)/.freeze + + PROXY_PROTOCOL_V1_REGEX = /^PROXY (?:TCP4|TCP6|UNKNOWN) ([^\r]+)\r\n/.freeze + + module PipeRequest + WAKEUP = "!" + BOOT = "b" + FORK = "f" + EXTERNAL_TERM = "e" + TERM = "t" + PING = "p" + IDLE = "i" + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/control_cli.rb b/vendor/cache/puma-fba741b91780/lib/puma/control_cli.rb new file mode 100644 index 000000000..5aaef94c3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/control_cli.rb @@ -0,0 +1,316 @@ +# frozen_string_literal: true + +require 'optparse' +require_relative 'const' +require_relative 'detect' +require 'uri' +require 'socket' + +module Puma + class ControlCLI + + # values must be string or nil + # value of `nil` means command cannot be processed via signal + # @version 5.0.3 + CMD_PATH_SIG_MAP = { + 'gc' => nil, + 'gc-stats' => nil, + 'halt' => 'SIGQUIT', + 'info' => 'SIGINFO', + 'phased-restart' => 'SIGUSR1', + 'refork' => 'SIGURG', + 'reload-worker-directory' => nil, + 'reopen-log' => 'SIGHUP', + 'restart' => 'SIGUSR2', + 'start' => nil, + 'stats' => nil, + 'status' => '', + 'stop' => 'SIGTERM', + 'thread-backtraces' => nil, + 'worker-count-down' => 'SIGTTOU', + 'worker-count-up' => 'SIGTTIN' + }.freeze + + # commands that cannot be used in a request + NO_REQ_COMMANDS = %w[info reopen-log worker-count-down worker-count-up].freeze + + # @version 5.0.0 + PRINTABLE_COMMANDS = %w[gc-stats stats thread-backtraces].freeze + + def initialize(argv, stdout=STDOUT, stderr=STDERR, env: ENV) + @state = nil + @quiet = false + @pidfile = nil + @pid = nil + @control_url = nil + @control_auth_token = nil + @config_file = nil + @command = nil + @environment = env['APP_ENV'] || env['RACK_ENV'] || env['RAILS_ENV'] + + @argv = argv.dup + @stdout = stdout + @stderr = stderr + @cli_options = {} + + opts = 
OptionParser.new do |o| + o.banner = "Usage: pumactl (-p PID | -P pidfile | -S status_file | -C url -T token | -F config.rb) (#{CMD_PATH_SIG_MAP.keys.join("|")})" + + o.on "-S", "--state PATH", "Where the state file to use is" do |arg| + @state = arg + end + + o.on "-Q", "--quiet", "Do not display messages" do |arg| + @quiet = true + end + + o.on "-P", "--pidfile PATH", "Pid file" do |arg| + @pidfile = arg + end + + o.on "-p", "--pid PID", "Pid" do |arg| + @pid = arg.to_i + end + + o.on "-C", "--control-url URL", "The bind url to use for the control server" do |arg| + @control_url = arg + end + + o.on "-T", "--control-token TOKEN", "The token to use as authentication for the control server" do |arg| + @control_auth_token = arg + end + + o.on "-F", "--config-file PATH", "Puma config script" do |arg| + @config_file = arg + end + + o.on "-e", "--environment ENVIRONMENT", + "The environment to run the Rack app on (default development)" do |arg| + @environment = arg + end + + o.on_tail("-H", "--help", "Show this message") do + @stdout.puts o + exit + end + + o.on_tail("-V", "--version", "Show version") do + @stdout.puts Const::PUMA_VERSION + exit + end + end + + opts.order!(argv) { |a| opts.terminate a } + opts.parse! + + @command = argv.shift + + # check presence of command + unless @command + raise "Available commands: #{CMD_PATH_SIG_MAP.keys.join(", ")}" + end + + unless CMD_PATH_SIG_MAP.key? @command + raise "Invalid command: #{@command}" + end + + unless @config_file == '-' + environment = @environment || 'development' + + if @config_file.nil? 
+ @config_file = %W(config/puma/#{environment}.rb config/puma.rb).find do |f| + File.exist?(f) + end + end + + if @config_file + require_relative 'configuration' + require_relative 'log_writer' + + config = Puma::Configuration.new({ config_files: [@config_file] }, {} , env) + config.load + @state ||= config.options[:state] + @control_url ||= config.options[:control_url] + @control_auth_token ||= config.options[:control_auth_token] + @pidfile ||= config.options[:pidfile] + end + end + rescue => e + @stdout.puts e.message + exit 1 + end + + def message(msg) + @stdout.puts msg unless @quiet + end + + def prepare_configuration + if @state + unless File.exist? @state + raise "State file not found: #{@state}" + end + + require_relative 'state_file' + + sf = Puma::StateFile.new + sf.load @state + + @control_url = sf.control_url + @control_auth_token = sf.control_auth_token + @pid = sf.pid + elsif @pidfile + # get pid from pid_file + @pid = File.read(@pidfile, mode: 'rb:UTF-8').to_i + end + end + + def send_request + uri = URI.parse @control_url + + host = uri.host + + # create server object by scheme + server = + case uri.scheme + when 'ssl' + require 'openssl' + host = host[1..-2] if host&.start_with? '[' + OpenSSL::SSL::SSLSocket.new( + TCPSocket.new(host, uri.port), + OpenSSL::SSL::SSLContext.new) + .tap { |ssl| ssl.sync_close = true } # default is false + .tap(&:connect) + when 'tcp' + host = host[1..-2] if host&.start_with? '[' + TCPSocket.new host, uri.port + when 'unix' + # check for abstract UNIXSocket + UNIXSocket.new(@control_url.start_with?('unix://@') ? 
+ "\0#{host}#{uri.path}" : "#{host}#{uri.path}") + else + raise "Invalid scheme: #{uri.scheme}" + end + + if @command == 'status' + message 'Puma is started' + else + url = "/#{@command}" + + if @control_auth_token + url = url + "?token=#{@control_auth_token}" + end + + server.syswrite "GET #{url} HTTP/1.0\r\n\r\n" + + unless data = server.read + raise 'Server closed connection before responding' + end + + response = data.split("\r\n") + + if response.empty? + raise "Server sent empty response" + end + + @http, @code, @message = response.first.split(' ',3) + + if @code == '403' + raise 'Unauthorized access to server (wrong auth token)' + elsif @code == '404' + raise "Command error: #{response.last}" + elsif @code != '200' + raise "Bad response from server: #{@code}" + end + + message "Command #{@command} sent success" + message response.last if PRINTABLE_COMMANDS.include?(@command) + end + ensure + if server + if uri.scheme == 'ssl' + server.sysclose + else + server.close unless server.closed? + end + end + end + + def send_signal + unless @pid + raise 'Neither pid nor control url available' + end + + begin + sig = CMD_PATH_SIG_MAP[@command] + + if sig.nil? + @stdout.puts "'#{@command}' not available via pid only" + @stdout.flush unless @stdout.sync + return + elsif sig.start_with? 'SIG' + if Signal.list.key? sig.sub(/\ASIG/, '') + Process.kill sig, @pid + else + raise "Signal '#{sig}' not available'" + end + elsif @command == 'status' + begin + Process.kill 0, @pid + @stdout.puts 'Puma is started' + @stdout.flush unless @stdout.sync + rescue Errno::ESRCH + raise 'Puma is not running' + end + return + end + rescue SystemCallError + if @command == 'restart' + start + else + raise "No pid '#{@pid}' found" + end + end + + message "Command #{@command} sent success" + end + + def run + return start if @command == 'start' + prepare_configuration + + if Puma.windows? 
|| @control_url && !NO_REQ_COMMANDS.include?(@command) + send_request + else + send_signal + end + + rescue => e + message e.message + exit 1 + end + + private + def start + require_relative 'cli' + + run_args = [] + + run_args += ["-S", @state] if @state + run_args += ["-q"] if @quiet + run_args += ["--pidfile", @pidfile] if @pidfile + run_args += ["--control-url", @control_url] if @control_url + run_args += ["--control-token", @control_auth_token] if @control_auth_token + run_args += ["-C", @config_file] if @config_file + run_args += ["-e", @environment] if @environment + + log_writer = Puma::LogWriter.new(@stdout, @stderr) + + # replace $0 because puma use it to generate restart command + puma_cmd = $0.gsub(/pumactl$/, 'puma') + $0 = puma_cmd if File.exist?(puma_cmd) + + cli = Puma::CLI.new run_args, log_writer + cli.run + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/detect.rb b/vendor/cache/puma-fba741b91780/lib/puma/detect.rb new file mode 100644 index 000000000..a233eb402 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/detect.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +# This file can be loaded independently of puma.rb, so it cannot have any code +# that assumes puma.rb is loaded. + + +module Puma + # @version 5.2.1 + HAS_FORK = ::Process.respond_to? :fork + + HAS_NATIVE_IO_WAIT = ::IO.public_instance_methods(false).include? :wait_readable + + IS_JRUBY = Object.const_defined? :JRUBY_VERSION + + IS_OSX = RUBY_DESCRIPTION.include? 'darwin' + + IS_WINDOWS = RUBY_DESCRIPTION.match?(/mswin|ming|cygwin/) + + IS_LINUX = !(IS_OSX || IS_WINDOWS) + + # @version 5.2.0 + IS_MRI = RUBY_ENGINE == 'ruby' + + def self.jruby? + IS_JRUBY + end + + def self.osx? + IS_OSX + end + + def self.windows? + IS_WINDOWS + end + + # @version 5.0.0 + def self.mri? + IS_MRI + end + + # @version 5.0.0 + def self.forkable? 
+ HAS_FORK + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/dsl.rb b/vendor/cache/puma-fba741b91780/lib/puma/dsl.rb new file mode 100644 index 000000000..497946791 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/dsl.rb @@ -0,0 +1,1425 @@ +# frozen_string_literal: true + +require_relative 'const' +require_relative 'util' + +module Puma + # The methods that are available for use inside the configuration file. + # These same methods are used in Puma cli and the rack handler + # internally. + # + # Used manually (via CLI class): + # + # config = Configuration.new({}) do |user_config| + # user_config.port 3001 + # end + # config.load + # + # puts config.options[:binds] # => "tcp://127.0.0.1:3001" + # + # Used to load file: + # + # $ cat puma_config.rb + # port 3002 + # + # Resulting configuration: + # + # config = Configuration.new(config_file: "puma_config.rb") + # config.load + # + # puts config.options[:binds] # => "tcp://127.0.0.1:3002" + # + # You can also find many examples being used by the test suite in + # +test/config+. + # + # Puma v6 adds the option to specify a key name (String or Symbol) to the + # hooks that run inside the forked workers. All the hooks run inside the + # {Puma::Cluster::Worker#run} method. + # + # Previously, the worker index and the LogWriter instance were passed to the + # hook blocks/procs. If a key name is specified, a hash is passed as the last + # parameter. This allows storage of data, typically objects that are created + # before the worker that need to be passed to the hook when the worker is shutdown. + # + # The following hooks have been updated: + # + # | DSL Method | Options Key | Fork Block Location | + # | on_worker_boot | :before_worker_boot | inside, before | + # | on_worker_shutdown | :before_worker_shutdown | inside, after | + # | on_refork | :before_refork | inside | + # + class DSL + ON_WORKER_KEY = [String, Symbol].freeze + + # Convenience method so logic can be used in CI. 
+ # + # @see ssl_bind + # + def self.ssl_bind_str(host, port, opts) + verify = opts.fetch(:verify_mode, 'none').to_s + + tls_str = + if opts[:no_tlsv1_1] then '&no_tlsv1_1=true' + elsif opts[:no_tlsv1] then '&no_tlsv1=true' + else '' + end + + ca_additions = "&ca=#{Puma::Util.escape(opts[:ca])}" if ['peer', 'force_peer'].include?(verify) + + low_latency_str = opts.key?(:low_latency) ? "&low_latency=#{opts[:low_latency]}" : '' + backlog_str = opts[:backlog] ? "&backlog=#{Integer(opts[:backlog])}" : '' + + if defined?(JRUBY_VERSION) + cipher_suites = opts[:ssl_cipher_list] ? "&ssl_cipher_list=#{opts[:ssl_cipher_list]}" : nil # old name + cipher_suites = "#{cipher_suites}&cipher_suites=#{opts[:cipher_suites]}" if opts[:cipher_suites] + protocols = opts[:protocols] ? "&protocols=#{opts[:protocols]}" : nil + + keystore_additions = "keystore=#{opts[:keystore]}&keystore-pass=#{opts[:keystore_pass]}" + keystore_additions = "#{keystore_additions}&keystore-type=#{opts[:keystore_type]}" if opts[:keystore_type] + if opts[:truststore] + truststore_additions = "&truststore=#{opts[:truststore]}" + truststore_additions = "#{truststore_additions}&truststore-pass=#{opts[:truststore_pass]}" if opts[:truststore_pass] + truststore_additions = "#{truststore_additions}&truststore-type=#{opts[:truststore_type]}" if opts[:truststore_type] + end + + "ssl://#{host}:#{port}?#{keystore_additions}#{truststore_additions}#{cipher_suites}#{protocols}" \ + "&verify_mode=#{verify}#{tls_str}#{ca_additions}#{backlog_str}" + else + ssl_cipher_filter = opts[:ssl_cipher_filter] ? "&ssl_cipher_filter=#{opts[:ssl_cipher_filter]}" : nil + ssl_ciphersuites = opts[:ssl_ciphersuites] ? "&ssl_ciphersuites=#{opts[:ssl_ciphersuites]}" : nil + v_flags = (ary = opts[:verification_flags]) ? "&verification_flags=#{Array(ary).join ','}" : nil + + cert_flags = (cert = opts[:cert]) ? "cert=#{Puma::Util.escape(cert)}" : nil + key_flags = (key = opts[:key]) ? 
"&key=#{Puma::Util.escape(key)}" : nil + password_flags = (password_command = opts[:key_password_command]) ? "&key_password_command=#{Puma::Util.escape(password_command)}" : nil + + reuse_flag = + if (reuse = opts[:reuse]) + if reuse == true + '&reuse=dflt' + elsif reuse.is_a?(Hash) && (reuse.key?(:size) || reuse.key?(:timeout)) + val = +'' + if (size = reuse[:size]) && Integer === size + val << size.to_s + end + if (timeout = reuse[:timeout]) && Integer === timeout + val << ",#{timeout}" + end + if val.empty? + nil + else + "&reuse=#{val}" + end + else + nil + end + else + nil + end + + "ssl://#{host}:#{port}?#{cert_flags}#{key_flags}#{password_flags}#{ssl_cipher_filter}#{ssl_ciphersuites}" \ + "#{reuse_flag}&verify_mode=#{verify}#{tls_str}#{ca_additions}#{v_flags}#{backlog_str}#{low_latency_str}" + end + end + + def initialize(options, config) + @config = config + @options = options + + @plugins = [] + end + + def _load_from(path) + if path + @path = path + instance_eval(File.read(path), path, 1) + end + ensure + _offer_plugins + end + + def _offer_plugins + @plugins.each do |o| + if o.respond_to? :config + @options.shift + o.config self + end + end + + @plugins.clear + end + + def set_default_host(host) + @options[:default_host] = host + end + + def default_host + @options[:default_host] || Configuration::DEFAULTS[:tcp_host] + end + + def inject(&blk) + instance_eval(&blk) + end + + def get(key,default=nil) + @options[key.to_sym] || default + end + + # Load the named plugin for use by this configuration. + # + # @example + # plugin :tmp_restart + # + def plugin(name) + @plugins << @config.load_plugin(name) + end + + # Use an object or block as the rack application. This allows the + # configuration file to be the application itself. + # + # @example + # app do |env| + # body = 'Hello, World!' 
#
#     [
#       200,
#       { 'Content-Type' => 'text/plain', 'Content-Length' => body.length.to_s },
#       [body]
#     ]
#   end
#
# @see Puma::Configuration#app
#
def app(obj = nil, &block)
  obj ||= block

  raise "Provide either a #call'able or a block" unless obj

  @options[:app] = obj
end

# Start the Puma control rack application on +url+. This application can
# be communicated with to control the main server. Optionally, an
# authentication token can be set; all requests to the control server
# must then include it as a query parameter.
#
# Check out {Puma::App::Status} to see what the app has available.
#
# @example
#   activate_control_app 'unix:///var/run/pumactl.sock'
# @example
#   activate_control_app 'unix:///var/run/pumactl.sock', { auth_token: '12345' }
# @example
#   activate_control_app 'unix:///var/run/pumactl.sock', { no_token: true }
#
def activate_control_app(url = "auto", opts = {})
  if url == "auto"
    path = Configuration.temp_path
    @options[:control_url] = "unix://#{path}"
    @options[:control_url_temp] = path
  else
    @options[:control_url] = url
  end

  # 'none' (a String, not :none) because the value is later handed to an
  # OptionParser instance, which doesn't support symbols as option values.
  # See: https://github.com/puma/puma/issues/1193#issuecomment-305995488
  @options[:control_auth_token] =
    if opts[:no_token]
      'none'
    else
      opts[:auth_token] || Configuration.random_token
    end

  @options[:control_url_umask] = opts[:umask] if opts[:umask]
end

# Load additional configuration from a file.
# Files get loaded later via Configuration#load.
#
# @example
#   load 'config/puma/production.rb'
#
def load(file)
  (@options[:config_files] ||= []) << file
end

# Bind the server to +url+.
# "tcp://", "unix://" and "ssl://" are the only
# accepted protocols. Multiple urls can be bound to; calling +bind+ does
# not overwrite previous bindings.
#
# The default is "tcp://0.0.0.0:9292".
#
# You can use query parameters within the url to specify options:
#
# * Set the socket backlog depth with +backlog+, default is 1024.
# * Set up an SSL certificate with +key+ & +cert+.
# * Set up an SSL certificate for mTLS with +key+, +cert+, +ca+ and +verify_mode+.
# * Set whether to optimize for low latency instead of throughput with
#   +low_latency+; the default is to not optimize for low latency. This is
#   done via +Socket::TCP_NODELAY+.
# * Set socket permissions with +umask+.
#
# @example Backlog depth
#   bind 'unix:///var/run/puma.sock?backlog=512'
# @example SSL cert
#   bind 'ssl://127.0.0.1:9292?key=key.key&cert=cert.pem'
# @example SSL cert for mutual TLS (mTLS)
#   bind 'ssl://127.0.0.1:9292?key=key.key&cert=cert.pem&ca=ca.pem&verify_mode=force_peer'
# @example Disable optimization for low latency
#   bind 'tcp://0.0.0.0:9292?low_latency=false'
# @example Socket permissions
#   bind 'unix:///var/run/puma.sock?umask=0111'
#
# @see Puma::Runner#load_and_bind
# @see Puma::Cluster#run
#
def bind(url)
  (@options[:binds] ||= []) << url
end

# Drop all previously configured binds.
def clear_binds!
  @options[:binds] = []
end

# Bind to (systemd) activated sockets, regardless of configured binds.
#
# Systemd can present sockets as file descriptors that are already opened.
# By default Puma will use these but only if it was explicitly told to bind
# to the socket. If not, it will close the activated sockets. This means
# all configuration is duplicated.
#
# Binds can contain additional configuration, but only SSL config is really
# relevant since the unix and TCP socket options are ignored.
#
# This means there is a lot of duplicated configuration for no additional
# value in most setups.
# This method tells the launcher to bind to all
# activated sockets, regardless of existing bind.
#
# To clear configured binds, the value 'only' can be passed. This will
# clear out any binds that may have been configured.
#
# @example Use any systemd activated sockets as well as configured binds
#   bind_to_activated_sockets
#
# @example Only bind to systemd activated sockets, ignoring other binds
#   bind_to_activated_sockets 'only'
#
def bind_to_activated_sockets(bind = true)
  @options[:bind_to_activated_sockets] = bind
end

# Define the TCP port to bind to. Use `bind` for more advanced options.
#
# The default is +9292+.
#
# @example
#   port 3000
#
def port(port, host = nil)
  host ||= default_host
  bind URI::Generic.build(scheme: 'tcp', host: host, port: Integer(port)).to_s
end

# Define how long the tcp socket stays open, if no data has been received.
#
# The default is 30 seconds.
#
# @example
#   first_data_timeout 40
#
# @see Puma::Server.new
#
def first_data_timeout(seconds)
  @options[:first_data_timeout] = Integer(seconds)
end

# Define how long persistent connections can be idle before Puma closes them.
#
# The default is 20 seconds.
#
# @example
#   persistent_timeout 30
#
# @see Puma::Server.new
#
def persistent_timeout(seconds)
  @options[:persistent_timeout] = Integer(seconds)
end

# If a new request is not received within this number of seconds, begin
# shutting down.
#
# The default is +nil+ (never).
#
# @example
#   idle_timeout 60
#
# @see Puma::Server.new
#
def idle_timeout(seconds)
  @options[:idle_timeout] = Integer(seconds)
end

# Work around leaky apps that leave garbage in Thread locals
# across requests.
#
# The default is +false+.
#
# @example
#   clean_thread_locals
#
def clean_thread_locals(which = true)
  @options[:clean_thread_locals] = which
end

# When shutting down, drain the accept socket of pending connections and
# process them. This loops over the accept socket until there are no more
# read events and then stops looking and waits for the requests to finish.
#
# @see Puma::Server#graceful_shutdown
#
def drain_on_shutdown(which = true)
  @options[:drain_on_shutdown] = which
end

# Set the environment in which the rack's app will run. The value must be
# a string.
#
# The default is "development".
#
# @example
#   environment 'production'
#
def environment(environment)
  @options[:environment] = environment
end

# How long to wait for threads to stop when shutting them down.
# Specifying :immediately will cause Puma to kill the threads immediately.
# Otherwise the value is the number of seconds to wait.
#
# Puma always waits a few seconds after killing a thread for it to try
# to finish up its work, even in :immediately mode.
#
# The default is +:forever+.
#
# @see Puma::Server#graceful_shutdown
#
def force_shutdown_after(val = :forever)
  # Internally, -1 means "wait forever" and 0 means "kill immediately".
  @options[:force_shutdown_after] =
    case val
    when :forever     then -1
    when :immediately then 0
    else                   Float(val)
    end
end

# Code to run before doing a restart. This code should
# close log files, database connections, etc.
#
# This can be called multiple times to add code each time.
#
# @example
#   on_restart do
#     puts 'On restart...'
#   end
#
def on_restart(&block)
  (@options[:on_restart] ||= []) << block
end

# Command to use to restart Puma. This should be just how to
# load Puma itself (ie. 'ruby -Ilib bin/puma'), not the arguments
# to Puma, as those are the same as the original process.
#
# @example
#   restart_command '/u/app/lolcat/bin/restart_puma'
#
def restart_command(cmd)
  @options[:restart_cmd] = cmd.to_s
end

# Store the pid of the server in the file at "path".
#
# @example
#   pidfile '/u/apps/lolcat/tmp/pids/puma.pid'
#
def pidfile(path)
  @options[:pidfile] = path.to_s
end

# Disable request logging, the inverse of `log_requests`.
#
# The default is +true+.
#
# @example
#   quiet
#
def quiet(which = true)
  @options[:log_requests] = !which
end

# Enable request logging, the inverse of `quiet`.
#
# The default is +false+.
#
# @example
#   log_requests
#
def log_requests(which = true)
  @options[:log_requests] = which
end

# Pass in a custom logging class instance.
#
# @example
#   custom_logger Logger.new('t.log')
#
def custom_logger(custom_logger)
  @options[:custom_logger] = custom_logger
end

# Show debugging info.
#
# The default is +false+.
#
# @example
#   debug
#
def debug
  @options[:debug] = true
end

# Load +path+ as a rackup file. The first value set wins (+||=+).
#
# The default is "config.ru".
#
# @example
#   rackup '/u/apps/lolcat/config.ru'
#
def rackup(path)
  @options[:rackup] ||= path.to_s
end

# Allows setting `env['rack.url_scheme']`.
# Only necessary if X-Forwarded-Proto is not being set by your proxy.
# Normal values are 'http' or 'https'.
#
def rack_url_scheme(scheme = nil)
  @options[:rack_url_scheme] = scheme
end

# Enable HTTP 103 Early Hints responses.
#
# The default is +nil+.
#
# @example
#   early_hints
#
def early_hints(answer = true)
  @options[:early_hints] = answer
end

# Redirect +STDOUT+ and +STDERR+ to files specified. The +append+ parameter
# specifies whether the output is appended.
#
# The default is +false+.
#
# @example
#   stdout_redirect '/app/lolcat/log/stdout', '/app/lolcat/log/stderr'
# @example
#   stdout_redirect '/app/lolcat/log/stdout', '/app/lolcat/log/stderr', true
#
def stdout_redirect(stdout = nil, stderr = nil, append = false)
  @options[:redirect_stdout] = stdout
  @options[:redirect_stderr] = stderr
  @options[:redirect_append] = append
end

# Use +block+ to format log entries.
def log_formatter(&block)
  @options[:log_formatter] = block
end

# Configure the number of threads to use to answer requests.
#
# It can be a single fixed number, or a +min+ and a +max+.
#
# The default is the environment variables +PUMA_MIN_THREADS+ / +PUMA_MAX_THREADS+
# (or +MIN_THREADS+ / +MAX_THREADS+ if the +PUMA_+ variables aren't set).
#
# If these environment variables aren't set, the default is "0, 5" in MRI or
# "0, 16" for other interpreters.
#
# @example
#   threads 5
# @example
#   threads 0, 16
# @example
#   threads 5, 5
#
def threads(min, max = min)
  min = Integer(min)
  max = Integer(max)

  raise "The minimum (#{min}) number of threads must be less than or equal to the max (#{max})" if min > max
  raise "The maximum number of threads (#{max}) must be greater than 0" if max < 1

  @options[:min_threads] = min
  @options[:max_threads] = max
end

# Instead of using +bind+ and manually constructing a URI like:
#
#   bind 'ssl://127.0.0.1:9292?key=key_path&cert=cert_path'
#
# you can use this method.
#
# When binding on localhost you don't need to specify +cert+ and +key+;
# Puma will assume you are using the +localhost+ gem and try to load the
# appropriate files.
#
# When using the options hash parameter, the `reuse:` value is either
# `true`, which sets reuse 'on' with default values, or a hash, with `:size`
# and/or `:timeout` keys, each with integer values.
#
# The `cert:` options hash parameter can be the path to a certificate
# file including all intermediate certificates in PEM format.
#
# The `cert_pem:` options hash parameter can be a String containing the
# certificate and all intermediate certificates in PEM format.
#
# @example
#   ssl_bind '127.0.0.1', '9292', {
#     cert: path_to_cert,
#     key: path_to_key,
#     ssl_cipher_filter: cipher_filter, # optional
#     ssl_ciphersuites: ciphersuites,   # optional
#     verify_mode: verify_mode,         # default 'none'
#     verification_flags: flags,        # optional, not supported by JRuby
#     reuse: true                       # optional
#   }
#
# @example Using self-signed certificate with the +localhost+ gem:
#   ssl_bind '127.0.0.1', '9292'
#
# @example Alternatively, you can provide +cert_pem+ and +key_pem+:
#   ssl_bind '127.0.0.1', '9292', {
#     cert_pem: File.read(path_to_cert),
#     key_pem: File.read(path_to_key),
#     reuse: {size: 2_000, timeout: 20} # optional
#   }
#
# @example For JRuby, two keys are required: +keystore+ & +keystore_pass+
#   ssl_bind '127.0.0.1', '9292', {
#     keystore: path_to_keystore,
#     keystore_pass: password,
#     ssl_cipher_list: cipher_list, # optional
#     verify_mode: verify_mode      # default 'none'
#   }
#
def ssl_bind(host, port, opts = {})
  # PEM values are stashed in options[:store] rather than the URI.
  add_pem_values_to_options_store(opts)
  bind self.class.ssl_bind_str(host, port, opts)
end

# Use +path+ as the file to store the server info state. This is
# used by +pumactl+ to query and control the server.
#
# @example
#   state_path '/u/apps/lolcat/tmp/pids/puma.state'
#
def state_path(path)
  @options[:state] = path.to_s
end

# Use +permission+ to restrict permissions for the state file.
#
# @example
#   state_permission 0600
#
# @version 5.0.0
#
def state_permission(permission)
  @options[:state_permission] = permission
end

# How many worker processes to run. Typically this is set to
# the number of available cores.
#
# The default is the value of the environment variable +WEB_CONCURRENCY+ if
# set, otherwise 0.
#
# @note Cluster mode only.
#
# @example
#   workers 2
#
# @see Puma::Cluster
#
def workers(count)
  @options[:workers] = count.to_i
end

# Disable warning message when running in cluster mode with a single worker.
#
# Cluster mode has some overhead of running an additional 'control' process
# in order to manage the cluster. If only running a single worker it is
# likely not worth paying that overhead vs running in single mode with
# additional threads instead.
#
# There are some scenarios where running cluster mode with a single worker
# may still be warranted and valid under certain deployment scenarios, see
# https://github.com/puma/puma/issues/2534
#
# Moving from workers = 1 to workers = 0 will save 10-30% of memory use.
#
# The default is +false+.
#
# @note Cluster mode only.
#
# @example
#   silence_single_worker_warning
#
def silence_single_worker_warning
  @options[:silence_single_worker_warning] = true
end

# Disable warning message when running single mode with callback hook defined.
#
# The default is +false+.
#
# @example
#   silence_fork_callback_warning
#
def silence_fork_callback_warning
  @options[:silence_fork_callback_warning] = true
end

# Code to run immediately before the master process
# forks workers (once on boot). These hooks can block if necessary
# to wait for background operations unknown to Puma to finish before
# the process terminates.
# This can be used to close any connections to remote servers (database,
# Redis, ...) that were opened when preloading the code.
#
# This can be called multiple times to add several hooks.
#
# @note Cluster mode only.
#
# @example
#   before_fork do
#     puts "Starting workers..."
#   end
#
def before_fork(&block)
  warn_if_in_single_mode('before_fork')

  (@options[:before_fork] ||= []) << block
end

# Code to run in a worker when it boots to setup
# the process before booting the app.
#
# This can be called multiple times to add several hooks.
#
# @note Cluster mode only.
#
# @example
#   on_worker_boot do
#     puts 'Before worker boot...'
#   end
#
def on_worker_boot(key = nil, &block)
  warn_if_in_single_mode('on_worker_boot')

  process_hook :before_worker_boot, key, block, 'on_worker_boot'
end

# Code to run immediately before a worker shuts
# down (after it has finished processing HTTP requests). The worker's
# index is passed as an argument. These hooks
# can block if necessary to wait for background operations unknown
# to Puma to finish before the process terminates.
#
# This can be called multiple times to add several hooks.
#
# @note Cluster mode only.
#
# @example
#   on_worker_shutdown do
#     puts 'On worker shutdown...'
#   end
#
def on_worker_shutdown(key = nil, &block)
  warn_if_in_single_mode('on_worker_shutdown')

  process_hook :before_worker_shutdown, key, block, 'on_worker_shutdown'
end

# Code to run in the master right before a worker is started. The worker's
# index is passed as an argument.
#
# This can be called multiple times to add several hooks.
#
# @note Cluster mode only.
#
# @example
#   on_worker_fork do
#     puts 'Before worker fork...'
#   end
#
def on_worker_fork(&block)
  warn_if_in_single_mode('on_worker_fork')

  process_hook :before_worker_fork, nil, block, 'on_worker_fork'
end

# Code to run in the master after a worker has been started. The worker's
# index is passed as an argument.
#
# This is called every time a worker is to be started.
#
# @note Cluster mode only.
#
# @example
#   after_worker_fork do
#     puts 'After worker fork...'
#   end
#
def after_worker_fork(&block)
  warn_if_in_single_mode('after_worker_fork')

  process_hook :after_worker_fork, nil, block, 'after_worker_fork'
end

alias_method :after_worker_boot, :after_worker_fork

# Code to run after puma is booted (works for both: single and clustered).
#
# @example
#   on_booted do
#     puts 'After booting...'
#   end
#
def on_booted(&block)
  @config.options[:events].on_booted(&block)
end

# Code to run after puma is stopped (works for both: single and clustered).
#
# @example
#   on_stopped do
#     puts 'After stopping...'
#   end
#
def on_stopped(&block)
  @config.options[:events].on_stopped(&block)
end

# When `fork_worker` is enabled, code to run in Worker 0
# before all other workers are re-forked from this process,
# after the server has temporarily stopped serving requests
# (once per complete refork cycle).
#
# This can be used to trigger extra garbage-collection to maximize
# copy-on-write efficiency, or close any connections to remote servers
# (database, Redis, ...) that were opened while the server was running.
#
# This can be called multiple times to add several hooks.
#
# @note Cluster mode with `fork_worker` enabled only.
#
# @example
#   on_refork do
#     3.times {GC.start}
#   end
#
# @version 5.0.0
#
def on_refork(key = nil, &block)
  process_hook :before_refork, key, block, 'on_refork'
end

# Provide a block to be executed just before a thread is added to the thread
# pool. Be careful: while the block executes, thread creation is delayed, and
# probably a request will have to wait too! The new thread will not be added to
# the threadpool until the provided block returns.
#
# Return values are ignored.
# Raising an exception will log a warning.
#
# This hook is useful for doing something when the thread pool grows.
#
# This can be called multiple times to add several hooks.
#
# @example
#   on_thread_start do
#     puts 'On thread start...'
#   end
#
def on_thread_start(&block)
  (@options[:before_thread_start] ||= []) << block
end

# Provide a block to be executed after a thread is trimmed from the thread
# pool. Be careful: while this block executes, Puma's main loop is
# blocked, so no new requests will be picked up.
#
# This hook only runs when a thread in the threadpool is trimmed by Puma.
# It does not run when a thread dies due to exceptions or any other cause.
#
# Return values are ignored.
# Raising an exception will log a warning.
#
# This hook is useful for cleaning up thread local resources when a thread
# is trimmed.
#
# This can be called multiple times to add several hooks.
#
# @example
#   on_thread_exit do
#     puts 'On thread exit...'
#   end
#
def on_thread_exit(&block)
  (@options[:before_thread_exit] ||= []) << block
end

# Code to run out-of-band when the worker is idle.
# These hooks run immediately after a request has finished
# processing and there are no busy threads on the worker.
# The worker doesn't accept new requests until this code finishes.
#
# This hook is useful for running out-of-band garbage collection
# or scheduling asynchronous tasks to execute after a response.
#
# This can be called multiple times to add several hooks.
#
def out_of_band(&block)
  process_hook :out_of_band, nil, block, 'out_of_band'
end

# The directory to operate out of.
#
# The default is the current directory.
#
# @example
#   directory '/u/apps/lolcat'
#
def directory(dir)
  @options[:directory] = dir.to_s
end

# Preload the application before starting the workers; this conflicts with
# phased restart feature.
#
# The default is +true+ if your app uses more than 1 worker.
#
# @note Cluster mode only.
#
# @example
#   preload_app!
#
def preload_app!(answer = true)
  @options[:preload_app] = answer
end

# Use +obj+ or +block+ as the low level error handler. This allows the
# configuration file to change the default error on the server.
#
# @example
#   lowlevel_error_handler do |err|
#     [200, {}, ["error page"]]
#   end
#
def lowlevel_error_handler(obj = nil, &block)
  obj ||= block
  raise "Provide either a #call'able or a block" unless obj
  @options[:lowlevel_error_handler] = obj
end

# This option is used to allow your app and its gems to be
# properly reloaded when not using preload.
#
# When set, if Puma detects that it's been invoked in the
# context of Bundler, it will cleanup the environment and
# re-run itself outside the Bundler environment, but directly
# using the files that Bundler has setup.
#
# This means that Puma is now decoupled from your Bundler
# context and when each worker loads, it will be loading a
# new Bundler context and thus can float around as the release
# dictates.
#
# @note This is incompatible with +preload_app!+.
# @note This is only supported for RubyGems 2.2+
#
# @see extra_runtime_dependencies
#
def prune_bundler(answer = true)
  @options[:prune_bundler] = answer
end

# Raises a SignalException when SIGTERM is received. In environments where
# SIGTERM is something expected, you can suppress these with this option.
#
# This can be useful for example in Kubernetes, where rolling restart is
# guaranteed usually on the infrastructure level.
#
# The default is +true+.
#
# @example
#   raise_exception_on_sigterm false
#
# @see Puma::Launcher#setup_signals
# @see Puma::Cluster#setup_signals
#
def raise_exception_on_sigterm(answer = true)
  @options[:raise_exception_on_sigterm] = answer
end

# When using prune_bundler, if extra runtime dependencies need to be loaded to
# initialize your app, then this setting can be used. This includes any Puma plugins.
#
# Before bundler is pruned, the gem names supplied will be looked up in the
# bundler context and then loaded again after bundler is pruned.
# Only applies if prune_bundler is used.
#
# @example
#   extra_runtime_dependencies ['gem_name_1', 'gem_name_2']
# @example
#   extra_runtime_dependencies ['puma_worker_killer', 'puma-heroku']
#
# @see Puma::Launcher#extra_runtime_deps_directories
#
def extra_runtime_dependencies(answer = [])
  @options[:extra_runtime_dependencies] = Array(answer)
end

# Additional text to display in process listing.
#
# If you do not specify a tag, Puma will infer it. If you do not want Puma
# to add a tag, use an empty string.
#
# The default is the current file or directory base name.
#
# @example
#   tag 'app name'
# @example
#   tag ''
#
def tag(string)
  @options[:tag] = string.to_s
end

# Change the default interval for checking workers.
#
# The default is 5 seconds.
#
# @note Cluster mode only.
#
# @example
#   worker_check_interval 10
#
# @see Puma::Cluster#check_workers
#
def worker_check_interval(interval)
  @options[:worker_check_interval] = Integer(interval)
end

# Verifies that all workers have checked in to the master process within
# the given timeout. If not the worker process will be restarted. This is
# not a request timeout, it is to protect against a hung or dead process.
# Setting this value will not protect against slow requests.
#
# This value must be greater than worker_check_interval.
#
# The default is 60 seconds.
#
# @note Cluster mode only.
#
# @example
#   worker_timeout 60
#
# @see Puma::Cluster::Worker#ping_timeout
#
def worker_timeout(timeout)
  timeout = Integer(timeout)
  min = @options.fetch(:worker_check_interval, Configuration::DEFAULTS[:worker_check_interval])

  if timeout <= min
    raise "The minimum worker_timeout must be greater than the worker reporting interval (#{min})"
  end

  @options[:worker_timeout] = timeout
end

# Change the default worker timeout for booting.
#
# The default is the value of `worker_timeout`.
#
# @note Cluster mode only.
#
# @example
#   worker_boot_timeout 60
#
# @see Puma::Cluster::Worker#ping_timeout
#
def worker_boot_timeout(timeout)
  @options[:worker_boot_timeout] = Integer(timeout)
end

# Set the timeout for worker shutdown.
#
# The default is 60 seconds.
#
# @note Cluster mode only.
#
# @example
#   worker_shutdown_timeout 90
#
# @see Puma::Cluster::Worker#term
#
def worker_shutdown_timeout(timeout)
  @options[:worker_shutdown_timeout] = Integer(timeout)
end

# Set the strategy for worker culling.
#
# There are two possible values:
#
# 1. **:youngest** - the youngest workers (i.e. the workers that were
#    the most recently started) will be culled.
# 2. **:oldest** - the oldest workers (i.e. the workers that were started
#    the longest time ago) will be culled.
#
# The default is +:youngest+.
#
# @note Cluster mode only.
#
# @example
#   worker_culling_strategy :oldest
#
# @see Puma::Cluster#cull_workers
#
def worker_culling_strategy(strategy)
  # Normalize to a Symbol first so both 'oldest' and :oldest are accepted.
  # (Previously the symbolized value was assigned to a misspelled local,
  # while the raw argument was validated and stored — so String arguments
  # such as 'oldest' were incorrectly rejected.)
  strategy = strategy.to_sym

  unless [:youngest, :oldest].include?(strategy)
    raise "Invalid value for worker_culling_strategy - #{strategy}"
  end

  @options[:worker_culling_strategy] = strategy
end

# When set to true, workers accept all requests
# and queue them before passing them to the handlers.
# When set to false, each worker process accepts exactly as
# many requests as it is configured to simultaneously handle.
#
# Queueing requests generally improves performance. In some
# cases, such as a single threaded application, it may be
# better to ensure requests get balanced across workers.
#
# Note that setting this to false disables HTTP keepalive and
# slow clients will occupy a handler thread while the request
# is being sent. A reverse proxy, such as nginx, can handle
# slow clients and queue requests before they reach Puma.
#
# The default is +true+.
#
# @see Puma::Server
#
def queue_requests(answer = true)
  @options[:queue_requests] = answer
end

# When a shutdown is requested, the backtraces of all the
# threads will be written to $stdout. This can help figure
# out why shutdown is hanging.
#
def shutdown_debug(val = true)
  @options[:shutdown_debug] = val
end

# Attempts to route traffic to less-busy workers by causing them to delay
# listening on the socket, allowing workers which are not processing any
# requests to pick up new requests first.
#
# The default is 0.005 seconds.
#
# Only works on MRI. For all other interpreters, this setting does nothing.
#
# @see Puma::Server#handle_servers
# @see Puma::ThreadPool#wait_for_less_busy_worker
#
# @version 5.0.0
#
def wait_for_less_busy_worker(val = 0.005)
  @options[:wait_for_less_busy_worker] = val.to_f
end

# Control how the remote address of the connection is set. This
# is configurable because to calculate the true socket peer address
# a kernel syscall is required which for very fast rack handlers
# slows down the handling significantly.
#
# There are 5 possible values:
#
# 1. **:socket** - read the peername from the socket using the
#    syscall. This is the normal behavior.
# If this fails for any reason (e.g.,
#    if the peer disconnects between the connection being accepted and the
#    getpeername system call), Puma will return "0.0.0.0"
# 2. **:localhost** - set the remote address to "127.0.0.1"
# 3. **header:** - set the remote address to the value of the
#    provided http header. For instance:
#    `set_remote_address header: "X-Real-IP"`.
#    Only the first word (as separated by spaces or comma) is used, allowing
#    headers such as X-Forwarded-For to be used as well. If this header is
#    absent, Puma will fall back to the behavior of :socket
# 4. **proxy_protocol: :v1** - set the remote address to the value read from
#    the HAproxy PROXY protocol, version 1. If the request does not have the
#    PROXY protocol attached to it, will fall back to :socket
# 5. **\<any string\>** - this allows you to hardcode remote address to any
#    value you wish. Because Puma never uses this field anyway, its format is
#    entirely in your hands.
#
# The default is +:socket+.
#
# @example
#   set_remote_address :localhost
#
def set_remote_address(val = :socket)
  case val
  when :socket
    @options[:remote_address] = val
  when :localhost
    @options[:remote_address] = :value
    @options[:remote_address_value] = "127.0.0.1".freeze
  when String
    @options[:remote_address] = :value
    @options[:remote_address_value] = val
  when Hash
    if hdr = val[:header]
      # Rack env header names are upcased, dash→underscore, HTTP_-prefixed.
      @options[:remote_address] = :header
      @options[:remote_address_header] = "HTTP_" + hdr.upcase.tr("-", "_")
    elsif protocol_version = val[:proxy_protocol]
      @options[:remote_address] = :proxy_protocol
      protocol_version = protocol_version.downcase.to_sym
      unless [:v1].include?(protocol_version)
        raise "Invalid value for proxy_protocol - #{protocol_version.inspect}"
      end
      @options[:remote_address_proxy_protocol] = protocol_version
    else
      raise "Invalid value for set_remote_address - #{val.inspect}"
    end
  else
    raise "Invalid value for set_remote_address - #{val}"
  end
end

# When enabled, workers will be forked from worker 0 instead of from the
# master process. This option is similar to `preload_app` because the app
# is preloaded before forking, but it is compatible with phased restart.
#
# This option also enables the `refork` command (SIGURG), which optimizes
# copy-on-write performance in a running app.
#
# A refork will automatically trigger once after the specified number of
# requests (default 1000), or pass 0 to disable auto refork.
#
# @note Cluster mode only.
#
# @version 5.0.0
#
def fork_worker(after_requests = 1000)
  @options[:fork_worker] = Integer(after_requests)
end

# The number of requests to attempt inline before sending a client back to
# the reactor to be subject to normal ordering.
#
# The default is 10.
#
# @example
#   max_fast_inline 20
#
def max_fast_inline(num_of_requests)
  # Stored as a Float so callers may pass ::Float::INFINITY.
  @options[:max_fast_inline] = Float(num_of_requests)
end

# When `true`, keep-alive connections are maintained on inbound requests.
# Enabling this setting reduces the number of TCP operations, reducing
# response times for connections that can send multiple requests in a
# single connection.
#
# When Puma receives more incoming connections than available Puma threads,
# enabling the keep-alive behavior may result in processing requests
# out-of-order, increasing overall response time variance. Increased
# response time variance means that the overall average of response times
# might not change, but more outliers will exist. Those long-tail outliers
# may significantly affect response times for some processed requests.
#
# When `false`, Puma closes the connection after each request, requiring the
# client to open a new request. Disabling this setting guarantees that
# requests will be processed in the order they are fully received,
# decreasing response variance and eliminating long-tail outliers caused by
# keep-alive behavior. The trade-off is that the number of TCP operations
# required will increase.
#
# The default is +true+.
#
# @example
#   enable_keep_alives false
#
def enable_keep_alives(enabled = true)
  @options[:enable_keep_alives] = enabled
end

# Specify the backend for the IO selector.
#
# Provided values will be passed directly to +NIO::Selector.new+, with the
# exception of +:auto+ which will let nio4r choose the backend.
#
# Check the documentation of +NIO::Selector.backends+ for the list of valid
# options. Note that the available options on your system will depend on the
# operating system. If you want to use the pure Ruby backend (not
# recommended due to its comparatively low performance), set environment
# variable +NIO4R_PURE+ to +true+.
#
# The default is +:auto+.
+ # + # @see https://github.com/socketry/nio4r/blob/master/lib/nio/selector.rb + # + def io_selector_backend(backend) + @options[:io_selector_backend] = backend.to_sym + end + + # Ensures +STDOUT+ and +STDERR+ is immediately flushed to the underlying + # operating system and is not buffered internally + # + # The default is +true+. + # + # @example + # mutate_stdout_and_stderr_to_sync_on_write false + # + def mutate_stdout_and_stderr_to_sync_on_write(enabled=true) + @options[:mutate_stdout_and_stderr_to_sync_on_write] = enabled + end + + # Specify how big the request payload should be, in bytes. + # This limit is compared against Content-Length HTTP header. + # If the payload size (CONTENT_LENGTH) is larger than http_content_length_limit, + # HTTP 413 status code is returned. + # + # When no Content-Length http header is present, it is compared against the + # size of the body of the request. + # + # The default is +nil+. + # + # @example + # http_content_length_limit 2_000_000_000 + # + def http_content_length_limit(limit) + @options[:http_content_length_limit] = limit + end + + # Supported http methods, which will replace `Puma::Const::SUPPORTED_HTTP_METHODS`. + # The value of `:any` will allows all methods, otherwise, the value must be + # an array of strings. Note that methods are all uppercase. + # + # `Puma::Const::SUPPORTED_HTTP_METHODS` is conservative, if you want a + # complete set of methods, the methods defined by the + # [IANA Method Registry](https://www.iana.org/assignments/http-methods/http-methods.xhtml) + # are pre-defined as the constant `Puma::Const::IANA_HTTP_METHODS`. + # + # @note If the `methods` value is `:any`, no method check with be performed, + # similar to Puma v5 and earlier. 
+ # + # @example Adds 'PROPFIND' to existing supported methods + # supported_http_methods(Puma::Const::SUPPORTED_HTTP_METHODS + ['PROPFIND']) + # @example Restricts methods to the array elements + # supported_http_methods %w[HEAD GET POST PUT DELETE OPTIONS PROPFIND] + # @example Restricts methods to the methods in the IANA Registry + # supported_http_methods Puma::Const::IANA_HTTP_METHODS + # @example Allows any method + # supported_http_methods :any + # + def supported_http_methods(methods) + if methods == :any + @options[:supported_http_methods] = :any + elsif Array === methods && methods == (ary = methods.grep(String).uniq) && + !ary.empty? + @options[:supported_http_methods] = ary + else + raise "supported_http_methods must be ':any' or a unique array of strings" + end + end + + private + + # To avoid adding cert_pem and key_pem as URI params, we store them on the + # options[:store] from where Puma binder knows how to find and extract them. + # + def add_pem_values_to_options_store(opts) + return if defined?(JRUBY_VERSION) + + @options[:store] ||= [] + + # Store cert_pem and key_pem to options[:store] if present + [:cert, :key].each do |v| + opt_key = :"#{v}_pem" + if opts[opt_key] + index = @options[:store].length + @options[:store] << opts[opt_key] + opts[v] = "store:#{index}" + end + end + end + + def process_hook(options_key, key, block, meth) + @options[options_key] ||= [] + if ON_WORKER_KEY.include? key.class + @options[options_key] << [block, key.to_sym] + elsif key.nil? 
+ @options[options_key] << block + else + raise "'#{meth}' key must be String or Symbol" + end + end + + def warn_if_in_single_mode(hook_name) + return if @options[:silence_fork_callback_warning] + # user_options (CLI) have precedence over config file + workers_val = @config.options.user_options[:workers] || @options[:workers] || + @config.puma_default_options[:workers] || 0 + if workers_val == 0 + log_string = + "Warning: You specified code to run in a `#{hook_name}` block, " \ + "but Puma is not configured to run in cluster mode (worker count > 0 ), " \ + "so your `#{hook_name}` block did not run" + + LogWriter.stdio.log(log_string) + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/error_logger.rb b/vendor/cache/puma-fba741b91780/lib/puma/error_logger.rb new file mode 100644 index 000000000..792e14a6a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/error_logger.rb @@ -0,0 +1,113 @@ +# frozen_string_literal: true + +require_relative 'const' + +module Puma + # The implementation of a detailed error logging. + # @version 5.0.0 + # + class ErrorLogger + include Const + + attr_reader :ioerr + + REQUEST_FORMAT = %{"%s %s%s" - (%s)} + + LOG_QUEUE = Queue.new + + def initialize(ioerr, env: ENV) + @ioerr = ioerr + + @debug = env.key?('PUMA_DEBUG') + end + + def self.stdio(env: ENV) + new($stderr, env: env) + end + + # Print occurred error details. + # +options+ hash with additional options: + # - +error+ is an exception object + # - +req+ the http request + # - +text+ (default nil) custom string to print in title + # and before all remaining info. + # + def info(options={}) + internal_write title(options) + end + + # Print occurred error details only if + # environment variable PUMA_DEBUG is defined. + # +options+ hash with additional options: + # - +error+ is an exception object + # - +req+ the http request + # - +text+ (default nil) custom string to print in title + # and before all remaining info. 
+ # + def debug(options={}) + return unless @debug + + error = options[:error] + req = options[:req] + + string_block = [] + string_block << title(options) + string_block << request_dump(req) if request_parsed?(req) + string_block << error.backtrace if error + + internal_write string_block.join("\n") + end + + def title(options={}) + text = options[:text] + req = options[:req] + error = options[:error] + + string_block = ["#{Time.now}"] + string_block << " #{text}" if text + string_block << " (#{request_title(req)})" if request_parsed?(req) + string_block << ": #{error.inspect}" if error + string_block.join('') + end + + def request_dump(req) + "Headers: #{request_headers(req)}\n" \ + "Body: #{req.body}" + end + + def request_title(req) + env = req.env + + REQUEST_FORMAT % [ + env[REQUEST_METHOD], + env[REQUEST_PATH] || env[PATH_INFO], + env[QUERY_STRING] || "", + env[HTTP_X_FORWARDED_FOR] || env[REMOTE_ADDR] || "-" + ] + end + + def request_headers(req) + headers = req.env.select { |key, _| key.start_with?('HTTP_') } + headers.map { |key, value| [key[5..-1], value] }.to_h.inspect + end + + def request_parsed?(req) + req && req.env[REQUEST_METHOD] + end + + def internal_write(str) + LOG_QUEUE << str + while (w_str = LOG_QUEUE.pop(true)) do + begin + @ioerr.is_a?(IO) and @ioerr.wait_writable(1) + @ioerr.write "#{w_str}\n" + @ioerr.flush unless @ioerr.sync + rescue Errno::EPIPE, Errno::EBADF, IOError, Errno::EINVAL + # 'Invalid argument' (Errno::EINVAL) may be raised by flush + end + end + rescue ThreadError + end + private :internal_write + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/events.rb b/vendor/cache/puma-fba741b91780/lib/puma/events.rb new file mode 100644 index 000000000..3ada3e130 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/events.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +module Puma + + # This is an event sink used by `Puma::Server` to handle + # lifecycle events such as :on_booted, :on_restart, and 
:on_stopped. + # Using `Puma::DSL` it is possible to register callback hooks + # for each event type. + class Events + + def initialize + @hooks = Hash.new { |h,k| h[k] = [] } + end + + # Fire callbacks for the named hook + def fire(hook, *args) + @hooks[hook].each { |t| t.call(*args) } + end + + # Register a callback for a given hook + def register(hook, obj=nil, &blk) + if obj and blk + raise "Specify either an object or a block, not both" + end + + h = obj || blk + + @hooks[hook] << h + + h + end + + def on_booted(&block) + register(:on_booted, &block) + end + + def on_restart(&block) + register(:on_restart, &block) + end + + def on_stopped(&block) + register(:on_stopped, &block) + end + + def fire_on_booted! + fire(:on_booted) + end + + def fire_on_restart! + fire(:on_restart) + end + + def fire_on_stopped! + fire(:on_stopped) + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/io_buffer.rb b/vendor/cache/puma-fba741b91780/lib/puma/io_buffer.rb new file mode 100644 index 000000000..d2265e976 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/io_buffer.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require 'stringio' + +module Puma + class IOBuffer < StringIO + def initialize + super.binmode + end + + def empty? + length.zero? 
+ end + + def reset + truncate 0 + rewind + end + + def to_s + rewind + read + end + + # Read & Reset - returns contents and resets + # @return [String] StringIO contents + def read_and_reset + rewind + str = read + truncate 0 + rewind + str + end + + alias_method :clear, :reset + + # before Ruby 2.5, `write` would only take one argument + if RUBY_VERSION >= '2.5' && RUBY_ENGINE != 'truffleruby' + alias_method :append, :write + else + def append(*strs) + strs.each { |str| write str } + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/jruby_restart.rb b/vendor/cache/puma-fba741b91780/lib/puma/jruby_restart.rb new file mode 100644 index 000000000..48c410e33 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/jruby_restart.rb @@ -0,0 +1,11 @@ +# frozen_string_literal: true + +require 'ffi' + +module Puma + module JRubyRestart + extend FFI::Library + ffi_lib 'c' + attach_function :chdir, [:string], :int + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/json_serialization.rb b/vendor/cache/puma-fba741b91780/lib/puma/json_serialization.rb new file mode 100644 index 000000000..94cad5c1a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/json_serialization.rb @@ -0,0 +1,96 @@ +# frozen_string_literal: true +require 'stringio' + +module Puma + + # Puma deliberately avoids the use of the json gem and instead performs JSON + # serialization without any external dependencies. In a puma cluster, loading + # any gem into the puma master process means that operators cannot use a + # phased restart to upgrade their application if the new version of that + # application uses a different version of that gem. The json gem in + # particular is additionally problematic because it leverages native + # extensions. If the puma master process relies on a gem with native + # extensions and operators remove gems from disk related to old releases, + # subsequent phased restarts can fail. 
+ # + # The implementation of JSON serialization in this module is not designed to + # be particularly full-featured or fast. It just has to handle the few places + # where Puma relies on JSON serialization internally. + + module JSONSerialization + QUOTE = /"/ + BACKSLASH = /\\/ + CONTROL_CHAR_TO_ESCAPE = /[\x00-\x1F]/ # As required by ECMA-404 + CHAR_TO_ESCAPE = Regexp.union QUOTE, BACKSLASH, CONTROL_CHAR_TO_ESCAPE + + class SerializationError < StandardError; end + + class << self + def generate(value) + StringIO.open do |io| + serialize_value io, value + io.string + end + end + + private + + def serialize_value(output, value) + case value + when Hash + output << '{' + value.each_with_index do |(k, v), index| + output << ',' if index != 0 + serialize_object_key output, k + output << ':' + serialize_value output, v + end + output << '}' + when Array + output << '[' + value.each_with_index do |member, index| + output << ',' if index != 0 + serialize_value output, member + end + output << ']' + when Integer, Float + output << value.to_s + when String + serialize_string output, value + when true + output << 'true' + when false + output << 'false' + when nil + output << 'null' + else + raise SerializationError, "Unexpected value of type #{value.class}" + end + end + + def serialize_string(output, value) + output << '"' + output << value.gsub(CHAR_TO_ESCAPE) do |character| + case character + when BACKSLASH + '\\\\' + when QUOTE + '\\"' + when CONTROL_CHAR_TO_ESCAPE + '\u%.4X' % character.ord + end + end + output << '"' + end + + def serialize_object_key(output, value) + case value + when Symbol, String + serialize_string output, value.to_s + else + raise SerializationError, "Could not serialize object of type #{value.class} as object key" + end + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/launcher.rb b/vendor/cache/puma-fba741b91780/lib/puma/launcher.rb new file mode 100644 index 000000000..7e5a601b3 --- /dev/null +++ 
b/vendor/cache/puma-fba741b91780/lib/puma/launcher.rb @@ -0,0 +1,488 @@ +# frozen_string_literal: true + +require_relative 'log_writer' +require_relative 'events' +require_relative 'detect' +require_relative 'cluster' +require_relative 'single' +require_relative 'const' +require_relative 'binder' + +module Puma + # Puma::Launcher is the single entry point for starting a Puma server based on user + # configuration. It is responsible for taking user supplied arguments and resolving them + # with configuration in `config/puma.rb` or `config/puma/.rb`. + # + # It is responsible for either launching a cluster of Puma workers or a single + # puma server. + class Launcher + autoload :BundlePruner, 'puma/launcher/bundle_pruner' + + # Returns an instance of Launcher + # + # +conf+ A Puma::Configuration object indicating how to run the server. + # + # +launcher_args+ A Hash that currently has one required key `:events`, + # this is expected to hold an object similar to an `Puma::LogWriter.stdio`, + # this object will be responsible for broadcasting Puma's internal state + # to a logging destination. An optional key `:argv` can be supplied, + # this should be an array of strings, these arguments are re-used when + # restarting the puma server. 
+ # + # Examples: + # + # conf = Puma::Configuration.new do |user_config| + # user_config.threads 1, 10 + # user_config.app do |env| + # [200, {}, ["hello world"]] + # end + # end + # Puma::Launcher.new(conf, log_writer: Puma::LogWriter.stdio).run + def initialize(conf, launcher_args={}) + @runner = nil + @log_writer = launcher_args[:log_writer] || LogWriter::DEFAULT + @events = launcher_args[:events] || Events.new + @argv = launcher_args[:argv] || [] + @original_argv = @argv.dup + @config = conf + + env = launcher_args.delete(:env) || ENV + + @config.options[:log_writer] = @log_writer + + # Advertise the Configuration + Puma.cli_config = @config if defined?(Puma.cli_config) + + @config.load + + @binder = Binder.new(@log_writer, conf) + @binder.create_inherited_fds(ENV).each { |k| ENV.delete k } + @binder.create_activated_fds(ENV).each { |k| ENV.delete k } + + @environment = conf.environment + + # Load the systemd integration if we detect systemd's NOTIFY_SOCKET. + # Skip this on JRuby though, because it is incompatible with the systemd + # integration due to https://github.com/jruby/jruby/issues/6504 + if ENV["NOTIFY_SOCKET"] && !Puma.jruby? && !ENV["PUMA_SKIP_SYSTEMD"] + @config.plugins.create('systemd') + end + + if @config.options[:bind_to_activated_sockets] + @config.options[:binds] = @binder.synthesize_binds_from_activated_fs( + @config.options[:binds], + @config.options[:bind_to_activated_sockets] == 'only' + ) + end + + @options = @config.options + @config.clamp + + @log_writer.formatter = LogWriter::PidFormatter.new if clustered? + @log_writer.formatter = options[:log_formatter] if @options[:log_formatter] + + @log_writer.custom_logger = options[:custom_logger] if @options[:custom_logger] + + generate_restart_data + + if clustered? && !Puma.forkable? + unsupported "worker mode not supported on #{RUBY_ENGINE} on this platform" + end + + Dir.chdir(@restart_dir) + + prune_bundler! 
+ + @environment = @options[:environment] if @options[:environment] + set_rack_environment + + if clustered? + @options[:logger] = @log_writer + + @runner = Cluster.new(self) + else + @runner = Single.new(self) + end + Puma.stats_object = @runner + + @status = :run + + log_config if env['PUMA_LOG_CONFIG'] + end + + attr_reader :binder, :log_writer, :events, :config, :options, :restart_dir + + # Return stats about the server + def stats + @runner.stats + end + + # Write a state file that can be used by pumactl to control + # the server + def write_state + write_pid + + path = @options[:state] + permission = @options[:state_permission] + return unless path + + require_relative 'state_file' + + sf = StateFile.new + sf.pid = Process.pid + sf.control_url = @options[:control_url] + sf.control_auth_token = @options[:control_auth_token] + sf.running_from = File.expand_path('.') + + sf.save path, permission + end + + # Delete the configured pidfile + def delete_pidfile + path = @options[:pidfile] + File.unlink(path) if path && File.exist?(path) + end + + # Begin async shutdown of the server + def halt + @status = :halt + @runner.halt + end + + # Begin async shutdown of the server gracefully + def stop + @status = :stop + @runner.stop + end + + # Begin async restart of the server + def restart + @status = :restart + @runner.restart + end + + # Begin a phased restart if supported + def phased_restart + unless @runner.respond_to?(:phased_restart) and @runner.phased_restart + log "* phased-restart called but not available, restarting normally." + return restart + end + true + end + + # Begin a refork if supported + def refork + if clustered? && @runner.respond_to?(:fork_worker!) && @options[:fork_worker] + @runner.fork_worker! + true + else + log "* refork called but not available." + false + end + end + + # Run the server. 
This blocks until the server is stopped + def run + previous_env = get_env + + @config.clamp + + @config.plugins.fire_starts self + + setup_signals + set_process_title + + # This blocks until the server is stopped + @runner.run + + do_run_finished(previous_env) + end + + # Return all tcp ports the launcher may be using, TCP or SSL + # @!attribute [r] connected_ports + # @version 5.0.0 + def connected_ports + @binder.connected_ports + end + + # @!attribute [r] restart_args + def restart_args + cmd = @options[:restart_cmd] + if cmd + cmd.split(' ') + @original_argv + else + @restart_argv + end + end + + def close_binder_listeners + @runner.close_control_listeners + @binder.close_listeners + unless @status == :restart + log "=== puma shutdown: #{Time.now} ===" + log "- Goodbye!" + end + end + + # @!attribute [r] thread_status + # @version 5.0.0 + def thread_status + Thread.list.each do |thread| + name = "Thread: TID-#{thread.object_id.to_s(36)}" + name += " #{thread['label']}" if thread['label'] + name += " #{thread.name}" if thread.respond_to?(:name) && thread.name + backtrace = thread.backtrace || [""] + + yield name, backtrace + end + end + + private + + def get_env + if defined?(Bundler) + env = Bundler::ORIGINAL_ENV.dup + # add -rbundler/setup so we load from Gemfile when restarting + bundle = "-rbundler/setup" + env["RUBYOPT"] = [env["RUBYOPT"], bundle].join(" ").lstrip unless env["RUBYOPT"].to_s.include?(bundle) + env + else + ENV.to_h + end + end + + def do_run_finished(previous_env) + case @status + when :halt + do_forceful_stop + when :run, :stop + do_graceful_stop + when :restart + do_restart(previous_env) + end + + close_binder_listeners unless @status == :restart + end + + def do_forceful_stop + log "* Stopping immediately!" + @runner.stop_control + end + + def do_graceful_stop + @events.fire_on_stopped! + @runner.stop_blocked + end + + def do_restart(previous_env) + log "* Restarting..." + ENV.replace(previous_env) + @runner.stop_control + restart! 
+ end + + def restart! + @events.fire_on_restart! + @config.run_hooks :on_restart, self, @log_writer + + if Puma.jruby? + close_binder_listeners + + require_relative 'jruby_restart' + argv = restart_args + JRubyRestart.chdir(@restart_dir) + Kernel.exec(*argv) + elsif Puma.windows? + close_binder_listeners + + argv = restart_args + Dir.chdir(@restart_dir) + Kernel.exec(*argv) + else + argv = restart_args + Dir.chdir(@restart_dir) + ENV.update(@binder.redirects_for_restart_env) + argv += [@binder.redirects_for_restart] + Kernel.exec(*argv) + end + end + + # If configured, write the pid of the current process out + # to a file. + def write_pid + path = @options[:pidfile] + return unless path + cur_pid = Process.pid + File.write path, cur_pid, mode: 'wb:UTF-8' + at_exit do + delete_pidfile if cur_pid == Process.pid + end + end + + def reload_worker_directory + @runner.reload_worker_directory if @runner.respond_to?(:reload_worker_directory) + end + + def log(str) + @log_writer.log(str) + end + + def clustered? + (@options[:workers] || 0) > 0 + end + + def unsupported(str) + @log_writer.error(str) + raise UnsupportedOption + end + + def set_process_title + Process.respond_to?(:setproctitle) ? Process.setproctitle(title) : $0 = title + end + + # @!attribute [r] title + def title + buffer = "puma #{Puma::Const::VERSION} (#{@options[:binds].join(',')})" + buffer += " [#{@options[:tag]}]" if @options[:tag] && !@options[:tag].empty? + buffer + end + + def set_rack_environment + @options[:environment] = environment + ENV['RACK_ENV'] = environment + end + + # @!attribute [r] environment + def environment + @environment + end + + def prune_bundler? + @options[:prune_bundler] && clustered? && !@options[:preload_app] + end + + def prune_bundler! + return unless prune_bundler? + BundlePruner.new(@original_argv, @options[:extra_runtime_dependencies], @log_writer).prune + end + + def generate_restart_data + if dir = @options[:directory] + @restart_dir = dir + + elsif Puma.windows? 
+ # I guess the value of PWD is garbage on windows so don't bother + # using it. + @restart_dir = Dir.pwd + + # Use the same trick as unicorn, namely favor PWD because + # it will contain an unresolved symlink, useful for when + # the pwd is /data/releases/current. + elsif dir = ENV['PWD'] + s_env = File.stat(dir) + s_pwd = File.stat(Dir.pwd) + + if s_env.ino == s_pwd.ino and (Puma.jruby? or s_env.dev == s_pwd.dev) + @restart_dir = dir + end + end + + @restart_dir ||= Dir.pwd + + # if $0 is a file in the current directory, then restart + # it the same, otherwise add -S on there because it was + # picked up in PATH. + # + if File.exist?($0) + arg0 = [Gem.ruby, $0] + else + arg0 = [Gem.ruby, "-S", $0] + end + + # Detect and reinject -Ilib from the command line, used for testing without bundler + # cruby has an expanded path, jruby has just "lib" + lib = File.expand_path "lib" + arg0[1,0] = ["-I", lib] if [lib, "lib"].include?($LOAD_PATH[0]) + + if defined? Puma::WILD_ARGS + @restart_argv = arg0 + Puma::WILD_ARGS + @original_argv + else + @restart_argv = arg0 + @original_argv + end + end + + def setup_signals + begin + Signal.trap "SIGUSR2" do + restart + end + rescue Exception + log "*** SIGUSR2 not implemented, signal based restart unavailable!" + end + + unless Puma.jruby? + begin + Signal.trap "SIGUSR1" do + phased_restart + end + rescue Exception + log "*** SIGUSR1 not implemented, signal based restart unavailable!" + end + end + + begin + Signal.trap "SIGTERM" do + # Shortcut the control flow in case raise_exception_on_sigterm is true + do_graceful_stop + + raise(SignalException, "SIGTERM") if @options[:raise_exception_on_sigterm] + end + rescue Exception + log "*** SIGTERM not implemented, signal based gracefully stopping unavailable!" + end + + begin + Signal.trap "SIGINT" do + stop + end + rescue Exception + log "*** SIGINT not implemented, signal based gracefully stopping unavailable!" + end + + begin + Signal.trap "SIGHUP" do + if @runner.redirected_io? 
+ @runner.redirect_io + else + stop + end + end + rescue Exception + log "*** SIGHUP not implemented, signal based logs reopening unavailable!" + end + + begin + unless Puma.jruby? # INFO in use by JVM already + Signal.trap "SIGINFO" do + thread_status do |name, backtrace| + @log_writer.log(name) + @log_writer.log(backtrace.map { |bt| " #{bt}" }) + end + end + end + rescue Exception + # Not going to log this one, as SIGINFO is *BSD only and would be pretty annoying + # to see this constantly on Linux. + end + end + + def log_config + log "Configuration:" + + @config.final_options + .each { |config_key, value| log "- #{config_key}: #{value}" } + + log "\n" + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/launcher/bundle_pruner.rb b/vendor/cache/puma-fba741b91780/lib/puma/launcher/bundle_pruner.rb new file mode 100644 index 000000000..1b5c91161 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/launcher/bundle_pruner.rb @@ -0,0 +1,104 @@ +# frozen_string_literal: true + +module Puma + class Launcher + + # This class is used to pickup Gemfile changes during + # application restarts. + class BundlePruner + + def initialize(original_argv, extra_runtime_dependencies, log_writer) + @original_argv = Array(original_argv) + @extra_runtime_dependencies = Array(extra_runtime_dependencies) + @log_writer = log_writer + end + + def prune + return if ENV['PUMA_BUNDLER_PRUNED'] + return unless defined?(Bundler) + + require_rubygems_min_version! + + unless puma_wild_path + log "! 
Unable to prune Bundler environment, continuing" + return + end + + dirs = paths_to_require_after_prune + + log '* Pruning Bundler environment' + home = ENV['GEM_HOME'] + bundle_gemfile = Bundler.original_env['BUNDLE_GEMFILE'] + bundle_app_config = Bundler.original_env['BUNDLE_APP_CONFIG'] + + with_unbundled_env do + ENV['GEM_HOME'] = home + ENV['BUNDLE_GEMFILE'] = bundle_gemfile + ENV['PUMA_BUNDLER_PRUNED'] = '1' + ENV["BUNDLE_APP_CONFIG"] = bundle_app_config + args = [Gem.ruby, puma_wild_path, '-I', dirs.join(':')] + @original_argv + # Ruby 2.0+ defaults to true which breaks socket activation + args += [{:close_others => false}] + Kernel.exec(*args) + end + end + + private + + def require_rubygems_min_version! + min_version = Gem::Version.new('2.2') + + return if min_version <= Gem::Version.new(Gem::VERSION) + + raise "prune_bundler is not supported on your version of RubyGems. " \ + "You must have RubyGems #{min_version}+ to use this feature." + end + + def puma_wild_path + puma_lib_dir = puma_require_paths.detect { |x| File.exist? 
File.join(x, '../bin/puma-wild') } + File.expand_path(File.join(puma_lib_dir, '../bin/puma-wild')) + end + + def with_unbundled_env + bundler_ver = Gem::Version.new(Bundler::VERSION) + if bundler_ver < Gem::Version.new('2.1.0') + Bundler.with_clean_env { yield } + else + Bundler.with_unbundled_env { yield } + end + end + + def paths_to_require_after_prune + puma_require_paths + extra_runtime_deps_paths + end + + def extra_runtime_deps_paths + t = @extra_runtime_dependencies.map do |dep_name| + if (spec = spec_for_gem(dep_name)) + require_paths_for_gem(spec) + else + log "* Could not load extra dependency: #{dep_name}" + nil + end + end + t.flatten!; t.compact!; t + end + + def puma_require_paths + require_paths_for_gem(spec_for_gem('puma')) + end + + def spec_for_gem(gem_name) + Bundler.rubygems.loaded_specs(gem_name) + end + + def require_paths_for_gem(gem_spec) + gem_spec.full_require_paths + end + + def log(str) + @log_writer.log(str) + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/log_writer.rb b/vendor/cache/puma-fba741b91780/lib/puma/log_writer.rb new file mode 100644 index 000000000..9a1eb9dc7 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/log_writer.rb @@ -0,0 +1,147 @@ +# frozen_string_literal: true + +require_relative 'null_io' +require_relative 'error_logger' +require 'stringio' +require 'io/wait' unless Puma::HAS_NATIVE_IO_WAIT + +module Puma + + # Handles logging concerns for both standard messages + # (+stdout+) and errors (+stderr+). + class LogWriter + + class DefaultFormatter + def call(str) + str + end + end + + class PidFormatter + def call(str) + "[#{$$}] #{str}" + end + end + + LOG_QUEUE = Queue.new + + attr_reader :stdout, + :stderr + + attr_accessor :formatter, :custom_logger + + # Create a LogWriter that prints to +stdout+ and +stderr+. 
+ def initialize(stdout, stderr, env: ENV) + @formatter = DefaultFormatter.new + @custom_logger = nil + @stdout = stdout + @stderr = stderr + + @debug = env.key?('PUMA_DEBUG') + @error_logger = ErrorLogger.new(@stderr, env: env) + end + + DEFAULT = new(STDOUT, STDERR) + + # Returns an LogWriter object which writes its status to + # two StringIO objects. + def self.strings(env: ENV) + LogWriter.new(StringIO.new, StringIO.new, env: env) + end + + def self.stdio(env: ENV) + LogWriter.new($stdout, $stderr, env: env) + end + + def self.null(env: ENV) + n = NullIO.new + LogWriter.new(n, n, env: env) + end + + # Write +str+ to +@stdout+ + def log(str) + if @custom_logger&.respond_to?(:write) + @custom_logger.write(format(str)) + else + internal_write "#{@formatter.call str}\n" + end + end + + def write(str) + internal_write @formatter.call(str) + end + + def internal_write(str) + LOG_QUEUE << str + while (w_str = LOG_QUEUE.pop(true)) do + begin + @stdout.is_a?(IO) and @stdout.wait_writable(1) + @stdout.write w_str + @stdout.flush unless @stdout.sync + rescue Errno::EPIPE, Errno::EBADF, IOError, Errno::EINVAL + # 'Invalid argument' (Errno::EINVAL) may be raised by flush + end + end + rescue ThreadError + end + private :internal_write + + def debug? + @debug + end + + def debug(str) + log("% #{str}") if @debug + end + + # Write +str+ to +@stderr+ + def error(str) + @error_logger.info(text: @formatter.call("ERROR: #{str}")) + exit 1 + end + + def format(str) + formatter.call(str) + end + + # An HTTP connection error has occurred. + # +error+ a connection exception, +req+ the request, + # and +text+ additional info + # @version 5.0.0 + def connection_error(error, req, text="HTTP connection error") + @error_logger.info(error: error, req: req, text: text) + end + + # An HTTP parse error has occurred. + # +error+ a parsing exception, + # and +req+ the request. 
+ def parse_error(error, req) + @error_logger.info(error: error, req: req, text: 'HTTP parse error, malformed request') + end + + # An SSL error has occurred. + # @param error + # @param ssl_socket + def ssl_error(error, ssl_socket) + peeraddr = ssl_socket.peeraddr.last rescue "" + peercert = ssl_socket.peercert + subject = peercert&.subject + @error_logger.info(error: error, text: "SSL error, peer: #{peeraddr}, peer cert: #{subject}") + end + + # An unknown error has occurred. + # +error+ an exception object, +req+ the request, + # and +text+ additional info + def unknown_error(error, req=nil, text="Unknown error") + @error_logger.info(error: error, req: req, text: text) + end + + # Log occurred error debug dump. + # +error+ an exception object, +req+ the request, + # and +text+ additional info + # @version 5.0.0 + def debug_error(error, req=nil, text="") + @error_logger.debug(error: error, req: req, text: text) + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/minissl.rb b/vendor/cache/puma-fba741b91780/lib/puma/minissl.rb new file mode 100644 index 000000000..0ff2ce85d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/minissl.rb @@ -0,0 +1,459 @@ +# frozen_string_literal: true + +begin + require 'io/wait' unless Puma::HAS_NATIVE_IO_WAIT +rescue LoadError +end + +require 'open3' +# need for Puma::MiniSSL::OPENSSL constants used in `HAS_TLS1_3` +# use require, see https://github.com/puma/puma/pull/2381 +require 'puma/puma_http11' + +module Puma + module MiniSSL + # Define constant at runtime, as it's easy to determine at built time, + # but Puma could (it shouldn't) be loaded with an older OpenSSL version + # @version 5.0.0 + HAS_TLS1_3 = IS_JRUBY || + ((OPENSSL_VERSION[/ \d+\.\d+\.\d+/].split('.').map(&:to_i) <=> [1,1,1]) != -1 && + (OPENSSL_LIBRARY_VERSION[/ \d+\.\d+\.\d+/].split('.').map(&:to_i) <=> [1,1,1]) !=-1) + + class Socket + def initialize(socket, engine) + @socket = socket + @engine = engine + @peercert = nil + @reuse = 
nil + end + + # @!attribute [r] to_io + def to_io + @socket + end + + def closed? + @socket.closed? + end + + # Returns a two element array, + # first is protocol version (SSL_get_version), + # second is 'handshake' state (SSL_state_string) + # + # Used for dropping tcp connections to ssl. + # See OpenSSL ssl/ssl_stat.c SSL_state_string for info + # @!attribute [r] ssl_version_state + # @version 5.0.0 + # + def ssl_version_state + IS_JRUBY ? [nil, nil] : @engine.ssl_vers_st + end + + # Used to check the handshake status, in particular when a TCP connection + # is made with TLSv1.3 as an available protocol + # @version 5.0.0 + def bad_tlsv1_3? + HAS_TLS1_3 && ssl_version_state == ['TLSv1.3', 'SSLERR'] + end + private :bad_tlsv1_3? + + def readpartial(size) + while true + output = @engine.read + return output if output + + data = @socket.readpartial(size) + @engine.inject(data) + output = @engine.read + + return output if output + + while neg_data = @engine.extract + @socket.write neg_data + end + end + end + + def engine_read_all + output = @engine.read + while output and additional_output = @engine.read + output << additional_output + end + output + end + + def read_nonblock(size, *_) + # *_ is to deal with keyword args that were added + # at some point (and being used in the wild) + while true + output = engine_read_all + return output if output + + data = @socket.read_nonblock(size, exception: false) + if data == :wait_readable || data == :wait_writable + # It would make more sense to let @socket.read_nonblock raise + # EAGAIN if necessary but it seems like it'll misbehave on Windows. + # I don't have a Windows machine to debug this so I can't explain + # exactly whats happening in that OS. Please let me know if you + # find out! + # + # In the meantime, we can emulate the correct behavior by + # capturing :wait_readable & :wait_writable and raising EAGAIN + # ourselves. + raise IO::EAGAINWaitReadable + elsif data.nil? 
+ raise SSLError.exception "HTTP connection?" if bad_tlsv1_3? + return nil + end + + @engine.inject(data) + output = engine_read_all + + return output if output + + while neg_data = @engine.extract + @socket.write neg_data + end + end + end + + def write(data) + return 0 if data.empty? + + data_size = data.bytesize + need = data_size + + while true + wrote = @engine.write data + + enc_wr = +'' + while (enc = @engine.extract) + enc_wr << enc + end + @socket.write enc_wr unless enc_wr.empty? + + need -= wrote + + return data_size if need == 0 + + data = data.byteslice(wrote..-1) + end + end + + alias_method :syswrite, :write + alias_method :<<, :write + + # This is a temporary fix to deal with websockets code using + # write_nonblock. + + # The problem with implementing it properly + # is that it means we'd have to have the ability to rewind + # an engine because after we write+extract, the socket + # write_nonblock call might raise an exception and later + # code would pass the same data in, but the engine would think + # it had already written the data in. + # + # So for the time being (and since write blocking is quite rare), + # go ahead and actually block in write_nonblock. + # + def write_nonblock(data, *_) + write data + end + + def flush + @socket.flush + end + + def close + begin + unless @engine.shutdown + while alert_data = @engine.extract + @socket.write alert_data + end + end + rescue IOError, SystemCallError + Puma::Util.purge_interrupt_queue + # nothing + ensure + @socket.close + end + end + + # @!attribute [r] peeraddr + def peeraddr + @socket.peeraddr + end + + # OpenSSL is loaded in `MiniSSL::ContextBuilder` when + # `MiniSSL::Context#verify_mode` is not `VERIFY_NONE`. + # When `VERIFY_NONE`, `MiniSSL::Engine#peercert` is nil, regardless of + # whether the client sends a cert. 
+ # @return [OpenSSL::X509::Certificate, nil] + # @!attribute [r] peercert + def peercert + return @peercert if @peercert + + raw = @engine.peercert + return nil unless raw + + @peercert = OpenSSL::X509::Certificate.new raw + end + end + + if IS_JRUBY + OPENSSL_NO_SSL3 = false + OPENSSL_NO_TLS1 = false + end + + class Context + attr_accessor :verify_mode + attr_reader :no_tlsv1, :no_tlsv1_1 + + def initialize + @no_tlsv1 = false + @no_tlsv1_1 = false + @key = nil + @cert = nil + @key_pem = nil + @cert_pem = nil + @reuse = nil + @reuse_cache_size = nil + @reuse_timeout = nil + end + + def check_file(file, desc) + raise ArgumentError, "#{desc} file '#{file}' does not exist" unless File.exist? file + raise ArgumentError, "#{desc} file '#{file}' is not readable" unless File.readable? file + end + + if IS_JRUBY + # jruby-specific Context properties: java uses a keystore and password pair rather than a cert/key pair + attr_reader :keystore + attr_reader :keystore_type + attr_accessor :keystore_pass + attr_reader :truststore + attr_reader :truststore_type + attr_accessor :truststore_pass + attr_reader :cipher_suites + attr_reader :protocols + + def keystore=(keystore) + check_file keystore, 'Keystore' + @keystore = keystore + end + + def truststore=(truststore) + # NOTE: historically truststore was assumed the same as keystore, this is kept for backwards + # compatibility, to rely on JVM's trust defaults we allow setting `truststore = :default` + unless truststore.eql?(:default) + raise ArgumentError, "No such truststore file '#{truststore}'" unless File.exist?(truststore) + end + @truststore = truststore + end + + def keystore_type=(type) + raise ArgumentError, "Invalid keystore type: #{type.inspect}" unless ['pkcs12', 'jks', nil].include?(type) + @keystore_type = type + end + + def truststore_type=(type) + raise ArgumentError, "Invalid truststore type: #{type.inspect}" unless ['pkcs12', 'jks', nil].include?(type) + @truststore_type = type + end + + def 
cipher_suites=(list) + list = list.split(',').map(&:strip) if list.is_a?(String) + @cipher_suites = list + end + + # aliases for backwards compatibility + alias_method :ssl_cipher_list, :cipher_suites + alias_method :ssl_cipher_list=, :cipher_suites= + + def protocols=(list) + list = list.split(',').map(&:strip) if list.is_a?(String) + @protocols = list + end + + def check + raise "Keystore not configured" unless @keystore + # @truststore defaults to @keystore due backwards compatibility + end + + else + # non-jruby Context properties + attr_reader :key + attr_reader :key_password_command + attr_reader :cert + attr_reader :ca + attr_reader :cert_pem + attr_reader :key_pem + attr_accessor :ssl_cipher_filter + attr_accessor :ssl_ciphersuites + attr_accessor :verification_flags + + attr_reader :reuse, :reuse_cache_size, :reuse_timeout + + def key=(key) + check_file key, 'Key' + @key = key + end + + def key_password_command=(key_password_command) + @key_password_command = key_password_command + end + + def cert=(cert) + check_file cert, 'Cert' + @cert = cert + end + + def ca=(ca) + check_file ca, 'ca' + @ca = ca + end + + def cert_pem=(cert_pem) + raise ArgumentError, "'cert_pem' is not a String" unless cert_pem.is_a? String + @cert_pem = cert_pem + end + + def key_pem=(key_pem) + raise ArgumentError, "'key_pem' is not a String" unless key_pem.is_a? String + @key_pem = key_pem + end + + def check + raise "Key not configured" if @key.nil? && @key_pem.nil? + raise "Cert not configured" if @cert.nil? && @cert_pem.nil? + end + + # Executes the command to return the password needed to decrypt the key. + def key_password + raise "Key password command not configured" if @key_password_command.nil? + + stdout_str, stderr_str, status = Open3.capture3(@key_password_command) + + return stdout_str.chomp if status.success? + + raise "Key password failed with code #{status.exitstatus}: #{stderr_str}" + end + + # Controls session reuse. 
Allowed values are as follows: + # * 'off' - matches the behavior of Puma 5.6 and earlier. This is included + # in case reuse 'on' is made the default in future Puma versions. + # * 'dflt' - sets session reuse on, with OpenSSL default cache size of + # 20k and default timeout of 300 seconds. + # * 's,t' - where s and t are integer strings, for size and timeout. + # * 's' - where s is an integer strings for size. + # * ',t' - where t is an integer strings for timeout. + # + def reuse=(reuse_str) + case reuse_str + when 'off' + @reuse = nil + when 'dflt' + @reuse = true + when /\A\d+\z/ + @reuse = true + @reuse_cache_size = reuse_str.to_i + when /\A\d+,\d+\z/ + @reuse = true + size, time = reuse_str.split ',' + @reuse_cache_size = size.to_i + @reuse_timeout = time.to_i + when /\A,\d+\z/ + @reuse = true + @reuse_timeout = reuse_str.delete(',').to_i + end + end + end + + # disables TLSv1 + # @!attribute [w] no_tlsv1= + def no_tlsv1=(tlsv1) + raise ArgumentError, "Invalid value of no_tlsv1=" unless ['true', 'false', true, false].include?(tlsv1) + @no_tlsv1 = tlsv1 + end + + # disables TLSv1 and TLSv1.1. 
Overrides `#no_tlsv1=` + # @!attribute [w] no_tlsv1_1= + def no_tlsv1_1=(tlsv1_1) + raise ArgumentError, "Invalid value of no_tlsv1_1=" unless ['true', 'false', true, false].include?(tlsv1_1) + @no_tlsv1_1 = tlsv1_1 + end + + end + + VERIFY_NONE = 0 + VERIFY_PEER = 1 + VERIFY_FAIL_IF_NO_PEER_CERT = 2 + + # https://github.com/openssl/openssl/blob/master/include/openssl/x509_vfy.h.in + # /* Certificate verify flags */ + VERIFICATION_FLAGS = { + "USE_CHECK_TIME" => 0x2, + "CRL_CHECK" => 0x4, + "CRL_CHECK_ALL" => 0x8, + "IGNORE_CRITICAL" => 0x10, + "X509_STRICT" => 0x20, + "ALLOW_PROXY_CERTS" => 0x40, + "POLICY_CHECK" => 0x80, + "EXPLICIT_POLICY" => 0x100, + "INHIBIT_ANY" => 0x200, + "INHIBIT_MAP" => 0x400, + "NOTIFY_POLICY" => 0x800, + "EXTENDED_CRL_SUPPORT" => 0x1000, + "USE_DELTAS" => 0x2000, + "CHECK_SS_SIGNATURE" => 0x4000, + "TRUSTED_FIRST" => 0x8000, + "SUITEB_128_LOS_ONLY" => 0x10000, + "SUITEB_192_LOS" => 0x20000, + "SUITEB_128_LOS" => 0x30000, + "PARTIAL_CHAIN" => 0x80000, + "NO_ALT_CHAINS" => 0x100000, + "NO_CHECK_TIME" => 0x200000 + }.freeze + + class Server + def initialize(socket, ctx) + @socket = socket + @ctx = ctx + @eng_ctx = IS_JRUBY ? @ctx : SSLContext.new(ctx) + end + + def accept + @ctx.check + io = @socket.accept + engine = Engine.server @eng_ctx + Socket.new io, engine + end + + def accept_nonblock + @ctx.check + io = @socket.accept_nonblock + engine = Engine.server @eng_ctx + Socket.new io, engine + end + + # @!attribute [r] to_io + def to_io + @socket + end + + # @!attribute [r] addr + # @version 5.0.0 + def addr + @socket.addr + end + + def close + @socket.close unless @socket.closed? # closed? call is for Windows + end + + def closed? + @socket.closed? 
+ end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/minissl/context_builder.rb b/vendor/cache/puma-fba741b91780/lib/puma/minissl/context_builder.rb new file mode 100644 index 000000000..6abce102a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/minissl/context_builder.rb @@ -0,0 +1,96 @@ +module Puma + module MiniSSL + class ContextBuilder + def initialize(params, log_writer) + @params = params + @log_writer = log_writer + end + + def context + ctx = MiniSSL::Context.new + + if defined?(JRUBY_VERSION) + unless params['keystore'] + log_writer.error "Please specify the Java keystore via 'keystore='" + end + + ctx.keystore = params['keystore'] + + unless params['keystore-pass'] + log_writer.error "Please specify the Java keystore password via 'keystore-pass='" + end + + ctx.keystore_pass = params['keystore-pass'] + ctx.keystore_type = params['keystore-type'] + + if truststore = params['truststore'] + ctx.truststore = truststore.eql?('default') ? :default : truststore + ctx.truststore_pass = params['truststore-pass'] + ctx.truststore_type = params['truststore-type'] + end + + ctx.cipher_suites = params['cipher_suites'] || params['ssl_cipher_list'] + ctx.protocols = params['protocols'] if params['protocols'] + else + if params['key'].nil? && params['key_pem'].nil? + log_writer.error "Please specify the SSL key via 'key=' or 'key_pem='" + end + + ctx.key = params['key'] if params['key'] + ctx.key_pem = params['key_pem'] if params['key_pem'] + ctx.key_password_command = params['key_password_command'] if params['key_password_command'] + + if params['cert'].nil? && params['cert_pem'].nil? 
+ log_writer.error "Please specify the SSL cert via 'cert=' or 'cert_pem='" + end + + ctx.cert = params['cert'] if params['cert'] + ctx.cert_pem = params['cert_pem'] if params['cert_pem'] + + if ['peer', 'force_peer'].include?(params['verify_mode']) + unless params['ca'] + log_writer.error "Please specify the SSL ca via 'ca='" + end + # needed for Puma::MiniSSL::Socket#peercert, env['puma.peercert'] + require 'openssl' + end + + ctx.ca = params['ca'] if params['ca'] + ctx.ssl_cipher_filter = params['ssl_cipher_filter'] if params['ssl_cipher_filter'] + ctx.ssl_ciphersuites = params['ssl_ciphersuites'] if params['ssl_ciphersuites'] && HAS_TLS1_3 + + ctx.reuse = params['reuse'] if params['reuse'] + end + + ctx.no_tlsv1 = params['no_tlsv1'] == 'true' + ctx.no_tlsv1_1 = params['no_tlsv1_1'] == 'true' + + if params['verify_mode'] + ctx.verify_mode = case params['verify_mode'] + when "peer" + MiniSSL::VERIFY_PEER + when "force_peer" + MiniSSL::VERIFY_PEER | MiniSSL::VERIFY_FAIL_IF_NO_PEER_CERT + when "none" + MiniSSL::VERIFY_NONE + else + log_writer.error "Please specify a valid verify_mode=" + MiniSSL::VERIFY_NONE + end + end + + if params['verification_flags'] + ctx.verification_flags = params['verification_flags'].split(','). + map { |flag| MiniSSL::VERIFICATION_FLAGS.fetch(flag) }. + inject { |sum, flag| sum ? sum | flag : flag } + end + + ctx + end + + private + + attr_reader :params, :log_writer + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/null_io.rb b/vendor/cache/puma-fba741b91780/lib/puma/null_io.rb new file mode 100644 index 000000000..13534cd49 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/null_io.rb @@ -0,0 +1,101 @@ +# frozen_string_literal: true + +module Puma + # Provides an IO-like object that always appears to contain no data. + # Used as the value for rack.input when the request has no body. 
+ # + class NullIO + def gets + nil + end + + def string + "" + end + + def each + end + + def pos + 0 + end + + # Mimics IO#read with no data. + # + def read(length = nil, buffer = nil) + if length.to_i < 0 + raise ArgumentError, "(negative length #{length} given)" + end + + buffer = if buffer.nil? + "".b + else + String.try_convert(buffer) or raise TypeError, "no implicit conversion of #{buffer.class} into String" + end + buffer.clear + if length.to_i > 0 + nil + else + buffer + end + end + + def rewind + end + + def seek(pos, whence = 0) + raise ArgumentError, "negative length #{pos} given" if pos.negative? + 0 + end + + def close + end + + def size + 0 + end + + def eof? + true + end + + def sync + true + end + + def sync=(v) + end + + def puts(*ary) + end + + def write(*ary) + end + + def flush + self + end + + # This is used as singleton class, so can't have state. + def closed? + false + end + + def set_encoding(enc) + self + end + + # per rack spec + def external_encoding + Encoding::ASCII_8BIT + end + + def binmode + self + end + + def binmode? + true + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/plugin.rb b/vendor/cache/puma-fba741b91780/lib/puma/plugin.rb new file mode 100644 index 000000000..8a943b594 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/plugin.rb @@ -0,0 +1,111 @@ +# frozen_string_literal: true + +module Puma + class UnknownPlugin < RuntimeError; end + + class PluginLoader + def initialize + @instances = [] + end + + def create(name) + if cls = Plugins.find(name) + plugin = cls.new + @instances << plugin + return plugin + end + + raise UnknownPlugin, "File failed to register properly named plugin" + end + + def fire_starts(launcher) + @instances.each do |i| + if i.respond_to? 
:start + i.start(launcher) + end + end + end + end + + class PluginRegistry + def initialize + @plugins = {} + @background = [] + end + + def register(name, cls) + @plugins[name] = cls + end + + def find(name) + name = name.to_s + + if cls = @plugins[name] + return cls + end + + begin + require "puma/plugin/#{name}" + rescue LoadError + raise UnknownPlugin, "Unable to find plugin: #{name}" + end + + if cls = @plugins[name] + return cls + end + + raise UnknownPlugin, "file failed to register a plugin" + end + + def add_background(blk) + @background << blk + end + + def fire_background + @background.each_with_index do |b, i| + Thread.new do + Puma.set_thread_name "plgn bg #{i}" + b.call + end + end + end + end + + Plugins = PluginRegistry.new + + class Plugin + # Matches + # "C:/Ruby22/lib/ruby/gems/2.2.0/gems/puma-3.0.1/lib/puma/plugin/tmp_restart.rb:3:in `'" + # AS + # C:/Ruby22/lib/ruby/gems/2.2.0/gems/puma-3.0.1/lib/puma/plugin/tmp_restart.rb + CALLER_FILE = / + \A # start of string + .+ # file path (one or more characters) + (?= # stop previous match when + :\d+ # a colon is followed by one or more digits + :in # followed by a colon followed by in + ) + /x + + def self.extract_name(ary) + path = ary.first[CALLER_FILE] + + m = %r!puma/plugin/([^/]*)\.rb$!.match(path) + m[1] + end + + def self.create(&blk) + name = extract_name(caller) + + cls = Class.new(self) + + cls.class_eval(&blk) + + Plugins.register name, cls + end + + def in_background(&blk) + Plugins.add_background blk + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/plugin/systemd.rb b/vendor/cache/puma-fba741b91780/lib/puma/plugin/systemd.rb new file mode 100644 index 000000000..d6c4715af --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/plugin/systemd.rb @@ -0,0 +1,90 @@ +# frozen_string_literal: true + +require_relative '../plugin' + +# Puma's systemd integration allows Puma to inform systemd: +# 1. when it has successfully started +# 2. 
when it is starting shutdown +# 3. periodically for a liveness check with a watchdog thread +# 4. periodically set the status +Puma::Plugin.create do + def start(launcher) + require_relative '../sd_notify' + + launcher.log_writer.log "* Enabling systemd notification integration" + + # hook_events + launcher.events.on_booted { Puma::SdNotify.ready } + launcher.events.on_stopped { Puma::SdNotify.stopping } + launcher.events.on_restart { Puma::SdNotify.reloading } + + # start watchdog + if Puma::SdNotify.watchdog? + ping_f = watchdog_sleep_time + + in_background do + launcher.log_writer.log "Pinging systemd watchdog every #{ping_f.round(1)} sec" + loop do + sleep ping_f + Puma::SdNotify.watchdog + end + end + end + + # start status loop + instance = self + sleep_time = 1.0 + in_background do + launcher.log_writer.log "Sending status to systemd every #{sleep_time.round(1)} sec" + + loop do + sleep sleep_time + # TODO: error handling? + Puma::SdNotify.status(instance.status) + end + end + end + + def status + if clustered? + messages = stats[:worker_status].map do |worker| + common_message(worker[:last_status]) + end.join(',') + + "Puma #{Puma::Const::VERSION}: cluster: #{booted_workers}/#{workers}, worker_status: [#{messages}]" + else + "Puma #{Puma::Const::VERSION}: worker: #{common_message(stats)}" + end + end + + private + + def watchdog_sleep_time + usec = Integer(ENV["WATCHDOG_USEC"]) + + sec_f = usec / 1_000_000.0 + # "It is recommended that a daemon sends a keep-alive notification message + # to the service manager every half of the time returned here." + sec_f / 2 + end + + def stats + Puma.stats_hash + end + + def clustered? 
+ stats.has_key?(:workers) + end + + def workers + stats.fetch(:workers, 1) + end + + def booted_workers + stats.fetch(:booted_workers, 1) + end + + def common_message(stats) + "{ #{stats[:running]}/#{stats[:max_threads]} threads, #{stats[:pool_capacity]} available, #{stats[:backlog]} backlog }" + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/plugin/tmp_restart.rb b/vendor/cache/puma-fba741b91780/lib/puma/plugin/tmp_restart.rb new file mode 100644 index 000000000..5136023cc --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/plugin/tmp_restart.rb @@ -0,0 +1,36 @@ +# frozen_string_literal: true + +require_relative '../plugin' + +Puma::Plugin.create do + def start(launcher) + path = File.join("tmp", "restart.txt") + + orig = nil + + # If we can't write to the path, then just don't bother with this plugin + begin + File.write(path, "") unless File.exist?(path) + orig = File.stat(path).mtime + rescue SystemCallError + return + end + + in_background do + while true + sleep 2 + + begin + mtime = File.stat(path).mtime + rescue SystemCallError + # If the file has disappeared, assume that means don't restart + else + if mtime > orig + launcher.restart + break + end + end + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/rack/builder.rb b/vendor/cache/puma-fba741b91780/lib/puma/rack/builder.rb new file mode 100644 index 000000000..2d73c607c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/rack/builder.rb @@ -0,0 +1,297 @@ +# frozen_string_literal: true + +module Puma +end + +module Puma::Rack + class Options + def parse!(args) + options = {} + opt_parser = OptionParser.new("", 24, ' ') do |opts| + opts.banner = "Usage: rackup [ruby options] [rack options] [rackup config]" + + opts.separator "" + opts.separator "Ruby options:" + + lineno = 1 + opts.on("-e", "--eval LINE", "evaluate a LINE of code") { |line| + eval line, TOPLEVEL_BINDING, "-e", lineno + lineno += 1 + } + + opts.on("-b", "--builder BUILDER_LINE", 
"evaluate a BUILDER_LINE of code as a builder script") { |line| + options[:builder] = line + } + + opts.on("-d", "--debug", "set debugging flags (set $DEBUG to true)") { + options[:debug] = true + } + opts.on("-w", "--warn", "turn warnings on for your script") { + options[:warn] = true + } + opts.on("-q", "--quiet", "turn off logging") { + options[:quiet] = true + } + + opts.on("-I", "--include PATH", + "specify $LOAD_PATH (may be used more than once)") { |path| + (options[:include] ||= []).concat(path.split(":")) + } + + opts.on("-r", "--require LIBRARY", + "require the library, before executing your script") { |library| + options[:require] = library + } + + opts.separator "" + opts.separator "Rack options:" + opts.on("-s", "--server SERVER", "serve using SERVER (thin/puma/webrick/mongrel)") { |s| + options[:server] = s + } + + opts.on("-o", "--host HOST", "listen on HOST (default: localhost)") { |host| + options[:Host] = host + } + + opts.on("-p", "--port PORT", "use PORT (default: 9292)") { |port| + options[:Port] = port + } + + opts.on("-O", "--option NAME[=VALUE]", "pass VALUE to the server as option NAME. If no VALUE, sets it to true. Run '#{$0} -s SERVER -h' to get a list of options for SERVER") { |name| + name, value = name.split('=', 2) + value = true if value.nil? + options[name.to_sym] = value + } + + opts.on("-E", "--env ENVIRONMENT", "use ENVIRONMENT for defaults (default: development)") { |e| + options[:environment] = e + } + + opts.on("-P", "--pid FILE", "file to store PID") { |f| + options[:pid] = ::File.expand_path(f) + } + + opts.separator "" + opts.separator "Common options:" + + opts.on_tail("-h", "-?", "--help", "Show this message") do + puts opts + puts handler_opts(options) + + exit + end + + opts.on_tail("--version", "Show version") do + puts "Rack #{Rack.version} (Release: #{Rack.release})" + exit + end + end + + begin + opt_parser.parse! 
args + rescue OptionParser::InvalidOption => e + warn e.message + abort opt_parser.to_s + end + + options[:config] = args.last if args.last + options + end + + def handler_opts(options) + begin + info = [] + server = Rack::Handler.get(options[:server]) || Rack::Handler.default(options) + if server&.respond_to?(:valid_options) + info << "" + info << "Server-specific options for #{server.name}:" + + has_options = false + server.valid_options.each do |name, description| + next if /^(Host|Port)[^a-zA-Z]/.match? name.to_s # ignore handler's host and port options, we do our own. + + info << " -O %-21s %s" % [name, description] + has_options = true + end + return "" if !has_options + end + info.join("\n") + rescue NameError + return "Warning: Could not find handler specified (#{options[:server] || 'default'}) to determine handler-specific options" + end + end + end + + # Rack::Builder implements a small DSL to iteratively construct Rack + # applications. + # + # Example: + # + # require 'rack/lobster' + # app = Rack::Builder.new do + # use Rack::CommonLogger + # use Rack::ShowExceptions + # map "/lobster" do + # use Rack::Lint + # run Rack::Lobster.new + # end + # end + # + # run app + # + # Or + # + # app = Rack::Builder.app do + # use Rack::CommonLogger + # run lambda { |env| [200, {'Content-Type' => 'text/plain'}, ['OK']] } + # end + # + # run app + # + # +use+ adds middleware to the stack, +run+ dispatches to an application. + # You can use +map+ to construct a Rack::URLMap in a convenient way. + + class Builder + def self.parse_file(config, opts = Options.new) + options = {} + if config =~ /\.ru$/ + cfgfile = ::File.read(config) + if cfgfile[/^#\\(.*)/] && opts + options = opts.parse! 
$1.split(/\s+/) + end + cfgfile.sub!(/^__END__\n.*\Z/m, '') + app = new_from_string cfgfile, config + else + require config + app = Object.const_get(::File.basename(config, '.rb').capitalize) + end + [app, options] + end + + def self.new_from_string(builder_script, file="(rackup)") + eval "Puma::Rack::Builder.new {\n" + builder_script + "\n}.to_app", + TOPLEVEL_BINDING, file, 0 + end + + def initialize(default_app = nil, &block) + @use, @map, @run, @warmup = [], nil, default_app, nil + + # Conditionally load rack now, so that any rack middlewares, + # etc are available. + begin + require 'rack' + rescue LoadError + end + + instance_eval(&block) if block + end + + def self.app(default_app = nil, &block) + self.new(default_app, &block).to_app + end + + # Specifies middleware to use in a stack. + # + # class Middleware + # def initialize(app) + # @app = app + # end + # + # def call(env) + # env["rack.some_header"] = "setting an example" + # @app.call(env) + # end + # end + # + # use Middleware + # run lambda { |env| [200, { "Content-Type" => "text/plain" }, ["OK"]] } + # + # All requests through to this application will first be processed by the middleware class. + # The +call+ method in this example sets an additional environment key which then can be + # referenced in the application if required. + def use(middleware, *args, &block) + if @map + mapping, @map = @map, nil + @use << proc { |app| generate_map app, mapping } + end + @use << proc { |app| middleware.new(app, *args, &block) } + end + + # Takes an argument that is an object that responds to #call and returns a Rack response. 
+ # The simplest form of this is a lambda object: + # + # run lambda { |env| [200, { "Content-Type" => "text/plain" }, ["OK"]] } + # + # However this could also be a class: + # + # class Heartbeat + # def self.call(env) + # [200, { "Content-Type" => "text/plain" }, ["OK"]] + # end + # end + # + # run Heartbeat + def run(app) + @run = app + end + + # Takes a lambda or block that is used to warm-up the application. + # + # warmup do |app| + # client = Rack::MockRequest.new(app) + # client.get('/') + # end + # + # use SomeMiddleware + # run MyApp + def warmup(prc=nil, &block) + @warmup = prc || block + end + + # Creates a route within the application. + # + # Rack::Builder.app do + # map '/' do + # run Heartbeat + # end + # end + # + # The +use+ method can also be used here to specify middleware to run under a specific path: + # + # Rack::Builder.app do + # map '/' do + # use Middleware + # run Heartbeat + # end + # end + # + # This example includes a piece of middleware which will run before requests hit +Heartbeat+. + # + def map(path, &block) + @map ||= {} + @map[path] = block + end + + def to_app + app = @map ? generate_map(@run, @map) : @run + fail "missing run or map statement" unless app + app = @use.reverse.inject(app) { |a,e| e[a] } + @warmup&.call app + app + end + + def call(env) + to_app.call(env) + end + + private + + def generate_map(default_app, mapping) + require_relative 'urlmap' + + mapped = default_app ? {'/' => default_app} : {} + mapping.each { |r,b| mapped[r] = self.class.new(default_app, &b).to_app } + URLMap.new(mapped) + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/rack/urlmap.rb b/vendor/cache/puma-fba741b91780/lib/puma/rack/urlmap.rb new file mode 100644 index 000000000..baab29990 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/rack/urlmap.rb @@ -0,0 +1,93 @@ +# frozen_string_literal: true + +module Puma::Rack + # Rack::URLMap takes a hash mapping urls or paths to apps, and + # dispatches accordingly. 
Support for HTTP/1.1 host names exists if + # the URLs start with http:// or https://. + # + # URLMap modifies the SCRIPT_NAME and PATH_INFO such that the part + # relevant for dispatch is in the SCRIPT_NAME, and the rest in the + # PATH_INFO. This should be taken care of when you need to + # reconstruct the URL in order to create links. + # + # URLMap dispatches in such a way that the longest paths are tried + # first, since they are most specific. + + class URLMap + NEGATIVE_INFINITY = -1.0 / 0.0 + INFINITY = 1.0 / 0.0 + + def initialize(map = {}) + remap(map) + end + + def remap(map) + @mapping = map.map { |location, app| + if location =~ %r{\Ahttps?://(.*?)(/.*)} + host, location = $1, $2 + else + host = nil + end + + unless location[0] == ?/ + raise ArgumentError, "paths need to start with /" + end + + location = location.chomp('/') + match = Regexp.new("^#{Regexp.quote(location).gsub('/', '/+')}(.*)", Regexp::NOENCODING) + + [host, location, match, app] + }.sort_by do |(host, location, _, _)| + [host ? -host.size : INFINITY, -location.size] + end + end + + def call(env) + path = env['PATH_INFO'] + script_name = env['SCRIPT_NAME'] + http_host = env['HTTP_HOST'] + server_name = env['SERVER_NAME'] + server_port = env['SERVER_PORT'] + + is_same_server = casecmp?(http_host, server_name) || + casecmp?(http_host, "#{server_name}:#{server_port}") + + @mapping.each do |host, location, match, app| + unless casecmp?(http_host, host) \ + || casecmp?(server_name, host) \ + || (!host && is_same_server) + next + end + + next unless m = match.match(path.to_s) + + rest = m[1] + next unless !rest || rest.empty? 
|| rest[0] == ?/ + + env['SCRIPT_NAME'] = (script_name + location) + env['PATH_INFO'] = rest + + return app.call(env) + end + + [404, {'Content-Type' => "text/plain", "X-Cascade" => "pass"}, ["Not Found: #{path}"]] + + ensure + env['PATH_INFO'] = path + env['SCRIPT_NAME'] = script_name + end + + private + def casecmp?(v1, v2) + # if both nil, or they're the same string + return true if v1 == v2 + + # if either are nil... (but they're not the same) + return false if v1.nil? + return false if v2.nil? + + # otherwise check they're not case-insensitive the same + v1.casecmp(v2).zero? + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/rack_default.rb b/vendor/cache/puma-fba741b91780/lib/puma/rack_default.rb new file mode 100644 index 000000000..fa7ea2edb --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/rack_default.rb @@ -0,0 +1,24 @@ +# frozen_string_literal: true + +require_relative '../rack/handler/puma' + +# rackup was removed in Rack 3, it is now a separate gem +if Object.const_defined? :Rackup + module Rackup + module Handler + def self.default(options = {}) + ::Rackup::Handler::Puma + end + end + end +elsif Object.const_defined?(:Rack) && Rack.release < '3' + module Rack + module Handler + def self.default(options = {}) + ::Rack::Handler::Puma + end + end + end +else + raise "Rack 3 must be used with the Rackup gem" +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/reactor.rb b/vendor/cache/puma-fba741b91780/lib/puma/reactor.rb new file mode 100644 index 000000000..1d74b4212 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/reactor.rb @@ -0,0 +1,125 @@ +# frozen_string_literal: true + +module Puma + class UnsupportedBackend < StandardError; end + + # Monitors a collection of IO objects, calling a block whenever + # any monitored object either receives data or times out, or when the Reactor shuts down. 
+ # + # The waiting/wake up is performed with nio4r, which will use the appropriate backend (libev, + # Java NIO or just plain IO#select). The call to `NIO::Selector#select` will + # 'wakeup' any IO object that receives data. + # + # This class additionally tracks a timeout for every added object, + # and wakes up any object when its timeout elapses. + # + # The implementation uses a Queue to synchronize adding new objects from the internal select loop. + class Reactor + # Create a new Reactor to monitor IO objects added by #add. + # The provided block will be invoked when an IO has data available to read, + # its timeout elapses, or when the Reactor shuts down. + def initialize(backend, &block) + require 'nio' + valid_backends = [:auto, *::NIO::Selector.backends] + unless valid_backends.include?(backend) + raise ArgumentError.new("unsupported IO selector backend: #{backend} (available backends: #{valid_backends.join(', ')})") + end + + @selector = ::NIO::Selector.new(NIO::Selector.backends.delete(backend)) + @input = Queue.new + @timeouts = [] + @block = block + end + + # Run the internal select loop, using a background thread by default. + def run(background=true) + if background + @thread = Thread.new do + Puma.set_thread_name "reactor" + select_loop + end + else + select_loop + end + end + + # Add a new client to monitor. + # The object must respond to #timeout and #timeout_at. + # Returns false if the reactor is already shut down. + def add(client) + @input << client + @selector.wakeup + true + rescue ClosedQueueError, IOError # Ignore if selector is already closed + false + end + + # Shutdown the reactor, blocking until the background thread is finished. + def shutdown + @input.close + begin + @selector.wakeup + rescue IOError # Ignore if selector is already closed + end + @thread&.join + end + + private + + def select_loop + close_selector = true + begin + until @input.closed? && @input.empty? + # Wakeup any registered object that receives incoming data. 
+ # Block until the earliest timeout or Selector#wakeup is called. + timeout = (earliest = @timeouts.first) && earliest.timeout + @selector.select(timeout) {|mon| wakeup!(mon.value)} + + # Wakeup all objects that timed out. + timed_out = @timeouts.take_while {|t| t.timeout == 0} + timed_out.each { |c| wakeup! c } + + unless @input.empty? + until @input.empty? + client = @input.pop + register(client) if client.io_ok? + end + @timeouts.sort_by!(&:timeout_at) + end + end + rescue StandardError => e + STDERR.puts "Error in reactor loop escaped: #{e.message} (#{e.class})" + STDERR.puts e.backtrace + + # NoMethodError may be rarely raised when calling @selector.select, which + # is odd. Regardless, it may continue for thousands of calls if retried. + # Also, when it raises, @selector.close also raises an error. + if NoMethodError === e + close_selector = false + else + retry + end + end + # Wakeup all remaining objects on shutdown. + @timeouts.each(&@block) + @selector.close if close_selector + end + + # Start monitoring the object. + def register(client) + @selector.register(client.to_io, :r).value = client + @timeouts << client + rescue ArgumentError + # unreadable clients raise error when processed by NIO + end + + # 'Wake up' a monitored object by calling the provided block. + # Stop monitoring the object if the block returns `true`. + def wakeup!(client) + if @block.call client + @selector.deregister client.to_io + @timeouts.delete client + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/request.rb b/vendor/cache/puma-fba741b91780/lib/puma/request.rb new file mode 100644 index 000000000..5f621599e --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/request.rb @@ -0,0 +1,688 @@ +# frozen_string_literal: true + +module Puma + #———————————————————————— DO NOT USE — this class is for internal use only ——— + + + # The methods here are included in Server, but are separated into this file. 
+ # All the methods here pertain to passing the request to the app, then + # writing the response back to the client. + # + # None of the methods here are called externally, with the exception of + # #handle_request, which is called in Server#process_client. + # @version 5.0.3 + # + module Request # :nodoc: + + # Single element array body: smaller bodies are written to io_buffer first, + # then a single write from io_buffer. Larger sizes are written separately. + # Also fixes max size of chunked file body read. + BODY_LEN_MAX = 1_024 * 256 + + # File body: smaller bodies are combined with io_buffer, then written to + # socket. Larger bodies are written separately using `copy_stream` + IO_BODY_MAX = 1_024 * 64 + + # Array body: elements are collected in io_buffer. When io_buffer's size + # exceeds value, they are written to the socket. + IO_BUFFER_LEN_MAX = 1_024 * 512 + + SOCKET_WRITE_ERR_MSG = "Socket timeout writing data" + + CUSTOM_STAT = 'CUSTOM' + + include Puma::Const + + # Takes the request contained in +client+, invokes the Rack application to construct + # the response and writes it back to +client.io+. + # + # It'll return +false+ when the connection is closed, this doesn't mean + # that the response wasn't successful. + # + # It'll return +:async+ if the connection remains open but will be handled + # elsewhere, i.e. the connection has been hijacked by the Rack application. + # + # Finally, it'll return +true+ on keep-alive connections. 
+ # @param client [Puma::Client] + # @param requests [Integer] + # @return [Boolean,:async] + # + def handle_request(client, requests) + env = client.env + io_buffer = client.io_buffer + socket = client.io # io may be a MiniSSL::Socket + app_body = nil + + return false if closed_socket?(socket) + + if client.http_content_length_limit_exceeded + return prepare_response(413, {}, ["Payload Too Large"], requests, client) + end + + normalize_env env, client + + env[PUMA_SOCKET] = socket + + if env[HTTPS_KEY] && socket.peercert + env[PUMA_PEERCERT] = socket.peercert + end + + env[HIJACK_P] = true + env[HIJACK] = client + + env[RACK_INPUT] = client.body + env[RACK_URL_SCHEME] ||= default_server_port(env) == PORT_443 ? HTTPS : HTTP + + if @early_hints + env[EARLY_HINTS] = lambda { |headers| + begin + unless (str = str_early_hints headers).empty? + fast_write_str socket, "HTTP/1.1 103 Early Hints\r\n#{str}\r\n" + end + rescue ConnectionError => e + @log_writer.debug_error e + # noop, if we lost the socket we just won't send the early hints + end + } + end + + req_env_post_parse env + + # A rack extension. If the app writes #call'ables to this + # array, we will invoke them when the request is done. + # + env[RACK_AFTER_REPLY] ||= [] + + begin + if @supported_http_methods == :any || @supported_http_methods.key?(env[REQUEST_METHOD]) + status, headers, app_body = @thread_pool.with_force_shutdown do + @app.call(env) + end + else + @log_writer.log "Unsupported HTTP method used: #{env[REQUEST_METHOD]}" + status, headers, app_body = [501, {}, ["#{env[REQUEST_METHOD]} method is not supported"]] + end + + # app_body needs to always be closed, hold value in case lowlevel_error + # is called + res_body = app_body + + # full hijack, app called env['rack.hijack'] + return :async if client.hijacked + + status = status.to_i + + if status == -1 + unless headers.empty? 
and res_body == [] + raise "async response must have empty headers and body" + end + + return :async + end + rescue ThreadPool::ForceShutdown => e + @log_writer.unknown_error e, client, "Rack app" + @log_writer.log "Detected force shutdown of a thread" + + status, headers, res_body = lowlevel_error(e, env, 503) + rescue Exception => e + @log_writer.unknown_error e, client, "Rack app" + + status, headers, res_body = lowlevel_error(e, env, 500) + end + prepare_response(status, headers, res_body, requests, client) + ensure + io_buffer.reset + uncork_socket client.io + app_body.close if app_body.respond_to? :close + client&.tempfile_close + after_reply = env[RACK_AFTER_REPLY] || [] + begin + after_reply.each { |o| o.call } + rescue StandardError => e + @log_writer.debug_error e + end unless after_reply.empty? + end + + # Assembles the headers and prepares the body for actually sending the + # response via `#fast_write_response`. + # + # @param status [Integer] the status returned by the Rack application + # @param headers [Hash] the headers returned by the Rack application + # @param res_body [Array] the body returned by the Rack application or + # a call to `Server#lowlevel_error` + # @param requests [Integer] number of inline requests handled + # @param client [Puma::Client] + # @return [Boolean,:async] keep-alive status or `:async` + def prepare_response(status, headers, res_body, requests, client) + env = client.env + socket = client.io + io_buffer = client.io_buffer + + return false if closed_socket?(socket) + + # Close the connection after a reasonable number of inline requests + # if the server is at capacity and the listener has a new connection ready. + # This allows Puma to service connections fairly when the number + # of concurrent connections exceeds the size of the threadpool. 
+ force_keep_alive = if @enable_keep_alives + requests < @max_fast_inline || + @thread_pool.busy_threads < @max_threads || + !client.listener.to_io.wait_readable(0) + else + # Always set force_keep_alive to false if the server has keep-alives not enabled. + false + end + + resp_info = str_headers(env, status, headers, res_body, io_buffer, force_keep_alive) + + close_body = false + response_hijack = nil + content_length = resp_info[:content_length] + keep_alive = resp_info[:keep_alive] + + if res_body.respond_to?(:each) && !resp_info[:response_hijack] + # below converts app_body into body, dependent on app_body's characteristics, and + # content_length will be set if it can be determined + if !content_length && !resp_info[:transfer_encoding] && status != 204 + if res_body.respond_to?(:to_ary) && (array_body = res_body.to_ary) && + array_body.is_a?(Array) + body = array_body.compact + content_length = body.sum(&:bytesize) + elsif res_body.is_a?(File) && res_body.respond_to?(:size) + body = res_body + content_length = body.size + elsif res_body.respond_to?(:to_path) && File.readable?(fn = res_body.to_path) + body = File.open fn, 'rb' + content_length = body.size + close_body = true + else + body = res_body + end + elsif !res_body.is_a?(::File) && res_body.respond_to?(:to_path) && + File.readable?(fn = res_body.to_path) + body = File.open fn, 'rb' + content_length = body.size + close_body = true + elsif !res_body.is_a?(::File) && res_body.respond_to?(:filename) && + res_body.respond_to?(:bytesize) && File.readable?(fn = res_body.filename) + # Sprockets::Asset + content_length = res_body.bytesize unless content_length + if (body_str = res_body.to_hash[:source]) + body = [body_str] + else # avoid each and use a File object + body = File.open fn, 'rb' + close_body = true + end + else + body = res_body + end + else + # partial hijack, from Rack spec: + # Servers must ignore the body part of the response tuple when the + # rack.hijack response header is present. 
+ response_hijack = resp_info[:response_hijack] || res_body + end + + line_ending = LINE_END + + cork_socket socket + + if resp_info[:no_body] + # 101 (Switching Protocols) doesn't return here or have content_length, + # it should be using `response_hijack` + unless status == 101 + if content_length && status != 204 + io_buffer.append CONTENT_LENGTH_S, content_length.to_s, line_ending + end + + io_buffer << LINE_END + fast_write_str socket, io_buffer.read_and_reset + socket.flush + return keep_alive + end + else + if content_length + io_buffer.append CONTENT_LENGTH_S, content_length.to_s, line_ending + chunked = false + elsif !response_hijack && resp_info[:allow_chunked] + io_buffer << TRANSFER_ENCODING_CHUNKED + chunked = true + end + end + + io_buffer << line_ending + + # partial hijack, we write headers, then hand the socket to the app via + # response_hijack.call + if response_hijack + fast_write_str socket, io_buffer.read_and_reset + uncork_socket socket + response_hijack.call socket + return :async + end + + fast_write_response socket, body, io_buffer, chunked, content_length.to_i + body.close if close_body + keep_alive + end + + # @param env [Hash] see Puma::Client#env, from request + # @return [Puma::Const::PORT_443,Puma::Const::PORT_80] + # + def default_server_port(env) + if ['on', HTTPS].include?(env[HTTPS_KEY]) || env[HTTP_X_FORWARDED_PROTO].to_s[0...5] == HTTPS || env[HTTP_X_FORWARDED_SCHEME] == HTTPS || env[HTTP_X_FORWARDED_SSL] == "on" + PORT_443 + else + PORT_80 + end + end + + # Used to write 'early hints', 'no body' responses, 'hijacked' responses, + # and body segments (called by `fast_write_response`). + # Writes a string to a socket (normally `Client#io`) using `write_nonblock`. + # Large strings may not be written in one pass, especially if `io` is a + # `MiniSSL::Socket`. 
+ # @param socket [#write_nonblock] the request/response socket + # @param str [String] the string written to the io + # @raise [ConnectionError] + # + def fast_write_str(socket, str) + n = 0 + byte_size = str.bytesize + while n < byte_size + begin + n += socket.write_nonblock(n.zero? ? str : str.byteslice(n..-1)) + rescue Errno::EAGAIN, Errno::EWOULDBLOCK + unless socket.wait_writable WRITE_TIMEOUT + raise ConnectionError, SOCKET_WRITE_ERR_MSG + end + retry + rescue Errno::EPIPE, SystemCallError, IOError + raise ConnectionError, SOCKET_WRITE_ERR_MSG + end + end + end + + # Used to write headers and body. + # Writes to a socket (normally `Client#io`) using `#fast_write_str`. + # Accumulates `body` items into `io_buffer`, then writes to socket. + # @param socket [#write] the response socket + # @param body [Enumerable, File] the body object + # @param io_buffer [Puma::IOBuffer] contains headers + # @param chunked [Boolean] + # @paramn content_length [Integer + # @raise [ConnectionError] + # + def fast_write_response(socket, body, io_buffer, chunked, content_length) + if body.is_a?(::File) && body.respond_to?(:read) + if chunked # would this ever happen? + while chunk = body.read(BODY_LEN_MAX) + io_buffer.append chunk.bytesize.to_s(16), LINE_END, chunk, LINE_END + end + fast_write_str socket, CLOSE_CHUNKED + else + if content_length <= IO_BODY_MAX + io_buffer.write body.read(content_length) + fast_write_str socket, io_buffer.read_and_reset + else + fast_write_str socket, io_buffer.read_and_reset + IO.copy_stream body, socket + end + end + elsif body.is_a?(::Array) && body.length == 1 + body_first = nil + # using body_first = body.first causes issues? 
+ body.each { |str| body_first ||= str } + + if body_first.is_a?(::String) && body_first.bytesize < BODY_LEN_MAX + # smaller body, write to io_buffer first + io_buffer.write body_first + fast_write_str socket, io_buffer.read_and_reset + else + # large body, write both header & body to socket + fast_write_str socket, io_buffer.read_and_reset + fast_write_str socket, body_first + end + elsif body.is_a?(::Array) + # for array bodies, flush io_buffer to socket when size is greater than + # IO_BUFFER_LEN_MAX + if chunked + body.each do |part| + next if (byte_size = part.bytesize).zero? + io_buffer.append byte_size.to_s(16), LINE_END, part, LINE_END + if io_buffer.length > IO_BUFFER_LEN_MAX + fast_write_str socket, io_buffer.read_and_reset + end + end + io_buffer.write CLOSE_CHUNKED + else + body.each do |part| + next if part.bytesize.zero? + io_buffer.write part + if io_buffer.length > IO_BUFFER_LEN_MAX + fast_write_str socket, io_buffer.read_and_reset + end + end + end + # may write last body part for non-chunked, also headers if array is empty + fast_write_str(socket, io_buffer.read_and_reset) unless io_buffer.length.zero? + else + # for enum bodies + if chunked + empty_body = true + body.each do |part| + next if part.nil? || (byte_size = part.bytesize).zero? + empty_body = false + io_buffer.append byte_size.to_s(16), LINE_END, part, LINE_END + fast_write_str socket, io_buffer.read_and_reset + end + if empty_body + io_buffer << CLOSE_CHUNKED + fast_write_str socket, io_buffer.read_and_reset + else + fast_write_str socket, CLOSE_CHUNKED + end + else + fast_write_str socket, io_buffer.read_and_reset + body.each do |part| + next if part.bytesize.zero? 
+ fast_write_str socket, part + end + end + end + socket.flush + rescue Errno::EAGAIN, Errno::EWOULDBLOCK + raise ConnectionError, SOCKET_WRITE_ERR_MSG + rescue Errno::EPIPE, SystemCallError, IOError + raise ConnectionError, SOCKET_WRITE_ERR_MSG + end + + private :fast_write_str, :fast_write_response + + # Given a Hash +env+ for the request read from +client+, add + # and fixup keys to comply with Rack's env guidelines. + # @param env [Hash] see Puma::Client#env, from request + # @param client [Puma::Client] only needed for Client#peerip + # + def normalize_env(env, client) + if host = env[HTTP_HOST] + # host can be a hostname, ipv4 or bracketed ipv6. Followed by an optional port. + if colon = host.rindex("]:") # IPV6 with port + env[SERVER_NAME] = host[0, colon+1] + env[SERVER_PORT] = host[colon+2, host.bytesize] + elsif !host.start_with?("[") && colon = host.index(":") # not hostname or IPV4 with port + env[SERVER_NAME] = host[0, colon] + env[SERVER_PORT] = host[colon+1, host.bytesize] + else + env[SERVER_NAME] = host + env[SERVER_PORT] = default_server_port(env) + end + else + env[SERVER_NAME] = LOCALHOST + env[SERVER_PORT] = default_server_port(env) + end + + unless env[REQUEST_PATH] + # it might be a dumbass full host request header + uri = begin + URI.parse(env[REQUEST_URI]) + rescue URI::InvalidURIError + raise Puma::HttpParserError + end + env[REQUEST_PATH] = uri.path + + # A nil env value will cause a LintError (and fatal errors elsewhere), + # so only set the env value if there actually is a value. + env[QUERY_STRING] = uri.query if uri.query + end + + env[PATH_INFO] = env[REQUEST_PATH].to_s # #to_s in case it's nil + + # From https://www.ietf.org/rfc/rfc3875 : + # "Script authors should be aware that the REMOTE_ADDR and + # REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9) + # may not identify the ultimate source of the request. 
+ # They identify the client for the immediate request to the + # server; that client may be a proxy, gateway, or other + # intermediary acting on behalf of the actual source client." + # + + unless env.key?(REMOTE_ADDR) + begin + addr = client.peerip + rescue Errno::ENOTCONN + # Client disconnects can result in an inability to get the + # peeraddr from the socket; default to unspec. + if client.peer_family == Socket::AF_INET6 + addr = UNSPECIFIED_IPV6 + else + addr = UNSPECIFIED_IPV4 + end + end + + # Set unix socket addrs to localhost + if addr.empty? + if client.peer_family == Socket::AF_INET6 + addr = LOCALHOST_IPV6 + else + addr = LOCALHOST_IPV4 + end + end + + env[REMOTE_ADDR] = addr + end + + # The legacy HTTP_VERSION header can be sent as a client header. + # Rack v4 may remove using HTTP_VERSION. If so, remove this line. + env[HTTP_VERSION] = env[SERVER_PROTOCOL] + end + private :normalize_env + + # @param header_key [#to_s] + # @return [Boolean] + # + def illegal_header_key?(header_key) + !!(ILLEGAL_HEADER_KEY_REGEX =~ header_key.to_s) + end + + # @param header_value [#to_s] + # @return [Boolean] + # + def illegal_header_value?(header_value) + !!(ILLEGAL_HEADER_VALUE_REGEX =~ header_value.to_s) + end + private :illegal_header_key?, :illegal_header_value? + + # Fixup any headers with `,` in the name to have `_` now. We emit + # headers with `,` in them during the parse phase to avoid ambiguity + # with the `-` to `_` conversion for critical headers. But here for + # compatibility, we'll convert them back. This code is written to + # avoid allocation in the common case (ie there are no headers + # with `,` in their names), that's why it has the extra conditionals. + # + # @note If a normalized version of a `,` header already exists, we ignore + # the `,` version. This prevents clobbering headers managed by proxies + # but not by clients (Like X-Forwarded-For). 
+ # + # @param env [Hash] see Puma::Client#env, from request, modifies in place + # @version 5.0.3 + # + def req_env_post_parse(env) + to_delete = nil + to_add = nil + + env.each do |k,v| + if k.start_with?("HTTP_") && k.include?(",") && !UNMASKABLE_HEADERS.key?(k) + if to_delete + to_delete << k + else + to_delete = [k] + end + + new_k = k.tr(",", "_") + if env.key?(new_k) + next + end + + unless to_add + to_add = {} + end + + to_add[new_k] = v + end + end + + if to_delete # rubocop:disable Style/SafeNavigation + to_delete.each { |k| env.delete(k) } + end + + if to_add + env.merge! to_add + end + end + private :req_env_post_parse + + # Used in the lambda for env[ `Puma::Const::EARLY_HINTS` ] + # @param headers [Hash] the headers returned by the Rack application + # @return [String] + # @version 5.0.3 + # + def str_early_hints(headers) + eh_str = +"" + headers.each_pair do |k, vs| + next if illegal_header_key?(k) + + if vs.respond_to?(:to_s) && !vs.to_s.empty? + vs.to_s.split(NEWLINE).each do |v| + next if illegal_header_value?(v) + eh_str << "#{k}: #{v}\r\n" + end + elsif !(vs.to_s.empty? || !illegal_header_value?(vs)) + eh_str << "#{k}: #{vs}\r\n" + end + end + eh_str.freeze + end + private :str_early_hints + + # @param status [Integer] status from the app + # @return [String] the text description from Puma::HTTP_STATUS_CODES + # + def fetch_status_code(status) + HTTP_STATUS_CODES.fetch(status) { CUSTOM_STAT } + end + private :fetch_status_code + + # Processes and write headers to the IOBuffer. 
+ # @param env [Hash] see Puma::Client#env, from request + # @param status [Integer] the status returned by the Rack application + # @param headers [Hash] the headers returned by the Rack application + # @param content_length [Integer,nil] content length if it can be determined from the + # response body + # @param io_buffer [Puma::IOBuffer] modified inn place + # @param force_keep_alive [Boolean] 'anded' with keep_alive, based on system + # status and `@max_fast_inline` + # @return [Hash] resp_info + # @version 5.0.3 + # + def str_headers(env, status, headers, res_body, io_buffer, force_keep_alive) + + line_ending = LINE_END + colon = COLON + + resp_info = {} + resp_info[:no_body] = env[REQUEST_METHOD] == HEAD + + http_11 = env[SERVER_PROTOCOL] == HTTP_11 + if http_11 + resp_info[:allow_chunked] = true + resp_info[:keep_alive] = env.fetch(HTTP_CONNECTION, "").downcase != CLOSE + + # An optimization. The most common response is 200, so we can + # reply with the proper 200 status without having to compute + # the response header. 
+ # + if status == 200 + io_buffer << HTTP_11_200 + else + io_buffer.append "#{HTTP_11} #{status} ", fetch_status_code(status), line_ending + + resp_info[:no_body] ||= status < 200 || STATUS_WITH_NO_ENTITY_BODY[status] + end + else + resp_info[:allow_chunked] = false + resp_info[:keep_alive] = env.fetch(HTTP_CONNECTION, "").downcase == KEEP_ALIVE + + # Same optimization as above for HTTP/1.1 + # + if status == 200 + io_buffer << HTTP_10_200 + else + io_buffer.append "HTTP/1.0 #{status} ", + fetch_status_code(status), line_ending + + resp_info[:no_body] ||= status < 200 || STATUS_WITH_NO_ENTITY_BODY[status] + end + end + + # regardless of what the client wants, we always close the connection + # if running without request queueing + resp_info[:keep_alive] &&= @queue_requests + + # see prepare_response + resp_info[:keep_alive] &&= force_keep_alive + + resp_info[:response_hijack] = nil + + headers.each do |k, vs| + next if illegal_header_key?(k) + + case k.downcase + when CONTENT_LENGTH2 + next if illegal_header_value?(vs) + # nil.to_i is 0, nil&.to_i is nil + resp_info[:content_length] = vs&.to_i + next + when TRANSFER_ENCODING + resp_info[:allow_chunked] = false + resp_info[:content_length] = nil + resp_info[:transfer_encoding] = vs + when HIJACK + resp_info[:response_hijack] = vs + next + when BANNED_HEADER_KEY + next + end + + ary = if vs.is_a?(::Array) && !vs.empty? + vs + elsif vs.respond_to?(:to_s) && !vs.to_s.empty? + vs.to_s.split NEWLINE + else + nil + end + if ary + ary.each do |v| + next if illegal_header_value?(v) + io_buffer.append k, colon, v, line_ending + end + else + io_buffer.append k, colon, line_ending + end + end + + # HTTP/1.1 & 1.0 assume different defaults: + # - HTTP 1.0 assumes the connection will be closed if not specified + # - HTTP 1.1 assumes the connection will be kept alive if not specified. 
+ # Only set the header if we're doing something which is not the default + # for this protocol version + if http_11 + io_buffer << CONNECTION_CLOSE if !resp_info[:keep_alive] + else + io_buffer << CONNECTION_KEEP_ALIVE if resp_info[:keep_alive] + end + resp_info + end + private :str_headers + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/runner.rb b/vendor/cache/puma-fba741b91780/lib/puma/runner.rb new file mode 100644 index 000000000..fb9a78438 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/runner.rb @@ -0,0 +1,217 @@ +# frozen_string_literal: true + +require_relative 'server' +require_relative 'const' + +module Puma + # Generic class that is used by `Puma::Cluster` and `Puma::Single` to + # serve requests. This class spawns a new instance of `Puma::Server` via + # a call to `start_server`. + class Runner + def initialize(launcher) + @launcher = launcher + @log_writer = launcher.log_writer + @events = launcher.events + @config = launcher.config + @options = launcher.options + @app = nil + @control = nil + @started_at = Time.now + @wakeup = nil + end + + # Returns the hash of configuration options. + # @return [Puma::UserFileDefaultOptions] + attr_reader :options + + def wakeup! + return unless @wakeup + + @wakeup.write Puma::Const::PipeRequest::WAKEUP unless @wakeup.closed? + + rescue SystemCallError, IOError + Puma::Util.purge_interrupt_queue + end + + def development? + @options[:environment] == "development" + end + + def test? + @options[:environment] == "test" + end + + def log(str) + @log_writer.log str + end + + # @version 5.0.0 + def stop_control + @control&.stop true + end + + def error(str) + @log_writer.error str + end + + def debug(str) + @log_writer.log "- #{str}" if @options[:debug] + end + + def start_control + str = @options[:control_url] + return unless str + + require_relative 'app/status' + + if token = @options[:control_auth_token] + token = nil if token.empty? 
|| token == 'none' + end + + app = Puma::App::Status.new @launcher, token + + # A Reactor is not created and nio4r is not loaded when 'queue_requests: false' + # Use `nil` for events, no hooks in control server + control = Puma::Server.new app, nil, + { min_threads: 0, max_threads: 1, queue_requests: false, log_writer: @log_writer } + + begin + control.binder.parse [str], nil, 'Starting control server' + rescue Errno::EADDRINUSE, Errno::EACCES => e + raise e, "Error: Control server address '#{str}' is already in use. Original error: #{e.message}" + end + + control.run thread_name: 'ctl' + @control = control + end + + # @version 5.0.0 + def close_control_listeners + @control.binder.close_listeners if @control + end + + # @!attribute [r] ruby_engine + # @deprecated Use `RUBY_DESCRIPTION` instead + def ruby_engine + warn "Puma::Runner#ruby_engine is deprecated; use RUBY_DESCRIPTION instead. It will be removed in puma v7." + + if !defined?(RUBY_ENGINE) || RUBY_ENGINE == "ruby" + "ruby #{RUBY_VERSION}-p#{RUBY_PATCHLEVEL}" + else + if defined?(RUBY_ENGINE_VERSION) + "#{RUBY_ENGINE} #{RUBY_ENGINE_VERSION} - ruby #{RUBY_VERSION}" + else + "#{RUBY_ENGINE} #{RUBY_VERSION}" + end + end + end + + def output_header(mode) + min_t = @options[:min_threads] + max_t = @options[:max_threads] + environment = @options[:environment] + + log "Puma starting in #{mode} mode..." + log "* Puma version: #{Puma::Const::PUMA_VERSION} (\"#{Puma::Const::CODE_NAME}\")" + log "* Ruby version: #{RUBY_DESCRIPTION}" + log "* Min threads: #{min_t}" + log "* Max threads: #{max_t}" + log "* Environment: #{environment}" + + if mode == "cluster" + log "* Master PID: #{Process.pid}" + else + log "* PID: #{Process.pid}" + end + end + + def redirected_io? 
+ @options[:redirect_stdout] || @options[:redirect_stderr] + end + + def redirect_io + stdout = @options[:redirect_stdout] + stderr = @options[:redirect_stderr] + append = @options[:redirect_append] + + if stdout + ensure_output_directory_exists(stdout, 'STDOUT') + + STDOUT.reopen stdout, (append ? "a" : "w") + STDOUT.puts "=== puma startup: #{Time.now} ===" + STDOUT.flush unless STDOUT.sync + end + + if stderr + ensure_output_directory_exists(stderr, 'STDERR') + + STDERR.reopen stderr, (append ? "a" : "w") + STDERR.puts "=== puma startup: #{Time.now} ===" + STDERR.flush unless STDERR.sync + end + + if @options[:mutate_stdout_and_stderr_to_sync_on_write] + STDOUT.sync = true + STDERR.sync = true + end + end + + def load_and_bind + unless @config.app_configured? + error "No application configured, nothing to run" + exit 1 + end + + begin + @app = @config.app + rescue Exception => e + log "! Unable to load application: #{e.class}: #{e.message}" + raise e + end + + @launcher.binder.parse @options[:binds] + end + + # @!attribute [r] app + def app + @app ||= @config.app + end + + def start_server + server = Puma::Server.new(app, @events, @options) + server.inherit_binder(@launcher.binder) + server + end + + private + def ensure_output_directory_exists(path, io_name) + unless Dir.exist?(File.dirname(path)) + raise "Cannot redirect #{io_name} to #{path}" + end + end + + def utc_iso8601(val) + "#{val.utc.strftime '%FT%T'}Z" + end + + def stats + { + versions: { + puma: Puma::Const::PUMA_VERSION, + ruby: { + engine: RUBY_ENGINE, + version: RUBY_VERSION, + patchlevel: RUBY_PATCHLEVEL + } + } + } + end + + # this method call should always be guarded by `@log_writer.debug?` + def debug_loaded_extensions(str) + @log_writer.debug "────────────────────────────────── #{str}" + re_ext = /\.#{RbConfig::CONFIG['DLEXT']}\z/i + $LOADED_FEATURES.grep(re_ext).each { |f| @log_writer.debug(" #{f}") } + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/sd_notify.rb 
b/vendor/cache/puma-fba741b91780/lib/puma/sd_notify.rb new file mode 100644 index 000000000..f879a4970 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/sd_notify.rb @@ -0,0 +1,149 @@ +# frozen_string_literal: true + +require "socket" + +module Puma + # The MIT License + # + # Copyright (c) 2017-2022 Agis Anastasopoulos + # + # Permission is hereby granted, free of charge, to any person obtaining a copy of + # this software and associated documentation files (the "Software"), to deal in + # the Software without restriction, including without limitation the rights to + # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + # the Software, and to permit persons to whom the Software is furnished to do so, + # subject to the following conditions: + # + # The above copyright notice and this permission notice shall be included in all + # copies or substantial portions of the Software. + # + # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + # + # This is a copy of https://github.com/agis/ruby-sdnotify as of commit cca575c + # The only changes made was "rehoming" it within the Puma module to avoid + # namespace collisions and applying standard's code formatting style. + # + # SdNotify is a pure-Ruby implementation of sd_notify(3). It can be used to + # notify systemd about state changes. Methods of this package are no-op on + # non-systemd systems (eg. Darwin). 
+ # + # The API maps closely to the original implementation of sd_notify(3), + # therefore be sure to check the official man pages prior to using SdNotify. + # + # @see https://www.freedesktop.org/software/systemd/man/sd_notify.html + module SdNotify + # Exception raised when there's an error writing to the notification socket + class NotifyError < RuntimeError; end + + READY = "READY=1" + RELOADING = "RELOADING=1" + STOPPING = "STOPPING=1" + STATUS = "STATUS=" + ERRNO = "ERRNO=" + MAINPID = "MAINPID=" + WATCHDOG = "WATCHDOG=1" + FDSTORE = "FDSTORE=1" + + def self.ready(unset_env=false) + notify(READY, unset_env) + end + + def self.reloading(unset_env=false) + notify(RELOADING, unset_env) + end + + def self.stopping(unset_env=false) + notify(STOPPING, unset_env) + end + + # @param status [String] a custom status string that describes the current + # state of the service + def self.status(status, unset_env=false) + notify("#{STATUS}#{status}", unset_env) + end + + # @param errno [Integer] + def self.errno(errno, unset_env=false) + notify("#{ERRNO}#{errno}", unset_env) + end + + # @param pid [Integer] + def self.mainpid(pid, unset_env=false) + notify("#{MAINPID}#{pid}", unset_env) + end + + def self.watchdog(unset_env=false) + notify(WATCHDOG, unset_env) + end + + def self.fdstore(unset_env=false) + notify(FDSTORE, unset_env) + end + + # @param [Boolean] true if the service manager expects watchdog keep-alive + # notification messages to be sent from this process. + # + # If the $WATCHDOG_USEC environment variable is set, + # and the $WATCHDOG_PID variable is unset or set to the PID of the current + # process + # + # @note Unlike sd_watchdog_enabled(3), this method does not mutate the + # environment. + def self.watchdog? 
+ wd_usec = ENV["WATCHDOG_USEC"] + wd_pid = ENV["WATCHDOG_PID"] + + return false if !wd_usec + + begin + wd_usec = Integer(wd_usec) + rescue + return false + end + + return false if wd_usec <= 0 + return true if !wd_pid || wd_pid == $$.to_s + + false + end + + # Notify systemd with the provided state, via the notification socket, if + # any. + # + # Generally this method will be used indirectly through the other methods + # of the library. + # + # @param state [String] + # @param unset_env [Boolean] + # + # @return [Fixnum, nil] the number of bytes written to the notification + # socket or nil if there was no socket to report to (eg. the program wasn't + # started by systemd) + # + # @raise [NotifyError] if there was an error communicating with the systemd + # socket + # + # @see https://www.freedesktop.org/software/systemd/man/sd_notify.html + def self.notify(state, unset_env=false) + sock = ENV["NOTIFY_SOCKET"] + + return nil if !sock + + ENV.delete("NOTIFY_SOCKET") if unset_env + + begin + Addrinfo.unix(sock, :DGRAM).connect do |s| + s.close_on_exec = true + s.write(state) + end + rescue StandardError => e + raise NotifyError, "#{e.class}: #{e.message}", e.backtrace + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/server.rb b/vendor/cache/puma-fba741b91780/lib/puma/server.rb new file mode 100644 index 000000000..2f6f91f51 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/server.rb @@ -0,0 +1,684 @@ +# frozen_string_literal: true + +require 'stringio' + +require_relative 'thread_pool' +require_relative 'const' +require_relative 'log_writer' +require_relative 'events' +require_relative 'null_io' +require_relative 'reactor' +require_relative 'client' +require_relative 'binder' +require_relative 'util' +require_relative 'request' + +require 'socket' +require 'io/wait' unless Puma::HAS_NATIVE_IO_WAIT + +module Puma + + # This method was private on Ruby 2.4 but became public on Ruby 2.5+: + Thread.send(:attr_accessor, :puma_server) 
+ + # The HTTP Server itself. Serves out a single Rack app. + # + # This class is used by the `Puma::Single` and `Puma::Cluster` classes + # to generate one or more `Puma::Server` instances capable of handling requests. + # Each Puma process will contain one `Puma::Server` instance. + # + # The `Puma::Server` instance pulls requests from the socket, adds them to a + # `Puma::Reactor` where they get eventually passed to a `Puma::ThreadPool`. + # + # Each `Puma::Server` will have one reactor and one thread pool. + class Server + include Puma::Const + include Request + + attr_reader :options + attr_reader :thread + attr_reader :log_writer + attr_reader :events + attr_reader :min_threads, :max_threads # for #stats + attr_reader :requests_count # @version 5.0.0 + + # @todo the following may be deprecated in the future + attr_reader :auto_trim_time, :early_hints, :first_data_timeout, + :leak_stack_on_error, + :persistent_timeout, :reaping_time + + attr_accessor :app + attr_accessor :binder + + + # Create a server for the rack app +app+. + # + # +log_writer+ is a Puma::LogWriter object used to log info and error messages. + # + # +events+ is a Puma::Events object used to notify application status events. + # + # Server#run returns a thread that you can join on to wait for the server + # to do its work. + # + # @note Several instance variables exist so they are available for testing, + # and have default values set via +fetch+. Normally the values are set via + # `::Puma::Configuration.puma_default_options`. + # + # @note The `events` parameter is set to nil, and set to `Events.new` in code. + # Often `options` needs to be passed, but `events` does not. Using nil allows + # calling code to not require events.rb. 
+ # + def initialize(app, events = nil, options = {}) + @app = app + @events = events || Events.new + + @check, @notify = nil + @status = :stop + + @thread = nil + @thread_pool = nil + + @options = if options.is_a?(UserFileDefaultOptions) + options + else + UserFileDefaultOptions.new(options, Configuration::DEFAULTS) + end + + @clustered = (@options.fetch :workers, 0) > 0 + @worker_write = @options[:worker_write] + @log_writer = @options.fetch :log_writer, LogWriter.stdio + @early_hints = @options[:early_hints] + @first_data_timeout = @options[:first_data_timeout] + @persistent_timeout = @options[:persistent_timeout] + @idle_timeout = @options[:idle_timeout] + @min_threads = @options[:min_threads] + @max_threads = @options[:max_threads] + @queue_requests = @options[:queue_requests] + @max_fast_inline = @options[:max_fast_inline] + @enable_keep_alives = @options[:enable_keep_alives] + @io_selector_backend = @options[:io_selector_backend] + @http_content_length_limit = @options[:http_content_length_limit] + + # make this a hash, since we prefer `key?` over `include?` + @supported_http_methods = + if @options[:supported_http_methods] == :any + :any + else + if (ary = @options[:supported_http_methods]) + ary + else + SUPPORTED_HTTP_METHODS + end.sort.product([nil]).to_h.freeze + end + + temp = !!(@options[:environment] =~ /\A(development|test)\z/) + @leak_stack_on_error = @options[:environment] ? temp : true + + @binder = Binder.new(log_writer) + + ENV['RACK_ENV'] ||= "development" + + @mode = :http + + @precheck_closing = true + + @requests_count = 0 + + @idle_timeout_reached = false + end + + def inherit_binder(bind) + @binder = bind + end + + class << self + # @!attribute [r] current + def current + Thread.current.puma_server + end + + # :nodoc: + # @version 5.0.0 + def tcp_cork_supported? + Socket.const_defined?(:TCP_CORK) && Socket.const_defined?(:IPPROTO_TCP) + end + + # :nodoc: + # @version 5.0.0 + def closed_socket_supported? 
+ Socket.const_defined?(:TCP_INFO) && Socket.const_defined?(:IPPROTO_TCP) + end + private :tcp_cork_supported? + private :closed_socket_supported? + end + + # On Linux, use TCP_CORK to better control how the TCP stack + # packetizes our stream. This improves both latency and throughput. + # socket parameter may be an MiniSSL::Socket, so use to_io + # + if tcp_cork_supported? + # 6 == Socket::IPPROTO_TCP + # 3 == TCP_CORK + # 1/0 == turn on/off + def cork_socket(socket) + skt = socket.to_io + begin + skt.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 1) if skt.kind_of? TCPSocket + rescue IOError, SystemCallError + Puma::Util.purge_interrupt_queue + end + end + + def uncork_socket(socket) + skt = socket.to_io + begin + skt.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 0) if skt.kind_of? TCPSocket + rescue IOError, SystemCallError + Puma::Util.purge_interrupt_queue + end + end + else + def cork_socket(socket) + end + + def uncork_socket(socket) + end + end + + if closed_socket_supported? + UNPACK_TCP_STATE_FROM_TCP_INFO = "C".freeze + + def closed_socket?(socket) + skt = socket.to_io + return false unless skt.kind_of?(TCPSocket) && @precheck_closing + + begin + tcp_info = skt.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_INFO) + rescue IOError, SystemCallError + Puma::Util.purge_interrupt_queue + @precheck_closing = false + false + else + state = tcp_info.unpack(UNPACK_TCP_STATE_FROM_TCP_INFO)[0] + # TIME_WAIT: 6, CLOSE: 7, CLOSE_WAIT: 8, LAST_ACK: 9, CLOSING: 11 + (state >= 6 && state <= 9) || state == 11 + end + end + else + def closed_socket?(socket) + false + end + end + + # @!attribute [r] backlog + def backlog + @thread_pool&.backlog + end + + # @!attribute [r] running + def running + @thread_pool&.spawned + end + + + # This number represents the number of requests that + # the server is capable of taking right now. + # + # For example if the number is 5 then it means + # there are 5 threads sitting idle ready to take + # a request. 
If one request comes in, then the + # value would be 4 until it finishes processing. + # @!attribute [r] pool_capacity + def pool_capacity + @thread_pool&.pool_capacity + end + + # Runs the server. + # + # If +background+ is true (the default) then a thread is spun + # up in the background to handle requests. Otherwise requests + # are handled synchronously. + # + def run(background=true, thread_name: 'srv') + BasicSocket.do_not_reverse_lookup = true + + @events.fire :state, :booting + + @status = :run + + @thread_pool = ThreadPool.new(thread_name, options) { |client| process_client client } + + if @queue_requests + @reactor = Reactor.new(@io_selector_backend) { |c| reactor_wakeup c } + @reactor.run + end + + + @thread_pool.auto_reap! if options[:reaping_time] + @thread_pool.auto_trim! if options[:auto_trim_time] + + @check, @notify = Puma::Util.pipe unless @notify + + @events.fire :state, :running + + if background + @thread = Thread.new do + Puma.set_thread_name thread_name + handle_servers + end + return @thread + else + handle_servers + end + end + + # This method is called from the Reactor thread when a queued Client receives data, + # times out, or when the Reactor is shutting down. + # + # It is responsible for ensuring that a request has been completely received + # before it starts to be processed by the ThreadPool. This may be known as read buffering. + # If read buffering is not done, and no other read buffering is performed (such as by an application server + # such as nginx) then the application would be subject to a slow client attack. + # + # For a graphical representation of how the request buffer works see [architecture.md](https://github.com/puma/puma/blob/master/docs/architecture.md#connection-pipeline). + # + # The method checks to see if it has the full header and body with + # the `Puma::Client#try_to_finish` method. 
If the full request has been sent, + # then the request is passed to the ThreadPool (`@thread_pool << client`) + # so that a "worker thread" can pick up the request and begin to execute application logic. + # The Client is then removed from the reactor (return `true`). + # + # If a client object times out, a 408 response is written, its connection is closed, + # and the object is removed from the reactor (return `true`). + # + # If the Reactor is shutting down, all Clients are either timed out or passed to the + # ThreadPool, depending on their current state (#can_close?). + # + # Otherwise, if the full request is not ready then the client will remain in the reactor + # (return `false`). When the client sends more data to the socket the `Puma::Client` object + # will wake up and again be checked to see if it's ready to be passed to the thread pool. + def reactor_wakeup(client) + shutdown = !@queue_requests + if client.try_to_finish || (shutdown && !client.can_close?) + @thread_pool << client + elsif shutdown || client.timeout == 0 + client.timeout! + else + client.set_timeout(@first_data_timeout) + false + end + rescue StandardError => e + client_error(e, client) + client.close + true + end + + def handle_servers + begin + check = @check + sockets = [check] + @binder.ios + pool = @thread_pool + queue_requests = @queue_requests + drain = options[:drain_on_shutdown] ? 0 : nil + + addr_send_name, addr_value = case options[:remote_address] + when :value + [:peerip=, options[:remote_address_value]] + when :header + [:remote_addr_header=, options[:remote_address_header]] + when :proxy_protocol + [:expect_proxy_proto=, options[:remote_address_proxy_protocol]] + else + [nil, nil] + end + + while @status == :run || (drain && shutting_down?) + begin + ios = IO.select sockets, nil, nil, (shutting_down? ? 0 : @idle_timeout) + unless ios + unless shutting_down? 
+ @idle_timeout_reached = true + + if @clustered + @worker_write << "#{PipeRequest::IDLE}#{Process.pid}\n" rescue nil + next + else + @log_writer.log "- Idle timeout reached" + @status = :stop + end + end + + break + end + + if @idle_timeout_reached && @clustered + @idle_timeout_reached = false + @worker_write << "#{PipeRequest::IDLE}#{Process.pid}\n" rescue nil + end + + ios.first.each do |sock| + if sock == check + break if handle_check + else + pool.wait_until_not_full + pool.wait_for_less_busy_worker(options[:wait_for_less_busy_worker]) if @clustered + + io = begin + sock.accept_nonblock + rescue IO::WaitReadable + next + end + drain += 1 if shutting_down? + pool << Client.new(io, @binder.env(sock)).tap { |c| + c.listener = sock + c.http_content_length_limit = @http_content_length_limit + c.send(addr_send_name, addr_value) if addr_value + } + end + end + rescue IOError, Errno::EBADF + # In the case that any of the sockets are unexpectedly close. + raise + rescue StandardError => e + @log_writer.unknown_error e, nil, "Listen loop" + end + end + + @log_writer.debug "Drained #{drain} additional connections." if drain + @events.fire :state, @status + + if queue_requests + @queue_requests = false + @reactor.shutdown + end + + graceful_shutdown if @status == :stop || @status == :restart + rescue Exception => e + @log_writer.unknown_error e, nil, "Exception handling servers" + ensure + # Errno::EBADF is infrequently raised + [@check, @notify].each do |io| + begin + io.close unless io.closed? 
+ rescue Errno::EBADF + end + end + @notify = nil + @check = nil + end + + @events.fire :state, :done + end + + # :nodoc: + def handle_check + cmd = @check.read(1) + + case cmd + when STOP_COMMAND + @status = :stop + return true + when HALT_COMMAND + @status = :halt + return true + when RESTART_COMMAND + @status = :restart + return true + end + + false + end + + # Given a connection on +client+, handle the incoming requests, + # or queue the connection in the Reactor if no request is available. + # + # This method is called from a ThreadPool worker thread. + # + # This method supports HTTP Keep-Alive so it may, depending on if the client + # indicates that it supports keep alive, wait for another request before + # returning. + # + # Return true if one or more requests were processed. + def process_client(client) + # Advertise this server into the thread + Thread.current.puma_server = self + + clean_thread_locals = options[:clean_thread_locals] + close_socket = true + + requests = 0 + + begin + if @queue_requests && + !client.eagerly_finish + + client.set_timeout(@first_data_timeout) + if @reactor.add client + close_socket = false + return false + end + end + + with_force_shutdown(client) do + client.finish(@first_data_timeout) + end + + while true + @requests_count += 1 + case handle_request(client, requests + 1) + when false + break + when :async + close_socket = false + break + when true + ThreadPool.clean_thread_locals if clean_thread_locals + + requests += 1 + + # As an optimization, try to read the next request from the + # socket for a short time before returning to the reactor. + fast_check = @status == :run + + # Always pass the client back to the reactor after a reasonable + # number of inline requests if there are other requests pending. 
+ fast_check = false if requests >= @max_fast_inline && + @thread_pool.backlog > 0 + + next_request_ready = with_force_shutdown(client) do + client.reset(fast_check) + end + + unless next_request_ready + break unless @queue_requests + client.set_timeout @persistent_timeout + if @reactor.add client + close_socket = false + break + end + end + end + end + true + rescue StandardError => e + client_error(e, client, requests) + # The ensure tries to close +client+ down + requests > 0 + ensure + client.io_buffer.reset + + begin + client.close if close_socket + rescue IOError, SystemCallError + Puma::Util.purge_interrupt_queue + # Already closed + rescue StandardError => e + @log_writer.unknown_error e, nil, "Client" + end + end + end + + # Triggers a client timeout if the thread-pool shuts down + # during execution of the provided block. + def with_force_shutdown(client, &block) + @thread_pool.with_force_shutdown(&block) + rescue ThreadPool::ForceShutdown + client.timeout! + end + + # :nocov: + + # Handle various error types thrown by Client I/O operations. + def client_error(e, client, requests = 1) + # Swallow, do not log + return if [ConnectionError, EOFError].include?(e.class) + + case e + when MiniSSL::SSLError + lowlevel_error(e, client.env) + @log_writer.ssl_error e, client.io + when HttpParserError + response_to_error(client, requests, e, 400) + @log_writer.parse_error e, client + when HttpParserError501 + response_to_error(client, requests, e, 501) + @log_writer.parse_error e, client + else + response_to_error(client, requests, e, 500) + @log_writer.unknown_error e, nil, "Read" + end + end + + # A fallback rack response if +@app+ raises as exception. + # + def lowlevel_error(e, env, status=500) + if handler = options[:lowlevel_error_handler] + if handler.arity == 1 + return handler.call(e) + elsif handler.arity == 2 + return handler.call(e, env) + else + return handler.call(e, env, status) + end + end + + if @leak_stack_on_error + backtrace = e.backtrace.nil? ? 
'' : e.backtrace.join("\n") + [status, {}, ["Puma caught this error: #{e.message} (#{e.class})\n#{backtrace}"]] + else + [status, {}, [""]] + end + end + + def response_to_error(client, requests, err, status_code) + status, headers, res_body = lowlevel_error(err, client.env, status_code) + prepare_response(status, headers, res_body, requests, client) + end + private :response_to_error + + # Wait for all outstanding requests to finish. + # + def graceful_shutdown + if options[:shutdown_debug] + threads = Thread.list + total = threads.size + + pid = Process.pid + + $stdout.syswrite "#{pid}: === Begin thread backtrace dump ===\n" + + threads.each_with_index do |t,i| + $stdout.syswrite "#{pid}: Thread #{i+1}/#{total}: #{t.inspect}\n" + $stdout.syswrite "#{pid}: #{t.backtrace.join("\n#{pid}: ")}\n\n" + end + $stdout.syswrite "#{pid}: === End thread backtrace dump ===\n" + end + + if @status != :restart + @binder.close + end + + if @thread_pool + if timeout = options[:force_shutdown_after] + @thread_pool.shutdown timeout.to_f + else + @thread_pool.shutdown + end + end + end + + def notify_safely(message) + @notify << message + rescue IOError, NoMethodError, Errno::EPIPE, Errno::EBADF + # The server, in another thread, is shutting down + Puma::Util.purge_interrupt_queue + rescue RuntimeError => e + # Temporary workaround for https://bugs.ruby-lang.org/issues/13239 + if e.message.include?('IOError') + Puma::Util.purge_interrupt_queue + else + raise e + end + end + private :notify_safely + + # Stops the acceptor thread and then causes the worker threads to finish + # off the request queue before finally exiting. + + def stop(sync=false) + notify_safely(STOP_COMMAND) + @thread.join if @thread && sync + end + + def halt(sync=false) + notify_safely(HALT_COMMAND) + @thread.join if @thread && sync + end + + def begin_restart(sync=false) + notify_safely(RESTART_COMMAND) + @thread.join if @thread && sync + end + + def shutting_down? 
+ @status == :stop || @status == :restart + end + + # List of methods invoked by #stats. + # @version 5.0.0 + STAT_METHODS = [:backlog, :running, :pool_capacity, :max_threads, :requests_count].freeze + + # Returns a hash of stats about the running server for reporting purposes. + # @version 5.0.0 + # @!attribute [r] stats + # @return [Hash] hash containing stat info from `Server` and `ThreadPool` + def stats + stats = @thread_pool&.stats || {} + stats[:max_threads] = @max_threads + stats[:requests_count] = @requests_count + stats + end + + # below are 'delegations' to binder + # remove in Puma 7? + + + def add_tcp_listener(host, port, optimize_for_latency = true, backlog = 1024) + @binder.add_tcp_listener host, port, optimize_for_latency, backlog + end + + def add_ssl_listener(host, port, ctx, optimize_for_latency = true, + backlog = 1024) + @binder.add_ssl_listener host, port, ctx, optimize_for_latency, backlog + end + + def add_unix_listener(path, umask = nil, mode = nil, backlog = 1024) + @binder.add_unix_listener path, umask, mode, backlog + end + + # @!attribute [r] connected_ports + def connected_ports + @binder.connected_ports + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/single.rb b/vendor/cache/puma-fba741b91780/lib/puma/single.rb new file mode 100644 index 000000000..1697f6b27 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/single.rb @@ -0,0 +1,69 @@ +# frozen_string_literal: true + +require_relative 'runner' +require_relative 'detect' +require_relative 'plugin' + +module Puma + # This class is instantiated by the `Puma::Launcher` and used + # to boot and serve a Ruby application when no puma "workers" are needed + # i.e. only using "threaded" mode. For example `$ puma -t 1:5` + # + # At the core of this class is running an instance of `Puma::Server` which + # gets created via the `start_server` method from the `Puma::Runner` class + # that this inherits from. 
+ class Single < Runner + # @!attribute [r] stats + def stats + { + started_at: utc_iso8601(@started_at) + }.merge(@server.stats).merge(super) + end + + def restart + @server&.begin_restart + end + + def stop + @server&.stop false + end + + def halt + @server&.halt + end + + def stop_blocked + log "- Gracefully stopping, waiting for requests to finish" + @control&.stop true + @server&.stop true + end + + def run + output_header "single" + + load_and_bind + + Plugins.fire_background + + @launcher.write_state + + start_control + + @server = server = start_server + server_thread = server.run + + log "Use Ctrl-C to stop" + redirect_io + + @events.fire_on_booted! + + debug_loaded_extensions("Loaded Extensions:") if @log_writer.debug? + + begin + server_thread.join + rescue Interrupt + # Swallow it + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/state_file.rb b/vendor/cache/puma-fba741b91780/lib/puma/state_file.rb new file mode 100644 index 000000000..7f6ddbc1d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/state_file.rb @@ -0,0 +1,68 @@ +# frozen_string_literal: true + +module Puma + + # Puma::Launcher uses StateFile to write a yaml file for use with Puma::ControlCLI. + # + # In previous versions of Puma, YAML was used to read/write the state file. + # Since Puma is similar to Bundler/RubyGems in that it may load before one's app + # does, minimizing the dependencies that may be shared with the app is desired. + # + # At present, it only works with numeric and string values. It is still a valid + # yaml file, and the CI tests parse it with Psych. + # + class StateFile + + ALLOWED_FIELDS = %w!control_url control_auth_token pid running_from! + + def initialize + @options = {} + end + + def save(path, permission = nil) + contents = +"---\n" + @options.each do |k,v| + next unless ALLOWED_FIELDS.include? k + case v + when Numeric + contents << "#{k}: #{v}\n" + when String + next if v.strip.empty? 
+ contents << (k == 'running_from' || v.to_s.include?(' ') ? + "#{k}: \"#{v}\"\n" : "#{k}: #{v}\n") + end + end + if permission + File.write path, contents, mode: 'wb:UTF-8' + else + File.write path, contents, mode: 'wb:UTF-8', perm: permission + end + end + + def load(path) + File.read(path).lines.each do |line| + next if line.start_with? '#' + k,v = line.split ':', 2 + next unless v && ALLOWED_FIELDS.include?(k) + v = v.strip + @options[k] = + case v + when '' then nil + when /\A\d+\z/ then v.to_i + when /\A\d+\.\d+\z/ then v.to_f + else v.gsub(/\A"|"\z/, '') + end + end + end + + ALLOWED_FIELDS.each do |f| + define_method f.to_sym do + @options[f] + end + + define_method :"#{f}=" do |v| + @options[f] = v + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/thread_pool.rb b/vendor/cache/puma-fba741b91780/lib/puma/thread_pool.rb new file mode 100644 index 000000000..bf8f6d88d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/thread_pool.rb @@ -0,0 +1,445 @@ +# frozen_string_literal: true + +require 'thread' + +require_relative 'io_buffer' + +module Puma + # Internal Docs for A simple thread pool management object. + # + # Each Puma "worker" has a thread pool to process requests. + # + # First a connection to a client is made in `Puma::Server`. It is wrapped in a + # `Puma::Client` instance and then passed to the `Puma::Reactor` to ensure + # the whole request is buffered into memory. Once the request is ready, it is passed into + # a thread pool via the `Puma::ThreadPool#<<` operator where it is stored in a `@todo` array. + # + # Each thread in the pool has an internal loop where it pulls a request from the `@todo` array + # and processes it. + class ThreadPool + class ForceShutdown < RuntimeError + end + + # How long, after raising the ForceShutdown of a thread during + # forced shutdown mode, to wait for the thread to try and finish + # up its work before leaving the thread to die on the vine. 
+ SHUTDOWN_GRACE_TIME = 5 # seconds + + # Maintain a minimum of +min+ and maximum of +max+ threads + # in the pool. + # + # The block passed is the work that will be performed in each + # thread. + # + def initialize(name, options = {}, &block) + @not_empty = ConditionVariable.new + @not_full = ConditionVariable.new + @mutex = Mutex.new + + @todo = [] + + @spawned = 0 + @waiting = 0 + + @name = name + @min = Integer(options[:min_threads]) + @max = Integer(options[:max_threads]) + # Not an 'exposed' option, options[:pool_shutdown_grace_time] is used in CI + # to shorten @shutdown_grace_time from SHUTDOWN_GRACE_TIME. Parallel CI + # makes stubbing constants difficult. + @shutdown_grace_time = Float(options[:pool_shutdown_grace_time] || SHUTDOWN_GRACE_TIME) + @block = block + @out_of_band = options[:out_of_band] + @clean_thread_locals = options[:clean_thread_locals] + @before_thread_start = options[:before_thread_start] + @before_thread_exit = options[:before_thread_exit] + @reaping_time = options[:reaping_time] + @auto_trim_time = options[:auto_trim_time] + + @shutdown = false + + @trim_requested = 0 + @out_of_band_pending = false + + @workers = [] + + @auto_trim = nil + @reaper = nil + + @mutex.synchronize do + @min.times do + spawn_thread + @not_full.wait(@mutex) + end + end + + @force_shutdown = false + @shutdown_mutex = Mutex.new + end + + attr_reader :spawned, :trim_requested, :waiting + + def self.clean_thread_locals + Thread.current.keys.each do |key| # rubocop: disable Style/HashEachMethods + Thread.current[key] = nil unless key == :__recursive_key__ + end + end + + # generate stats hash so as not to perform multiple locks + # @return [Hash] hash containing stat info from ThreadPool + def stats + with_mutex do + { backlog: @todo.size, + running: @spawned, + pool_capacity: @waiting + (@max - @spawned) + } + end + end + + # How many objects have yet to be processed by the pool? 
+ # + def backlog + with_mutex { @todo.size } + end + + # @!attribute [r] pool_capacity + def pool_capacity + waiting + (@max - spawned) + end + + # @!attribute [r] busy_threads + # @version 5.0.0 + def busy_threads + with_mutex { @spawned - @waiting + @todo.size } + end + + # :nodoc: + # + # Must be called with @mutex held! + # + def spawn_thread + @spawned += 1 + + trigger_before_thread_start_hooks + th = Thread.new(@spawned) do |spawned| + Puma.set_thread_name '%s tp %03i' % [@name, spawned] + todo = @todo + block = @block + mutex = @mutex + not_empty = @not_empty + not_full = @not_full + + while true + work = nil + + mutex.synchronize do + while todo.empty? + if @trim_requested > 0 + @trim_requested -= 1 + @spawned -= 1 + @workers.delete th + not_full.signal + trigger_before_thread_exit_hooks + Thread.exit + end + + @waiting += 1 + if @out_of_band_pending && trigger_out_of_band_hook + @out_of_band_pending = false + end + not_full.signal + begin + not_empty.wait mutex + ensure + @waiting -= 1 + end + end + + work = todo.shift + end + + if @clean_thread_locals + ThreadPool.clean_thread_locals + end + + begin + @out_of_band_pending = true if block.call(work) + rescue Exception => e + STDERR.puts "Error reached top of thread-pool: #{e.message} (#{e.class})" + end + end + end + + @workers << th + + th + end + + private :spawn_thread + + def trigger_before_thread_start_hooks + return unless @before_thread_start&.any? + + @before_thread_start.each do |b| + begin + b.call + rescue Exception => e + STDERR.puts "WARNING before_thread_start hook failed with exception (#{e.class}) #{e.message}" + end + end + nil + end + + private :trigger_before_thread_start_hooks + + def trigger_before_thread_exit_hooks + return unless @before_thread_exit&.any? 
+ + @before_thread_exit.each do |b| + begin + b.call + rescue Exception => e + STDERR.puts "WARNING before_thread_exit hook failed with exception (#{e.class}) #{e.message}" + end + end + nil + end + + private :trigger_before_thread_exit_hooks + + # @version 5.0.0 + def trigger_out_of_band_hook + return false unless @out_of_band&.any? + + # we execute on idle hook when all threads are free + return false unless @spawned == @waiting + + @out_of_band.each(&:call) + true + rescue Exception => e + STDERR.puts "Exception calling out_of_band_hook: #{e.message} (#{e.class})" + true + end + + private :trigger_out_of_band_hook + + # @version 5.0.0 + def with_mutex(&block) + @mutex.owned? ? + yield : + @mutex.synchronize(&block) + end + + # Add +work+ to the todo list for a Thread to pickup and process. + def <<(work) + with_mutex do + if @shutdown + raise "Unable to add work while shutting down" + end + + @todo << work + + if @waiting < @todo.size and @spawned < @max + spawn_thread + end + + @not_empty.signal + end + end + + # This method is used by `Puma::Server` to let the server know when + # the thread pool can pull more requests from the socket and + # pass to the reactor. + # + # The general idea is that the thread pool can only work on a fixed + # number of requests at the same time. If it is already processing that + # number of requests then it is at capacity. If another Puma process has + # spare capacity, then the request can be left on the socket so the other + # worker can pick it up and process it. + # + # For example: if there are 5 threads, but only 4 working on + # requests, this method will not wait and the `Puma::Server` + # can pull a request right away. + # + # If there are 5 threads and all 5 of them are busy, then it will + # pause here, and wait until the `not_full` condition variable is + # signaled, usually this indicates that a request has been processed. 
+ # + # It's important to note that even though the server might accept another + # request, it might not be added to the `@todo` array right away. + # For example if a slow client has only sent a header, but not a body + # then the `@todo` array would stay the same size as the reactor works + # to try to buffer the request. In that scenario the next call to this + # method would not block and another request would be added into the reactor + # by the server. This would continue until a fully buffered request + # makes it through the reactor and can then be processed by the thread pool. + def wait_until_not_full + with_mutex do + while true + return if @shutdown + + # If we can still spin up new threads and there + # is work queued that cannot be handled by waiting + # threads, then accept more work until we would + # spin up the max number of threads. + return if busy_threads < @max + + @not_full.wait @mutex + end + end + end + + # @version 5.0.0 + def wait_for_less_busy_worker(delay_s) + return unless delay_s && delay_s > 0 + + # Ruby MRI does GVL, this can result + # in processing contention when multiple threads + # (requests) are running concurrently + return unless Puma.mri? + + with_mutex do + return if @shutdown + + # do not delay, if we are not busy + return unless busy_threads > 0 + + # this will be signaled once a request finishes, + # which can happen earlier than delay + @not_full.wait @mutex, delay_s + end + end + + # If there are any free threads in the pool, tell one to go ahead + # and exit. If +force+ is true, then a trim request is requested + # even if all threads are being utilized. + # + def trim(force=false) + with_mutex do + free = @waiting - @todo.size + if (force or free > 0) and @spawned - @trim_requested > @min + @trim_requested += 1 + @not_empty.signal + end + end + end + + # If there are dead threads in the pool make them go away while decreasing + # spawned counter so that new healthy threads could be created again. 
+ def reap + with_mutex do + dead_workers = @workers.reject(&:alive?) + + dead_workers.each do |worker| + worker.kill + @spawned -= 1 + end + + @workers.delete_if do |w| + dead_workers.include?(w) + end + end + end + + class Automaton + def initialize(pool, timeout, thread_name, message) + @pool = pool + @timeout = timeout + @thread_name = thread_name + @message = message + @running = false + end + + def start! + @running = true + + @thread = Thread.new do + Puma.set_thread_name @thread_name + while @running + @pool.public_send(@message) + sleep @timeout + end + end + end + + def stop + @running = false + @thread.wakeup + end + end + + def auto_trim!(timeout=@auto_trim_time) + @auto_trim = Automaton.new(self, timeout, "#{@name} threadpool trimmer", :trim) + @auto_trim.start! + end + + def auto_reap!(timeout=@reaping_time) + @reaper = Automaton.new(self, timeout, "#{@name} threadpool reaper", :reap) + @reaper.start! + end + + # Allows ThreadPool::ForceShutdown to be raised within the + # provided block if the thread is forced to shutdown during execution. + def with_force_shutdown + t = Thread.current + @shutdown_mutex.synchronize do + raise ForceShutdown if @force_shutdown + t[:with_force_shutdown] = true + end + yield + ensure + t[:with_force_shutdown] = false + end + + # Tell all threads in the pool to exit and wait for them to finish. + # Wait +timeout+ seconds then raise +ForceShutdown+ in remaining threads. + # Next, wait an extra +@shutdown_grace_time+ seconds then force-kill remaining + # threads. Finally, wait 1 second for remaining threads to exit. + # + def shutdown(timeout=-1) + threads = with_mutex do + @shutdown = true + @trim_requested = @spawned + @not_empty.broadcast + @not_full.broadcast + + @auto_trim&.stop + @reaper&.stop + # dup workers so that we join them all safely + @workers.dup + end + + if timeout == -1 + # Wait for threads to finish without force shutdown. 
+ threads.each(&:join) + else + join = ->(inner_timeout) do + start = Process.clock_gettime(Process::CLOCK_MONOTONIC) + threads.reject! do |t| + elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start + t.join inner_timeout - elapsed + end + end + + # Wait +timeout+ seconds for threads to finish. + join.call(timeout) + + # If threads are still running, raise ForceShutdown and wait to finish. + @shutdown_mutex.synchronize do + @force_shutdown = true + threads.each do |t| + t.raise ForceShutdown if t[:with_force_shutdown] + end + end + join.call(@shutdown_grace_time) + + # If threads are _still_ running, forcefully kill them and wait to finish. + threads.each(&:kill) + join.call(1) + end + + @spawned = 0 + @workers = [] + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/puma/util.rb b/vendor/cache/puma-fba741b91780/lib/puma/util.rb new file mode 100644 index 000000000..a87dc731d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/puma/util.rb @@ -0,0 +1,141 @@ +# frozen_string_literal: true + +require 'uri/common' + +module Puma + module Util + module_function + + def pipe + IO.pipe + end + + # An instance method on Thread has been provided to address https://bugs.ruby-lang.org/issues/13632, + # which currently affects some older versions of Ruby: 2.2.7 2.2.8 2.2.9 2.2.10 2.3.4 2.4.1 + # Additional context: https://github.com/puma/puma/pull/1345 + def purge_interrupt_queue + Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue + end + + # Escapes and unescapes a URI escaped string with + # +encoding+. 
+encoding+ will be the target encoding of the string + # returned, and it defaults to UTF-8 + if defined?(::Encoding) + def escape(s, encoding = Encoding::UTF_8) + URI.encode_www_form_component(s, encoding) + end + + def unescape(s, encoding = Encoding::UTF_8) + URI.decode_www_form_component(s, encoding) + end + else + def escape(s, encoding = nil) + URI.encode_www_form_component(s, encoding) + end + + def unescape(s, encoding = nil) + URI.decode_www_form_component(s, encoding) + end + end + module_function :unescape, :escape + + DEFAULT_SEP = /[&;] */n + + # Stolen from Mongrel, with some small modifications: + # Parses a query string by breaking it up at the '&' + # and ';' characters. You can also use this to parse + # cookies by changing the characters used in the second + # parameter (which defaults to '&;'). + def parse_query(qs, d = nil, &unescaper) + unescaper ||= method(:unescape) + + params = {} + + (qs || '').split(d ? /[#{d}] */n : DEFAULT_SEP).each do |p| + next if p.empty? + k, v = p.split('=', 2).map(&unescaper) + + if cur = params[k] + if cur.class == Array + params[k] << v + else + params[k] = [cur, v] + end + else + params[k] = v + end + end + + params + end + + # A case-insensitive Hash that preserves the original case of a + # header when set. + class HeaderHash < Hash + def self.new(hash={}) + HeaderHash === hash ? hash : super(hash) + end + + def initialize(hash={}) + super() + @names = {} + hash.each { |k, v| self[k] = v } + end + + def each + super do |k, v| + yield(k, v.respond_to?(:to_ary) ? 
v.to_ary.join("\n") : v) + end + end + + # @!attribute [r] to_hash + def to_hash + hash = {} + each { |k,v| hash[k] = v } + hash + end + + def [](k) + super(k) || super(@names[k.downcase]) + end + + def []=(k, v) + canonical = k.downcase + delete k if @names[canonical] && @names[canonical] != k # .delete is expensive, don't invoke it unless necessary + @names[k] = @names[canonical] = k + super k, v + end + + def delete(k) + canonical = k.downcase + result = super @names.delete(canonical) + @names.delete_if { |name,| name.downcase == canonical } + result + end + + def include?(k) + @names.include?(k) || @names.include?(k.downcase) + end + + alias_method :has_key?, :include? + alias_method :member?, :include? + alias_method :key?, :include? + + def merge!(other) + other.each { |k, v| self[k] = v } + self + end + + def merge(other) + hash = dup + hash.merge! other + end + + def replace(other) + clear + other.each { |k, v| self[k] = v } + self + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/lib/rack/handler/puma.rb b/vendor/cache/puma-fba741b91780/lib/rack/handler/puma.rb new file mode 100644 index 000000000..7b3ce3db8 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/lib/rack/handler/puma.rb @@ -0,0 +1,144 @@ +# frozen_string_literal: true + +module Puma + + # This module is used as an 'include' file in code at bottom of file. It loads + # into either `Rackup::Handler::Puma` or `Rack::Handler::Puma`. + + module RackHandler + DEFAULT_OPTIONS = { + :Verbose => false, + :Silent => false + } + + def config(app, options = {}) + require_relative '../../puma' + require_relative '../../puma/configuration' + require_relative '../../puma/log_writer' + require_relative '../../puma/launcher' + + default_options = DEFAULT_OPTIONS.dup + + # Libraries pass in values such as :Port and there is no way to determine + # if it is a default provided by the library or a special value provided + # by the user. A special key `user_supplied_options` can be passed. 
This + # contains an array of all explicitly defined user options. We then + # know that all other values are defaults + if user_supplied_options = options.delete(:user_supplied_options) + (options.keys - user_supplied_options).each do |k| + default_options[k] = options.delete(k) + end + end + + @events = options[:events] || ::Puma::Events.new + + conf = ::Puma::Configuration.new(options, default_options.merge({events: @events})) do |user_config, file_config, default_config| + if options.delete(:Verbose) + begin + require 'rack/commonlogger' # Rack 1.x + rescue LoadError + require 'rack/common_logger' # Rack 2 and later + end + app = ::Rack::CommonLogger.new(app, STDOUT) + end + + if options[:environment] + user_config.environment options[:environment] + end + + if options[:Threads] + min, max = options.delete(:Threads).split(':', 2) + user_config.threads min, max + end + + if options[:Host] || options[:Port] + host = options[:Host] || default_options[:Host] + port = options[:Port] || default_options[:Port] + self.set_host_port_to_config(host, port, user_config) + end + + if default_options[:Host] + file_config.set_default_host(default_options[:Host]) + end + self.set_host_port_to_config(default_options[:Host], default_options[:Port], default_config) + + user_config.app app + end + conf + end + + def run(app, **options) + conf = self.config(app, options) + + log_writer = options.delete(:Silent) ? ::Puma::LogWriter.strings : ::Puma::LogWriter.stdio + + launcher = ::Puma::Launcher.new(conf, :log_writer => log_writer, events: @events) + + yield launcher if block_given? + begin + launcher.run + rescue Interrupt + puts "* Gracefully stopping, waiting for requests to finish" + launcher.stop + puts "* Goodbye!" 
+ end + end + + def valid_options + { + "Host=HOST" => "Hostname to listen on (default: localhost)", + "Port=PORT" => "Port to listen on (default: 8080)", + "Threads=MIN:MAX" => "min:max threads to use (default 0:16)", + "Verbose" => "Don't report each request (default: false)" + } + end + + def set_host_port_to_config(host, port, config) + config.clear_binds! if host || port + + if host&.start_with? '.', '/', '@' + config.bind "unix://#{host}" + elsif host&.start_with? 'ssl://' + uri = URI.parse(host) + uri.port ||= port || ::Puma::Configuration::DEFAULTS[:tcp_port] + config.bind uri.to_s + else + + if host + port ||= ::Puma::Configuration::DEFAULTS[:tcp_port] + end + + if port + host ||= ::Puma::Configuration::DEFAULTS[:tcp_host] + config.port port, host + end + end + end + end +end + +# rackup was removed in Rack 3, it is now a separate gem +if Object.const_defined?(:Rackup) && ::Rackup.const_defined?(:Handler) + module Rackup + module Handler + module Puma + class << self + include ::Puma::RackHandler + end + end + register :puma, Puma + end + end +else + do_register = Object.const_defined?(:Rack) && ::Rack.release < '3' + module Rack + module Handler + module Puma + class << self + include ::Puma::RackHandler + end + end + end + end + ::Rack::Handler.register(:puma, ::Rack::Handler::Puma) if do_register +end diff --git a/vendor/cache/puma-fba741b91780/puma.gemspec b/vendor/cache/puma-fba741b91780/puma.gemspec new file mode 100644 index 000000000..78d3ac08d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/puma.gemspec @@ -0,0 +1,36 @@ +# -*- encoding: utf-8 -*- +# stub: puma 6.4.3 ruby lib +# stub: ext/puma_http11/extconf.rb + +Gem::Specification.new do |s| + s.name = "puma".freeze + s.version = "6.4.3" + + s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? 
:required_rubygems_version= + s.metadata = { "bug_tracker_uri" => "https://github.com/puma/puma/issues", "changelog_uri" => "https://github.com/puma/puma/blob/master/History.md", "homepage_uri" => "https://puma.io", "rubygems_mfa_required" => "true", "source_code_uri" => "https://github.com/puma/puma" } if s.respond_to? :metadata= + s.require_paths = ["lib".freeze] + s.authors = ["Evan Phoenix".freeze] + s.date = "2024-11-19" + s.description = "Puma is a simple, fast, multi-threaded, and highly parallel HTTP 1.1 server\nfor Ruby/Rack applications. Puma is intended for use in both development and\nproduction environments. It's great for highly parallel Ruby implementations such as\nJRuby and TruffleRuby as well as as providing process worker support to support CRuby well.\n".freeze + s.email = ["evan@phx.io".freeze] + s.executables = ["puma".freeze, "pumactl".freeze] + s.extensions = ["ext/puma_http11/extconf.rb".freeze] + s.files = ["History.md".freeze, "LICENSE".freeze, "README.md".freeze, "bin/puma".freeze, "bin/puma-wild".freeze, "bin/pumactl".freeze, "docs/architecture.md".freeze, "docs/compile_options.md".freeze, "docs/deployment.md".freeze, "docs/fork_worker.md".freeze, "docs/images/puma-connection-flow-no-reactor.png".freeze, "docs/images/puma-connection-flow.png".freeze, "docs/images/puma-general-arch.png".freeze, "docs/java_options.md".freeze, "docs/jungle/README.md".freeze, "docs/jungle/rc.d/README.md".freeze, "docs/jungle/rc.d/puma".freeze, "docs/jungle/rc.d/puma.conf".freeze, "docs/kubernetes.md".freeze, "docs/nginx.md".freeze, "docs/plugins.md".freeze, "docs/rails_dev_mode.md".freeze, "docs/restart.md".freeze, "docs/signals.md".freeze, "docs/stats.md".freeze, "docs/systemd.md".freeze, "docs/testing_benchmarks_local_files.md".freeze, "docs/testing_test_rackup_ci_files.md".freeze, "ext/puma_http11/PumaHttp11Service.java".freeze, "ext/puma_http11/ext_help.h".freeze, "ext/puma_http11/extconf.rb".freeze, "ext/puma_http11/http11_parser.c".freeze, 
"ext/puma_http11/http11_parser.h".freeze, "ext/puma_http11/http11_parser.java.rl".freeze, "ext/puma_http11/http11_parser.rl".freeze, "ext/puma_http11/http11_parser_common.rl".freeze, "ext/puma_http11/mini_ssl.c".freeze, "ext/puma_http11/no_ssl/PumaHttp11Service.java".freeze, "ext/puma_http11/org/jruby/puma/Http11.java".freeze, "ext/puma_http11/org/jruby/puma/Http11Parser.java".freeze, "ext/puma_http11/org/jruby/puma/MiniSSL.java".freeze, "ext/puma_http11/puma_http11.c".freeze, "lib/puma.rb".freeze, "lib/puma/app/status.rb".freeze, "lib/puma/binder.rb".freeze, "lib/puma/cli.rb".freeze, "lib/puma/client.rb".freeze, "lib/puma/cluster.rb".freeze, "lib/puma/cluster/worker.rb".freeze, "lib/puma/cluster/worker_handle.rb".freeze, "lib/puma/commonlogger.rb".freeze, "lib/puma/configuration.rb".freeze, "lib/puma/const.rb".freeze, "lib/puma/control_cli.rb".freeze, "lib/puma/detect.rb".freeze, "lib/puma/dsl.rb".freeze, "lib/puma/error_logger.rb".freeze, "lib/puma/events.rb".freeze, "lib/puma/io_buffer.rb".freeze, "lib/puma/jruby_restart.rb".freeze, "lib/puma/json_serialization.rb".freeze, "lib/puma/launcher.rb".freeze, "lib/puma/launcher/bundle_pruner.rb".freeze, "lib/puma/log_writer.rb".freeze, "lib/puma/minissl.rb".freeze, "lib/puma/minissl/context_builder.rb".freeze, "lib/puma/null_io.rb".freeze, "lib/puma/plugin.rb".freeze, "lib/puma/plugin/systemd.rb".freeze, "lib/puma/plugin/tmp_restart.rb".freeze, "lib/puma/rack/builder.rb".freeze, "lib/puma/rack/urlmap.rb".freeze, "lib/puma/rack_default.rb".freeze, "lib/puma/reactor.rb".freeze, "lib/puma/request.rb".freeze, "lib/puma/runner.rb".freeze, "lib/puma/sd_notify.rb".freeze, "lib/puma/server.rb".freeze, "lib/puma/single.rb".freeze, "lib/puma/state_file.rb".freeze, "lib/puma/thread_pool.rb".freeze, "lib/puma/util.rb".freeze, "lib/rack/handler/puma.rb".freeze, "tools/Dockerfile".freeze, "tools/trickletest.rb".freeze] + s.homepage = "https://puma.io".freeze + s.licenses = ["BSD-3-Clause".freeze] + s.required_ruby_version = 
Gem::Requirement.new(">= 2.4".freeze) + s.rubygems_version = "3.3.7".freeze + s.summary = "A Ruby/Rack web server built for parallelism.".freeze + + s.installed_by_version = "3.3.7" if s.respond_to? :installed_by_version + + if s.respond_to? :specification_version then + s.specification_version = 4 + end + + if s.respond_to? :add_runtime_dependency then + s.add_runtime_dependency(%q.freeze, ["~> 2.0"]) + else + s.add_dependency(%q.freeze, ["~> 2.0"]) + end +end diff --git a/vendor/cache/puma-fba741b91780/test/bundle_app_config_test/Gemfile b/vendor/cache/puma-fba741b91780/test/bundle_app_config_test/Gemfile new file mode 100644 index 000000000..e0695ee62 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_app_config_test/Gemfile @@ -0,0 +1 @@ +gem 'puma', path: '../..' diff --git a/vendor/cache/puma-fba741b91780/test/bundle_app_config_test/config.ru b/vendor/cache/puma-fba741b91780/test/bundle_app_config_test/config.ru new file mode 100644 index 000000000..deb8fc822 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_app_config_test/config.ru @@ -0,0 +1 @@ +run lambda { |env| [200, {"Content-Type" => "text/plain"}, ["Hello World"]] } diff --git a/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/.gitignore b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/.gitignore new file mode 100644 index 000000000..53c647135 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/.gitignore @@ -0,0 +1 @@ +Gemfile.bundle_env_preservation_test.lock diff --git a/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/Gemfile.bundle_env_preservation_test b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/Gemfile.bundle_env_preservation_test new file mode 100644 index 000000000..e0695ee62 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/Gemfile.bundle_env_preservation_test @@ -0,0 +1 @@ +gem 'puma', path: '../..' 
diff --git a/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/config.ru b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/config.ru new file mode 100644 index 000000000..1f0f2cc61 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/config.ru @@ -0,0 +1 @@ +run lambda { |env| [200, {'Content-Type'=>'text/plain'}, [ENV['BUNDLE_GEMFILE'].inspect]] } diff --git a/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version1/Gemfile b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version1/Gemfile new file mode 100644 index 000000000..e1437de3f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version1/Gemfile @@ -0,0 +1 @@ +gem 'puma', path: '../../..' diff --git a/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version1/config.ru b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version1/config.ru new file mode 100644 index 000000000..1f0f2cc61 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version1/config.ru @@ -0,0 +1 @@ +run lambda { |env| [200, {'Content-Type'=>'text/plain'}, [ENV['BUNDLE_GEMFILE'].inspect]] } diff --git a/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version1/config/puma.rb b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version1/config/puma.rb new file mode 100644 index 000000000..ab019ff13 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version1/config/puma.rb @@ -0,0 +1 @@ +directory File.expand_path("../../current", __dir__) diff --git a/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version2/Gemfile b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version2/Gemfile new file mode 100644 index 000000000..e1437de3f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version2/Gemfile @@ -0,0 +1 @@ +gem 'puma', path: '../../..' 
diff --git a/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version2/config.ru b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version2/config.ru new file mode 100644 index 000000000..1f0f2cc61 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version2/config.ru @@ -0,0 +1 @@ +run lambda { |env| [200, {'Content-Type'=>'text/plain'}, [ENV['BUNDLE_GEMFILE'].inspect]] } diff --git a/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version2/config/puma.rb b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version2/config/puma.rb new file mode 100644 index 000000000..ab019ff13 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/bundle_preservation_test/version2/config/puma.rb @@ -0,0 +1 @@ +directory File.expand_path("../../current", __dir__) diff --git a/vendor/cache/puma-fba741b91780/test/config/ab_rs.rb b/vendor/cache/puma-fba741b91780/test/config/ab_rs.rb new file mode 100644 index 000000000..8ba59e618 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/ab_rs.rb @@ -0,0 +1,22 @@ +url = ARGV.shift +count = (ARGV.shift || 1000).to_i + +STDOUT.sync = true + +1.upto(5) do |i| + print "#{i}: " + str = `ab -n #{count} -c #{i} #{url} 2>/dev/null` + + rs = /Requests per second:\s+([\d.]+)\s/.match(str) + puts rs[1] +end + +puts "Keep Alive:" + +1.upto(5) do |i| + print "#{i}: " + str = `ab -n #{count} -k -c #{i} #{url} 2>/dev/null` + + rs = /Requests per second:\s+([\d.]+)\s/.match(str) + puts rs[1] +end diff --git a/vendor/cache/puma-fba741b91780/test/config/app.rb b/vendor/cache/puma-fba741b91780/test/config/app.rb new file mode 100644 index 000000000..55f72f156 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/app.rb @@ -0,0 +1,9 @@ +port ENV.fetch('PORT', 0) + +app do |env| + [200, {}, ["embedded app"]] +end + +lowlevel_error_handler do |err| + [200, {}, ["error page"]] +end diff --git a/vendor/cache/puma-fba741b91780/test/config/control_no_token.rb 
b/vendor/cache/puma-fba741b91780/test/config/control_no_token.rb new file mode 100644 index 000000000..eb63d8725 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/control_no_token.rb @@ -0,0 +1,5 @@ +activate_control_app 'unix:///tmp/pumactl.sock', { no_token: true } + +app do |env| + [200, {}, ["embedded app"]] +end diff --git a/vendor/cache/puma-fba741b91780/test/config/cpu_spin.rb b/vendor/cache/puma-fba741b91780/test/config/cpu_spin.rb new file mode 100644 index 000000000..cab7ce2ae --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/cpu_spin.rb @@ -0,0 +1,17 @@ +# call with "GET /cpu/ HTTP/1.1\r\n\r\n", +# where is the number of iterations + +require 'benchmark' + +# configure `wait_for_less_busy_workers` based on ENV, default `true` +wait_for_less_busy_worker ENV.fetch('PUMA_WAIT_FOR_LESS_BUSY_WORKERS', '0.005').to_f + +app do |env| + iterations = (env['REQUEST_PATH'][/\/cpu\/(\d.*)/,1] || '1000').to_i + + duration = Benchmark.measure do + iterations.times { rand } + end + + [200, {"Content-Type" => "text/plain"}, ["Run for #{duration.total} #{Process.pid}"]] +end diff --git a/vendor/cache/puma-fba741b91780/test/config/custom_log_formatter.rb b/vendor/cache/puma-fba741b91780/test/config/custom_log_formatter.rb new file mode 100644 index 000000000..234748b37 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/custom_log_formatter.rb @@ -0,0 +1,3 @@ +log_formatter do |str| + "[#{Process.pid}] [#{Socket.gethostname}] #{Time.now}: #{str}" +end diff --git a/vendor/cache/puma-fba741b91780/test/config/custom_logger.rb b/vendor/cache/puma-fba741b91780/test/config/custom_logger.rb new file mode 100644 index 000000000..075972f68 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/custom_logger.rb @@ -0,0 +1,13 @@ +class CustomLogger + def initialize(output=STDOUT) + @output = output + end + + def write(msg) + @output.puts 'Custom logging: ' + msg + @output.flush + end +end + +log_requests +custom_logger 
CustomLogger.new(STDOUT) diff --git a/vendor/cache/puma-fba741b91780/test/config/event_on_booted_and_on_stopped.rb b/vendor/cache/puma-fba741b91780/test/config/event_on_booted_and_on_stopped.rb new file mode 100644 index 000000000..9dd590136 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/event_on_booted_and_on_stopped.rb @@ -0,0 +1,7 @@ +on_booted do + puts "on_booted called" +end + +on_stopped do + puts "on_stopped called" +end diff --git a/vendor/cache/puma-fba741b91780/test/config/event_on_booted_exit.rb b/vendor/cache/puma-fba741b91780/test/config/event_on_booted_exit.rb new file mode 100644 index 000000000..de17e9741 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/event_on_booted_exit.rb @@ -0,0 +1,12 @@ +on_booted do + pid = Process.pid + begin + Process.kill :TERM, pid + rescue Errno::ESRCH + end + + begin + Process.wait2 pid + rescue Errno::ECHILD + end +end diff --git a/vendor/cache/puma-fba741b91780/test/config/hook_data.rb b/vendor/cache/puma-fba741b91780/test/config/hook_data.rb new file mode 100644 index 000000000..318e1c21d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/hook_data.rb @@ -0,0 +1,9 @@ +workers 2 + +on_worker_boot(:test) do |index, data| + data[:test] = index +end + +on_worker_shutdown(:test) do |index, data| + STDOUT.syswrite "\nindex #{index} data #{data[:test]}" +end diff --git a/vendor/cache/puma-fba741b91780/test/config/process_detach_before_fork.rb b/vendor/cache/puma-fba741b91780/test/config/process_detach_before_fork.rb new file mode 100644 index 000000000..c61a45331 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/process_detach_before_fork.rb @@ -0,0 +1,11 @@ +worker_shutdown_timeout 0 + +before_fork do + pid = fork do + sleep 30 # This has to exceed the test timeout + end + + pid_filename = File.join(Dir.tmpdir, 'process_detach_test.pid') + File.write(pid_filename, pid) + Process.detach(pid) +end diff --git 
a/vendor/cache/puma-fba741b91780/test/config/prune_bundler_print_json_defined.rb b/vendor/cache/puma-fba741b91780/test/config/prune_bundler_print_json_defined.rb new file mode 100644 index 000000000..dcef22c03 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/prune_bundler_print_json_defined.rb @@ -0,0 +1,4 @@ +prune_bundler true +before_fork do + puts "defined?(::JSON): #{defined?(::JSON).inspect}" +end diff --git a/vendor/cache/puma-fba741b91780/test/config/prune_bundler_print_nio_defined.rb b/vendor/cache/puma-fba741b91780/test/config/prune_bundler_print_nio_defined.rb new file mode 100644 index 000000000..8e2b65dd3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/prune_bundler_print_nio_defined.rb @@ -0,0 +1,4 @@ +prune_bundler true +before_fork do + puts "defined?(::NIO): #{defined?(::NIO).inspect}" +end diff --git a/vendor/cache/puma-fba741b91780/test/config/prune_bundler_with_deps.rb b/vendor/cache/puma-fba741b91780/test/config/prune_bundler_with_deps.rb new file mode 100644 index 000000000..5535df654 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/prune_bundler_with_deps.rb @@ -0,0 +1,7 @@ +prune_bundler true +extra_runtime_dependencies ["minitest"] +before_fork do + $LOAD_PATH.each do |path| + puts "LOAD_PATH: #{path}" + end +end diff --git a/vendor/cache/puma-fba741b91780/test/config/prune_bundler_with_multiple_workers.rb b/vendor/cache/puma-fba741b91780/test/config/prune_bundler_with_multiple_workers.rb new file mode 100644 index 000000000..a93043f9a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/prune_bundler_with_multiple_workers.rb @@ -0,0 +1,14 @@ +require 'bundler/setup' +Bundler.setup + +prune_bundler true + +workers 2 + +app do |env| + [200, {}, ["embedded app"]] +end + +lowlevel_error_handler do |err| + [200, {}, ["error page"]] +end diff --git a/vendor/cache/puma-fba741b91780/test/config/rack_url_scheme.rb b/vendor/cache/puma-fba741b91780/test/config/rack_url_scheme.rb new file mode 
100644 index 000000000..7c2aec35a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/rack_url_scheme.rb @@ -0,0 +1 @@ +rack_url_scheme "https" diff --git a/vendor/cache/puma-fba741b91780/test/config/settings.rb b/vendor/cache/puma-fba741b91780/test/config/settings.rb new file mode 100644 index 000000000..4c2b8f229 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/settings.rb @@ -0,0 +1,2 @@ +port 3000 +threads 3, 5 diff --git a/vendor/cache/puma-fba741b91780/test/config/ssl_config.rb b/vendor/cache/puma-fba741b91780/test/config/ssl_config.rb new file mode 100644 index 000000000..efc2c7a37 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/ssl_config.rb @@ -0,0 +1,13 @@ +key = File.expand_path "../../../examples/puma/puma_keypair.pem", __FILE__ +cert = File.expand_path "../../../examples/puma/cert_puma.pem", __FILE__ +ca = File.expand_path "../../../examples/puma/client_certs/ca.crt", __FILE__ + +ssl_bind "0.0.0.0", 9292, :cert => cert, :key => key, :verify_mode => "peer", :ca => ca + +app do |env| + [200, {}, ["embedded app"]] +end + +lowlevel_error_handler do |err| + [200, {}, ["error page"]] +end diff --git a/vendor/cache/puma-fba741b91780/test/config/ssl_self_signed_config.rb b/vendor/cache/puma-fba741b91780/test/config/ssl_self_signed_config.rb new file mode 100644 index 000000000..d70b51d16 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/ssl_self_signed_config.rb @@ -0,0 +1,7 @@ +require "localhost" + +ssl_bind "0.0.0.0", 9292 + +app do |env| + [200, {}, ["self-signed certificate app"]] +end diff --git a/vendor/cache/puma-fba741b91780/test/config/state_file_testing_config.rb b/vendor/cache/puma-fba741b91780/test/config/state_file_testing_config.rb new file mode 100644 index 000000000..a920fb0dc --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/state_file_testing_config.rb @@ -0,0 +1,13 @@ +pidfile "t3-pid" +workers 3 +on_worker_boot do |index| + File.open("t3-worker-#{index}-pid", "w") { |f| 
f.puts Process.pid } +end + +before_fork { 1 } +on_worker_shutdown { 1 } +on_worker_boot { 1 } +on_worker_fork { 1 } +on_restart { 1 } +after_worker_boot { 1 } +lowlevel_error_handler { 1 } diff --git a/vendor/cache/puma-fba741b91780/test/config/suppress_exception.rb b/vendor/cache/puma-fba741b91780/test/config/suppress_exception.rb new file mode 100644 index 000000000..28caf503c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/suppress_exception.rb @@ -0,0 +1 @@ +raise_exception_on_sigterm false diff --git a/vendor/cache/puma-fba741b91780/test/config/t1_conf.rb b/vendor/cache/puma-fba741b91780/test/config/t1_conf.rb new file mode 100644 index 000000000..cae5662c7 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/t1_conf.rb @@ -0,0 +1,3 @@ +log_requests +stdout_redirect "t1-stdout" +pidfile "t1-pid" diff --git a/vendor/cache/puma-fba741b91780/test/config/t2_conf.rb b/vendor/cache/puma-fba741b91780/test/config/t2_conf.rb new file mode 100644 index 000000000..35533397c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/t2_conf.rb @@ -0,0 +1,3 @@ +log_requests +stdout_redirect "t2-stdout" +pidfile "t2-pid" diff --git a/vendor/cache/puma-fba741b91780/test/config/t3_conf.rb b/vendor/cache/puma-fba741b91780/test/config/t3_conf.rb new file mode 100644 index 000000000..1be95ce21 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/t3_conf.rb @@ -0,0 +1,5 @@ +pidfile "t3-pid" +workers 3 +on_worker_boot do |index| + File.open("t3-worker-#{index}-pid", "w") { |f| f.puts Process.pid } +end diff --git a/vendor/cache/puma-fba741b91780/test/config/with_float_convert.rb b/vendor/cache/puma-fba741b91780/test/config/with_float_convert.rb new file mode 100644 index 000000000..c1be13f86 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/with_float_convert.rb @@ -0,0 +1 @@ +max_fast_inline Float::INFINITY diff --git a/vendor/cache/puma-fba741b91780/test/config/with_integer_convert.rb 
b/vendor/cache/puma-fba741b91780/test/config/with_integer_convert.rb new file mode 100644 index 000000000..2ad83bc1f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/with_integer_convert.rb @@ -0,0 +1,9 @@ +persistent_timeout "6" +first_data_timeout "3" + +workers "2" +threads "4", "8" + +worker_timeout "90" +worker_boot_timeout "120" +worker_shutdown_timeout "150" diff --git a/vendor/cache/puma-fba741b91780/test/config/with_rackup_from_dsl.rb b/vendor/cache/puma-fba741b91780/test/config/with_rackup_from_dsl.rb new file mode 100644 index 000000000..3c310d877 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/with_rackup_from_dsl.rb @@ -0,0 +1 @@ +rackup "test/rackup/hello-env.ru" diff --git a/vendor/cache/puma-fba741b91780/test/config/with_symbol_convert.rb b/vendor/cache/puma-fba741b91780/test/config/with_symbol_convert.rb new file mode 100644 index 000000000..9162a5c22 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/with_symbol_convert.rb @@ -0,0 +1 @@ +io_selector_backend :ruby diff --git a/vendor/cache/puma-fba741b91780/test/config/worker_shutdown_timeout_2.rb b/vendor/cache/puma-fba741b91780/test/config/worker_shutdown_timeout_2.rb new file mode 100644 index 000000000..ddb8c6453 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/config/worker_shutdown_timeout_2.rb @@ -0,0 +1 @@ +worker_shutdown_timeout 2 diff --git a/vendor/cache/puma-fba741b91780/test/helper.rb b/vendor/cache/puma-fba741b91780/test/helper.rb new file mode 100644 index 000000000..645af2984 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/helper.rb @@ -0,0 +1,374 @@ +# frozen_string_literal: true +# Copyright (c) 2011 Evan Phoenix +# Copyright (c) 2005 Zed A. Shaw + +if RUBY_VERSION == '2.4.1' + begin + require 'stopgap_13632' + rescue LoadError + puts "For test stability, you must install the stopgap_13632 gem." 
+ exit(1) + end +end + +require "securerandom" + +# needs to be loaded before minitest for Ruby 2.7 and earlier +require_relative "helpers/test_puma/assertions" + +require_relative "minitest/verbose" +require "minitest/autorun" +require "minitest/pride" +require "minitest/proveit" +require "minitest/stub_const" +require "net/http" +require_relative "helpers/apps" + +Thread.abort_on_exception = true + +$debugging_info = [] +$debugging_hold = false # needed for TestCLI#test_control_clustered +$test_case_timeout = ENV.fetch("TEST_CASE_TIMEOUT") do + RUBY_ENGINE == "ruby" ? 45 : 60 +end.to_i + +require "puma" +require "puma/detect" + +unless ::Puma::HAS_NATIVE_IO_WAIT + require "io/wait" +end + +# used in various ssl test files, see test_puma_server_ssl.rb and +# test_puma_localhost_authority.rb +if Puma::HAS_SSL + require 'puma/log_writer' + class SSLLogWriterHelper < ::Puma::LogWriter + attr_accessor :addr, :cert, :error + + def ssl_error(error, ssl_socket) + self.error = error + self.addr = ssl_socket.peeraddr.last rescue "" + self.cert = ssl_socket.peercert + end + end +end + +# Either takes a string to do a get request against, or a tuple of [URI, HTTP] where +# HTTP is some kind of Net::HTTP request object (POST, HEAD, etc.) +def hit(uris) + uris.map do |u| + response = + if u.kind_of? 
String + Net::HTTP.get(URI.parse(u)) + else + url = URI.parse(u[0]) + Net::HTTP.new(url.host, url.port).start {|h| h.request(u[1]) } + end + + assert response, "Didn't get a response: #{u}" + response + end +end + +module UniquePort + def self.call(host = '127.0.0.1') + TCPServer.open(host, 0) do |server| + server.connect_address.ip_port + end + end +end + +require "timeout" + +if Minitest::VERSION < '5.25' + module TimeoutEveryTestCase + # our own subclass so we never confuse different timeouts + class TestTookTooLong < Timeout::Error + end + + def run + with_info_handler do + time_it do + capture_exceptions do + ::Timeout.timeout($test_case_timeout, TestTookTooLong) do + before_setup; setup; after_setup + self.send self.name + end + end + + capture_exceptions do + ::Timeout.timeout($test_case_timeout, TestTookTooLong) do + Minitest::Test::TEARDOWN_METHODS.each { |hook| self.send hook } + end + end + if respond_to? :clean_tmp_paths + clean_tmp_paths + end + end + end + + Minitest::Result.from self # per contract + end + end +else + module TimeoutEveryTestCase + # our own subclass so we never confuse different timeouts + class TestTookTooLong < Timeout::Error + end + + def run + time_it do + capture_exceptions do + ::Timeout.timeout($test_case_timeout, TestTookTooLong) do + Minitest::Test::SETUP_METHODS.each { |hook| self.send hook } + self.send self.name + end + end + + capture_exceptions do + ::Timeout.timeout($test_case_timeout, TestTookTooLong) do + Minitest::Test::TEARDOWN_METHODS.each { |hook| self.send hook } + end + end + if respond_to? :clean_tmp_paths + clean_tmp_paths + end + end + + Minitest::Result.from self # per contract + end + end +end + +Minitest::Test.prepend TimeoutEveryTestCase + +if ENV['CI'] + require 'minitest/retry' + + SUMMARY_FILE = ENV['GITHUB_STEP_SUMMARY'] + + Minitest::Retry.use! 
+ + if SUMMARY_FILE && ENV['GITHUB_ACTIONS'] == 'true' + + GITHUB_STEP_SUMMARY_MUTEX = Mutex.new + + Minitest::Retry.on_failure do |klass, test_name, result| + full_method = "#{klass}##{test_name}" + result_str = result.to_s.gsub(/#{full_method}:?\s*/, '').dup + result_str.gsub!(/\A(Failure:|Error:)\s/, '\1 ') + issue = result_str[/\A[^\n]+/] + result_str.gsub!(issue, '') + # shorten directory lists + result_str.gsub! ENV['GITHUB_WORKSPACE'], 'puma' + result_str.gsub! ENV['RUNNER_TOOL_CACHE'], '' + # remove indent + result_str.gsub!(/^ +/, '') + str = "\n**#{full_method}**\n**#{issue}**\n```\n#{result_str.strip}\n```\n" + GITHUB_STEP_SUMMARY_MUTEX.synchronize { + File.write SUMMARY_FILE, str, mode: 'a+' + } + end + end +end + +module TestSkips + + HAS_FORK = ::Process.respond_to? :fork + UNIX_SKT_EXIST = Object.const_defined?(:UNIXSocket) && !Puma::IS_WINDOWS + + MSG_FORK = "Kernel.fork isn't available on #{RUBY_ENGINE} on #{RUBY_PLATFORM}" + MSG_UNIX = "UNIXSockets aren't available on the #{RUBY_PLATFORM} platform" + MSG_AUNIX = "Abstract UNIXSockets aren't available on the #{RUBY_PLATFORM} platform" + + SIGNAL_LIST = Signal.list.keys.map(&:to_sym) - (Puma.windows? ? [:INT, :TERM] : []) + + JRUBY_HEAD = Puma::IS_JRUBY && RUBY_DESCRIPTION.include?('SNAPSHOT') + + DARWIN = RUBY_PLATFORM.include? 'darwin' + + TRUFFLE = RUBY_ENGINE == 'truffleruby' + TRUFFLE_HEAD = TRUFFLE && RUBY_DESCRIPTION.include?('-dev-') + + # usage: skip_unless_signal_exist? :USR2 + def skip_unless_signal_exist?(sig, bt: caller) + signal = sig.to_s.sub(/\ASIG/, '').to_sym + unless SIGNAL_LIST.include? 
signal + skip "Signal #{signal} isn't available on the #{RUBY_PLATFORM} platform", bt + end + end + + # called with one or more params, like skip_if :jruby, :windows + # optional suffix kwarg is appended to the skip message + # optional suffix bt should generally not used + def skip_if(*engs, suffix: '', bt: caller) + engs.each do |eng| + skip_msg = case eng + when :linux then "Skipped if Linux#{suffix}" if Puma::IS_LINUX + when :darwin then "Skipped if darwin#{suffix}" if Puma::IS_OSX + when :jruby then "Skipped if JRuby#{suffix}" if Puma::IS_JRUBY + when :truffleruby then "Skipped if TruffleRuby#{suffix}" if TRUFFLE + when :windows then "Skipped if Windows#{suffix}" if Puma::IS_WINDOWS + when :ci then "Skipped if ENV['CI']#{suffix}" if ENV['CI'] + when :no_bundler then "Skipped w/o Bundler#{suffix}" if !defined?(Bundler) + when :ssl then "Skipped if SSL is supported" if Puma::HAS_SSL + when :fork then "Skipped if Kernel.fork exists" if HAS_FORK + when :unix then "Skipped if UNIXSocket exists" if Puma::HAS_UNIX_SOCKET + when :aunix then "Skipped if abstract UNIXSocket" if Puma.abstract_unix_socket? + when :rack3 then "Skipped if Rack 3.x" if Rack.release >= '3' + else false + end + skip skip_msg, bt if skip_msg + end + end + + # called with only one param + def skip_unless(eng, bt: caller) + skip_msg = case eng + when :linux then "Skip unless Linux" unless Puma::IS_LINUX + when :darwin then "Skip unless darwin" unless Puma::IS_OSX + when :jruby then "Skip unless JRuby" unless Puma::IS_JRUBY + when :windows then "Skip unless Windows" unless Puma::IS_WINDOWS + when :mri then "Skip unless MRI" unless Puma::IS_MRI + when :ssl then "Skip unless SSL is supported" unless Puma::HAS_SSL + when :fork then MSG_FORK unless HAS_FORK + when :unix then MSG_UNIX unless Puma::HAS_UNIX_SOCKET + when :aunix then MSG_AUNIX unless Puma.abstract_unix_socket? 
+ when :rack3 then "Skipped unless Rack >= 3.x" unless ::Rack.release >= '3' + else false + end + skip skip_msg, bt if skip_msg + end +end + +Minitest::Test.include TestSkips + +class Minitest::Test + + PROJECT_ROOT = File.dirname(__dir__) + + def self.run(reporter, options = {}) # :nodoc: + prove_it! + super + end + + def full_name + "#{self.class.name}##{name}" + end +end + +Minitest.after_run do + # needed for TestCLI#test_control_clustered + if !$debugging_hold && ENV['PUMA_TEST_DEBUG'] + $debugging_info.sort! + out = $debugging_info.join.strip + unless out.empty? + dash = "\u2500" + wid = ENV['GITHUB_ACTIONS'] ? 88 : 90 + txt = " Debugging Info #{dash * 2}".rjust wid, dash + if ENV['GITHUB_ACTIONS'] + puts "", "##[group]#{txt}", out, dash * wid, '', '::[endgroup]' + else + puts "", txt, out, dash * wid, '' + end + end + end +end + +module AggregatedResults + def aggregated_results(io) + is_github_actions = ENV['GITHUB_ACTIONS'] == 'true' + filtered_results = results.dup + + if options[:verbose] + skips = filtered_results.select(&:skipped?) + unless skips.empty? + dash = "\u2500" + if is_github_actions + puts "", "##[group]Skips:" + else + io.puts '', 'Skips:' + end + hsh = skips.group_by { |f| f.failures.first.error.message } + hsh_s = {} + hsh.each { |k, ary| + hsh_s[k] = ary.map { |s| + [s.source_location, s.klass, s.name] + }.sort_by(&:first) + } + num = 0 + hsh_s = hsh_s.sort.to_h + hsh_s.each { |k,v| + io.puts " #{k} #{dash * 2}".rjust 90, dash + hsh_1 = v.group_by { |i| i.first.first } + hsh_1.each { |k1,v1| + io.puts " #{k1[/\/test\/(.*)/,1]}" + v1.each { |item| + num += 1 + io.puts format(" %3s %-5s #{item[1]} #{item[2]}", "#{num})", ":#{item[0][1]}") + } + puts '' + } + } + puts '::[endgroup]' if is_github_actions + end + end + + filtered_results.reject!(&:skipped?) + + io.puts "Errors & Failures:" unless filtered_results.empty? 
+ + filtered_results.each_with_index { |result, i| + io.puts "\n%3d) %s" % [i+1, result] + } + io.puts + io + end +end +Minitest::SummaryReporter.prepend AggregatedResults + +module TestTempFile + require "tempfile" + def tempfile_create(basename, data, mode: File::BINARY) + fio = Tempfile.create(basename, mode: mode) + fio.write data + fio.flush + fio.rewind + @ios << fio if defined?(@ios) + @ios_to_close << fio if defined?(@ios_to_close) + fio + end +end +Minitest::Test.include TestTempFile + +# This module is modified based on https://github.com/rails/rails/blob/7-1-stable/activesupport/lib/active_support/testing/method_call_assertions.rb +module MethodCallAssertions + def assert_called_on_instance_of(klass, method_name, message = nil, times: 1, returns: nil) + times_called = 0 + klass.send(:define_method, :"stubbed_#{method_name}") do |*| + times_called += 1 + + returns + end + + klass.send(:alias_method, :"original_#{method_name}", method_name) + klass.send(:alias_method, method_name, :"stubbed_#{method_name}") + + yield + + error = "Expected #{method_name} to be called #{times} times, but was called #{times_called} times" + error = "#{message}.\n#{error}" if message + + assert_equal times, times_called, error + ensure + klass.send(:alias_method, method_name, :"original_#{method_name}") + klass.send(:undef_method, :"original_#{method_name}") + klass.send(:undef_method, :"stubbed_#{method_name}") + end + + def assert_not_called_on_instance_of(klass, method_name, message = nil, &block) + assert_called_on_instance_of(klass, method_name, message, times: 0, &block) + end +end +Minitest::Test.include MethodCallAssertions diff --git a/vendor/cache/puma-fba741b91780/test/helpers/apps.rb b/vendor/cache/puma-fba741b91780/test/helpers/apps.rb new file mode 100644 index 000000000..f74e3eb09 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/helpers/apps.rb @@ -0,0 +1,12 @@ +module TestApps + + # call with "GET /sleep HTTP/1.1\r\n\r\n", where is the number of + # 
seconds to sleep + # same as rackup/sleep.ru + SLEEP = -> (env) do + dly = (env['REQUEST_PATH'][/\/sleep(\d+)/,1] || '0').to_i + sleep dly + [200, {"Content-Type" => "text/plain"}, ["Slept #{dly}"]] + end + +end diff --git a/vendor/cache/puma-fba741b91780/test/helpers/integration.rb b/vendor/cache/puma-fba741b91780/test/helpers/integration.rb new file mode 100644 index 000000000..0c77cec19 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/helpers/integration.rb @@ -0,0 +1,546 @@ +# frozen_string_literal: true + +require "puma/control_cli" +require "json" +require "open3" +require_relative 'tmp_path' + +# Only single mode tests go here. Cluster and pumactl tests +# have their own files, use those instead +class TestIntegration < Minitest::Test + include TmpPath + HOST = "127.0.0.1" + TOKEN = "xxyyzz" + RESP_READ_LEN = 65_536 + RESP_READ_TIMEOUT = 10 + RESP_SPLIT = "\r\n\r\n" + + # used in wait_for_server_to_* methods + LOG_TIMEOUT = Puma::IS_JRUBY ? 20 : 10 + LOG_WAIT_READ = Puma::IS_JRUBY ? 5 : 2 + LOG_ERROR_SLEEP = 0.2 + LOG_ERROR_QTY = 5 + + # rubyopt requires bundler/setup, so we don't need it here + BASE = "#{Gem.ruby} -Ilib" + + def setup + @server = nil + @config_file = nil + @server_log = +'' + @pid = nil + @ios_to_close = [] + @bind_path = tmp_path('.sock') + end + + def teardown + if @server && defined?(@control_tcp_port) && Puma.windows? + cli_pumactl 'stop' + elsif @server && @pid && !Puma.windows? + stop_server @pid, signal: :INT + end + + @ios_to_close&.each do |io| + begin + io.close if io.respond_to?(:close) && !io.closed? + rescue + ensure + io = nil + end + end + + if @bind_path + refute File.exist?(@bind_path), "Bind path must be removed after stop" + File.unlink(@bind_path) rescue nil + end + + # wait until the end for OS buffering? + if @server + begin + @server.close unless @server.closed? 
+ rescue + ensure + @server = nil + + if @config_file + File.unlink(@config_file.path) rescue nil + @config_file = nil + end + end + end + end + + private + + def silent_and_checked_system_command(*args) + assert(system(*args, out: File::NULL, err: File::NULL)) + end + + def with_unbundled_env + bundler_ver = Gem::Version.new(Bundler::VERSION) + if bundler_ver < Gem::Version.new('2.1.0') + Bundler.with_clean_env { yield } + else + Bundler.with_unbundled_env { yield } + end + end + + def cli_server(argv, # rubocop:disable Metrics/ParameterLists + unix: false, # uses a UNIXSocket for the server listener when true + config: nil, # string to use for config file + no_bind: nil, # bind is defined by args passed or config file + merge_err: false, # merge STDERR into STDOUT + log: false, # output server log to console (for debugging) + no_wait: false, # don't wait for server to boot + puma_debug: nil, # set env['PUMA_DEBUG'] = 'true' + env: {}) # pass env setting to Puma process in IO.popen + + if config + @config_file = Tempfile.create(%w(config .rb)) + @config_file.syswrite config + # not supported on some OS's, all GitHub Actions OS's support it + @config_file.fsync rescue nil + @config_file.close + config = "-C #{@config_file.path}" + end + + puma_path = File.expand_path '../../../bin/puma', __FILE__ + + cmd = + if no_bind + "#{BASE} #{puma_path} #{config} #{argv}" + elsif unix + "#{BASE} #{puma_path} #{config} -b unix://#{@bind_path} #{argv}" + else + @tcp_port = UniquePort.call + @bind_port = @tcp_port + "#{BASE} #{puma_path} #{config} -b tcp://#{HOST}:#{@tcp_port} #{argv}" + end + + env['PUMA_DEBUG'] = 'true' if puma_debug + + STDOUT.syswrite "\n#{full_name}\n #{cmd}\n" if log + + if merge_err + @server = IO.popen(env, cmd, :err=>[:child, :out]) + else + @server = IO.popen(env, cmd) + end + @pid = @server.pid + wait_for_server_to_boot(log: log) unless no_wait + @server + end + + # rescue statements are just in case method is called with a server + # that is already 
stopped/killed, especially since Process.wait2 is + # blocking + def stop_server(pid = @pid, signal: :TERM) + begin + Process.kill signal, pid + rescue Errno::ESRCH + end + begin + Process.wait2 pid + rescue Errno::ECHILD + end + end + + # Most integration tests do not stop/shutdown the server, which is handled by + # `teardown` in this file. + # For tests that do stop/shutdown the server, use this method to check with `wait2`, + # and also clear variables so `teardown` will not run its code. + def wait_server(exit_code = 0, pid: @pid) + return unless pid + begin + _, status = Process.wait2 pid + assert_equal exit_code, status + rescue Errno::ECHILD # raised on Windows ? + end + ensure + @server.close unless @server.closed? + @server = nil + end + + def restart_server_and_listen(argv, env: {}, log: false) + cli_server argv, env: env, log: log + connection = connect + initial_reply = read_body(connection) + restart_server connection, log: log + [initial_reply, read_body(connect)] + end + + # reuses an existing connection to make sure that works + def restart_server(connection, log: false) + Process.kill :USR2, @pid + wait_for_server_to_include 'Restarting', log: log + connection.write "GET / HTTP/1.1\r\n\r\n" # trigger it to start by sending a new request + wait_for_server_to_boot log: log + end + + # wait for server to say it booted + # @server and/or @server.gets may be nil on slow CI systems + def wait_for_server_to_boot(timeout: LOG_TIMEOUT, log: false) + @puma_pid = wait_for_server_to_match(/(?:Master| ) PID: (\d+)$/, 1, timeout: timeout, log: log)&.to_i + @pid = @puma_pid if @pid != @puma_pid + wait_for_server_to_include 'Ctrl-C', timeout: timeout, log: log + end + + # Returns true if and when server log includes str. Will timeout otherwise. 
+ def wait_for_server_to_include(str, timeout: LOG_TIMEOUT, log: false) + time_timeout = Process.clock_gettime(Process::CLOCK_MONOTONIC) + timeout + line = '' + + puts "\n——— #{full_name} waiting for '#{str}'" if log + line = server_gets(str, time_timeout, log: log) until line&.include?(str) + true + end + + # Returns line if and when server log matches re, unless idx is specified, + # then returns regex match. Will timeout otherwise. + def wait_for_server_to_match(re, idx = nil, timeout: LOG_TIMEOUT, log: false) + time_timeout = Process.clock_gettime(Process::CLOCK_MONOTONIC) + timeout + line = '' + + puts "\n——— #{full_name} waiting for '#{re.inspect}'" if log + line = server_gets(re, time_timeout, log: log) until line&.match?(re) + idx ? line[re, idx] : line + end + + def server_gets(match_obj, time_timeout, log: false) + error_retries = 0 + line = '' + + sleep 0.05 until @server.is_a?(IO) || Process.clock_gettime(Process::CLOCK_MONOTONIC) > time_timeout + + raise Minitest::Assertion, "@server is not an IO" unless @server.is_a?(IO) + if Process.clock_gettime(Process::CLOCK_MONOTONIC) > time_timeout + raise Minitest::Assertion, "Timeout waiting for server to log #{match_obj.inspect}" + end + + begin + if @server.wait_readable(LOG_WAIT_READ) and line = @server&.gets + @server_log << line + puts " #{line}" if log + end + rescue StandardError => e + error_retries += 1 + raise(e, "Waiting for server to log #{match_obj.inspect}") if error_retries == LOG_ERROR_QTY + sleep LOG_ERROR_SLEEP + retry + end + if Process.clock_gettime(Process::CLOCK_MONOTONIC) > time_timeout + raise Minitest::Assertion, "Timeout waiting for server to log #{match_obj.inspect}" + end + line + end + + def connect(path = nil, unix: false) + s = unix ? 
UNIXSocket.new(@bind_path) : TCPSocket.new(HOST, @tcp_port) + @ios_to_close << s + s << "GET /#{path} HTTP/1.1\r\n\r\n" + s + end + + # use only if all socket writes are fast + # does not wait for a read + def fast_connect(path = nil, unix: false) + s = unix ? UNIXSocket.new(@bind_path) : TCPSocket.new(HOST, @tcp_port) + @ios_to_close << s + fast_write s, "GET /#{path} HTTP/1.1\r\n\r\n" + s + end + + def fast_write(io, str) + n = 0 + while true + begin + n = io.syswrite str + rescue Errno::EAGAIN, Errno::EWOULDBLOCK => e + unless io.wait_writable 5 + raise e + end + + retry + rescue Errno::EPIPE, SystemCallError, IOError => e + raise e + end + + return if n == str.bytesize + str = str.byteslice(n..-1) + end + end + + def read_body(connection, timeout = nil) + read_response(connection, timeout).last + end + + def read_response(connection, timeout = nil) + timeout ||= RESP_READ_TIMEOUT + content_length = nil + chunked = nil + response = +'' + t_st = Process.clock_gettime Process::CLOCK_MONOTONIC + if connection.to_io.wait_readable timeout + loop do + begin + part = connection.read_nonblock(RESP_READ_LEN, exception: false) + case part + when String + unless content_length || chunked + chunked ||= part.include? "\r\nTransfer-Encoding: chunked\r\n" + content_length = (t = part[/^Content-Length: (\d+)/i , 1]) ? t.to_i : nil + end + + response << part + hdrs, body = response.split RESP_SPLIT, 2 + unless body.nil? + # below could be simplified, but allows for debugging... + ret = + if content_length + body.bytesize == content_length + elsif chunked + body.end_with? "\r\n0\r\n\r\n" + elsif !hdrs.empty? && !body.empty? 
+ true + else + false + end + if ret + return [hdrs, body] + end + end + sleep 0.000_1 + when :wait_readable, :wait_writable # :wait_writable for ssl + sleep 0.000_2 + when nil + raise EOFError + end + if timeout < Process.clock_gettime(Process::CLOCK_MONOTONIC) - t_st + raise Timeout::Error, 'Client Read Timeout' + end + end + end + else + raise Timeout::Error, 'Client Read Timeout' + end + end + + # gets worker pids from @server output + def get_worker_pids(phase = 0, size = workers, log: false) + pids = [] + re = /PID: (\d+)\) booted in [.0-9]+s, phase: #{phase}/ + while pids.size < size + if pid = wait_for_server_to_match(re, 1, log: log) + pids << pid + end + end + pids.map(&:to_i) + end + + # used to define correct 'refused' errors + def thread_run_refused(unix: false) + if unix + DARWIN ? [IOError, Errno::ENOENT, Errno::EPIPE, Errno::EBADF] : + [IOError, Errno::ENOENT] + else + # Errno::ECONNABORTED is thrown intermittently on TCPSocket.new + # Errno::ECONNABORTED is thrown by Windows on read or write + DARWIN ? 
[IOError, Errno::ECONNREFUSED, Errno::EPIPE, Errno::EBADF, EOFError, Errno::ECONNABORTED] : + [IOError, Errno::ECONNREFUSED, Errno::EPIPE, Errno::ECONNABORTED] + end + end + + def set_pumactl_args(unix: false) + if unix + @control_path = tmp_path('.cntl_sock') + "--control-url unix://#{@control_path} --control-token #{TOKEN}" + else + @control_tcp_port = UniquePort.call + "--control-url tcp://#{HOST}:#{@control_tcp_port} --control-token #{TOKEN}" + end + end + + def cli_pumactl(argv, unix: false, no_bind: nil) + arg = + if no_bind + argv.split(/ +/) + elsif unix + %W[-C unix://#{@control_path} -T #{TOKEN} #{argv}] + else + %W[-C tcp://#{HOST}:#{@control_tcp_port} -T #{TOKEN} #{argv}] + end + + r, w = IO.pipe + @ios_to_close << r + Puma::ControlCLI.new(arg, w, w).run + w.close + r + end + + def cli_pumactl_spawn(argv, unix: false, no_bind: nil) + arg = + if no_bind + argv + elsif unix + %Q[-C unix://#{@control_path} -T #{TOKEN} #{argv}] + else + %Q[-C tcp://#{HOST}:#{@control_tcp_port} -T #{TOKEN} #{argv}] + end + + pumactl_path = File.expand_path '../../../bin/pumactl', __FILE__ + + cmd = "#{BASE} #{pumactl_path} #{arg}" + + io = IO.popen(cmd, :err=>[:child, :out]) + @ios_to_close << io + io + end + + def get_stats + read_pipe = cli_pumactl "stats" + JSON.parse(read_pipe.readlines.last) + end + + def hot_restart_does_not_drop_connections(num_threads: 1, total_requests: 500) + skipped = true + skip_if :jruby, suffix: <<-MSG + - file descriptors are not preserved on exec on JRuby; connection reset errors are expected during restarts + MSG + skip_if :truffleruby, suffix: ' - Undiagnosed failures on TruffleRuby' + + args = "-w #{workers} -t 5:5 -q test/rackup/hello_with_delay.ru" + if Puma.windows? 
+ @control_tcp_port = UniquePort.call + cli_server "--control-url tcp://#{HOST}:#{@control_tcp_port} --control-token #{TOKEN} #{args}" + else + cli_server args + end + + skipped = false + replies = Hash.new 0 + refused = thread_run_refused unix: false + message = 'A' * 16_256 # 2^14 - 128 + + mutex = Mutex.new + restart_count = 0 + client_threads = [] + + num_requests = (total_requests/num_threads).to_i + + num_threads.times do |thread| + client_threads << Thread.new do + num_requests.times do |req_num| + begin + begin + socket = TCPSocket.new HOST, @tcp_port + fast_write socket, "POST / HTTP/1.1\r\nContent-Length: #{message.bytesize}\r\n\r\n#{message}" + rescue => e + replies[:write_error] += 1 + raise e + end + body = read_body(socket, 10) + if body == "Hello World" + mutex.synchronize { + replies[:success] += 1 + replies[:restart] += 1 if restart_count > 0 + } + else + mutex.synchronize { replies[:unexpected_response] += 1 } + end + rescue Errno::ECONNRESET, Errno::EBADF, Errno::ENOTCONN + # connection was accepted but then closed + # client would see an empty response + # Errno::EBADF Windows may not be able to make a connection + mutex.synchronize { replies[:reset] += 1 } + rescue *refused, IOError + # IOError intermittently thrown by Ubuntu, add to allow retry + mutex.synchronize { replies[:refused] += 1 } + rescue ::Timeout::Error + mutex.synchronize { replies[:read_timeout] += 1 } + ensure + if socket.is_a?(IO) && !socket.closed? + begin + socket.close + rescue Errno::EBADF + end + end + end + end + # STDOUT.puts "#{thread} #{replies[:success]}" + end + end + + run = true + + restart_thread = Thread.new do + sleep 0.2 # let some connections in before 1st restart + while run + if Puma.windows? 
+ cli_pumactl 'restart' + else + Process.kill :USR2, @pid + end + sleep 0.5 + # If 'wait_for_server_to_boot' times out, error in thread shuts down CI + begin + wait_for_server_to_boot timeout: 5 + rescue Minitest::Assertion # Timeout + run = false + end + restart_count += 1 + sleep(Puma.windows? ? 2.0 : 0.5) + end + end + + client_threads.each(&:join) + run = false + restart_thread.join + if Puma.windows? + cli_pumactl 'stop' + wait_server + else + stop_server + end + @server = nil + + msg = (" %4d unexpected_response\n" % replies.fetch(:unexpected_response,0)).dup + msg << " %4d refused\n" % replies.fetch(:refused,0) + msg << " %4d read timeout\n" % replies.fetch(:read_timeout,0) + msg << " %4d reset\n" % replies.fetch(:reset,0) + msg << " %4d success\n" % replies.fetch(:success,0) + msg << " %4d success after restart\n" % replies.fetch(:restart,0) + msg << " %4d restart count\n" % restart_count + + refused = replies[:refused] + reset = replies[:reset] + + if Puma.windows? + # 5 is default thread count in Puma? + reset_max = num_threads * restart_count + assert_operator reset_max, :>=, reset, "#{msg}Expected reset_max >= reset errors" + assert_operator 40, :>=, refused, "#{msg}Too many refused connections" + else + assert_equal 0, reset, "#{msg}Expected no reset errors" + max_refused = (0.001 * replies.fetch(:success,0)).round + assert_operator max_refused, :>=, refused, "#{msg}Expected no than #{max_refused} refused connections" + end + assert_equal 0, replies[:unexpected_response], "#{msg}Unexpected response" + assert_equal 0, replies[:read_timeout], "#{msg}Expected no read timeouts" + + if Puma.windows? + assert_equal (num_threads * num_requests) - reset - refused, replies[:success] + else + assert_equal (num_threads * num_requests), replies[:success] + end + + ensure + return if skipped + if passed? 
+ msg = " #{restart_count} restarts, #{reset} resets, #{refused} refused, #{replies[:restart]} success after restart, #{replies[:write_error]} write error" + $debugging_info << "#{full_name}\n#{msg}\n" + else + client_threads.each { |thr| thr.kill if thr.is_a? Thread } + $debugging_info << "#{full_name}\n#{msg}\n" + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/helpers/ssl.rb b/vendor/cache/puma-fba741b91780/test/helpers/ssl.rb new file mode 100644 index 000000000..b3ad4e948 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/helpers/ssl.rb @@ -0,0 +1,27 @@ +module SSLHelper + def ssl_query + @ssl_query ||= if Puma.jruby? + @keystore = File.expand_path "../../examples/puma/keystore.jks", __dir__ + @ssl_cipher_list = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + "keystore=#{@keystore}&keystore-pass=jruby_puma&ssl_cipher_list=#{@ssl_cipher_list}" + else + @cert = File.expand_path "../../examples/puma/cert_puma.pem", __dir__ + @key = File.expand_path "../../examples/puma/puma_keypair.pem", __dir__ + "key=#{@key}&cert=#{@cert}" + end + end + + # sets and returns an opts hash for use with Puma::DSL.ssl_bind_str + def ssl_opts + @ssl_opts ||= if Puma.jruby? 
+ @ssl_opts = {} + @ssl_opts[:keystore] = File.expand_path '../../examples/puma/keystore.jks', __dir__ + @ssl_opts[:keystore_pass] = 'jruby_puma' + @ssl_opts[:ssl_cipher_list] = 'TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256' + else + @ssl_opts = {} + @ssl_opts[:cert] = File.expand_path '../../examples/puma/cert_puma.pem', __dir__ + @ssl_opts[:key] = File.expand_path '../../examples/puma/puma_keypair.pem', __dir__ + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/helpers/test_puma.rb b/vendor/cache/puma-fba741b91780/test/helpers/test_puma.rb new file mode 100644 index 000000000..24ca2e24a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/helpers/test_puma.rb @@ -0,0 +1,64 @@ +# frozen_string_literal: true + +require 'socket' + +module TestPuma + + RESP_SPLIT = "\r\n\r\n" + LINE_SPLIT = "\r\n" + + RE_HOST_TO_IP = /\A\[|\]\z/o + + HOST4 = begin + t = Socket.ip_address_list.select(&:ipv4_loopback?).map(&:ip_address) + .uniq.sort_by(&:length) + # puts "IPv4 Loopback #{t}" + str = t.include?('127.0.0.1') ? +'127.0.0.1' : +"#{t.first}" + str.define_singleton_method(:ip) { self } + str.freeze + end + + HOST6 = begin + t = Socket.ip_address_list.select(&:ipv6_loopback?).map(&:ip_address) + .uniq.sort_by(&:length) + # puts "IPv6 Loopback #{t}" + str = t.include?('::1') ? +'[::1]' : +"[#{t.first}]" + str.define_singleton_method(:ip) { self.gsub RE_HOST_TO_IP, '' } + str.freeze + end + + LOCALHOST = ENV.fetch 'PUMA_CI_DFLT_HOST', 'localhost' + + if ENV['PUMA_CI_DFLT_IP'] =='IPv6' + HOST = HOST6 + ALT_HOST = HOST4 + else + HOST = HOST4 + ALT_HOST = HOST6 + end + + DARWIN = RUBY_PLATFORM.include? 
'darwin' + + TOKEN = "xxyyzz" + + # Returns an available port by using `TCPServer.open(host, 0)` + def new_port(host = HOST) + TCPServer.open(host, 0) { |server| server.connect_address.ip_port } + end + + def bind_uri_str + if @bind_port + "tcp://#{HOST}:#{@bind_port}" + elsif @bind_path + "unix://#{HOST}:#{@bind_path}" + end + end + + def control_uri_str + if @control_port + "tcp://#{HOST}:#{@control_port}" + elsif @control_path + "unix://#{HOST}:#{@control_path}" + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/helpers/test_puma/assertions.rb b/vendor/cache/puma-fba741b91780/test/helpers/test_puma/assertions.rb new file mode 100644 index 000000000..72532d43f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/helpers/test_puma/assertions.rb @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +module TestPuma + module Assertions + def assert_start_with(obj, str, msg = nil) + msg = message(msg) { + "Expected\n#{obj}\nto start with #{str}" + } + assert_respond_to obj, :start_with? + assert obj.start_with?(str), msg + end + + def assert_end_with(obj, str, msg = nil) + msg = message(msg) { + "Expected\n#{obj}\nto end with #{str}" + } + assert_respond_to obj, :end_with? 
+ assert obj.end_with?(str), msg + end + + # if obj is longer than 80 characters, show as string, not inspected + def assert_match(matcher, obj, msg = nil) + msg = if obj.length < 80 + message(msg) { "Expected #{mu_pp matcher} to match #{mu_pp obj}" } + else + message(msg) { "Expected #{mu_pp matcher} to match:\n#{obj}\n" } + end + assert_respond_to matcher, :"=~" + matcher = Regexp.new Regexp.escape matcher if String === matcher + assert matcher =~ obj, msg + end + end +end + +module Minitest + module Assertions + prepend TestPuma::Assertions + end +end diff --git a/vendor/cache/puma-fba741b91780/test/helpers/test_puma/puma_socket.rb b/vendor/cache/puma-fba741b91780/test/helpers/test_puma/puma_socket.rb new file mode 100644 index 000000000..00c5009b0 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/helpers/test_puma/puma_socket.rb @@ -0,0 +1,410 @@ +# frozen_string_literal: true + +require 'socket' +require_relative '../test_puma' +require_relative 'response' + +module TestPuma + + # @!macro [new] req + # @param req [String, GET_11] request path + + # @!macro [new] skt + # @param host: [String] tcp/ssl host + # @param port: [Integer/String] tcp/ssl port + # @param path: [String] unix socket, full path + # @param ctx: [OpenSSL::SSL::SSLContext] ssl context + # @param session: [OpenSSL::SSL::Session] ssl session + + # @!macro [new] resp + # @param timeout: [Float, nil] total socket read timeout, defaults to `RESP_READ_TIMEOUT` + # @param len: [ Integer, nil] the `read_nonblock` maxlen, defaults to `RESP_READ_LEN` + + # This module is included in CI test files, and provides methods to create + # client sockets. Normally, the socket parameters are defined by the code + # creating the Puma server (in-process or spawned), so they do not need to be + # specified. Regardless, many of the less frequently used parameters still + # have keyword arguments and they can be set to whatever is required. 
+ # + # This module closes all sockets and performs all reads non-blocking and all + # writes using syswrite. These are helpful for reliable tests. Please do not + # use native Ruby sockets except if absolutely necessary. + # + # #### Methods that return a socket or sockets: + # * `new_socket` - Opens a socket + # * `send_http` - Opens a socket and sends a request, which defaults to `GET_11` + # * `send_http_array` - Creates an array of sockets. It opens each and sends a request on each + # + # All methods that create a socket have the following optional keyword parameters: + # * `host:` - tcp/ssl host (`String`) + # * `port:` - tcp/ssl port (`Integer`, `String`) + # * `path:` - unix socket, full path (`String`) + # * `ctx:` - ssl context (`OpenSSL::SSL::SSLContext`) + # * `session:` - ssl session (`OpenSSL::SSL::Session`) + # + # #### Methods that process the response: + # * `send_http_read_response` - sends a request and returns the whole response + # * `send_http_read_resp_body` - sends a request and returns the response body + # * `send_http_read_resp_headers` - sends a request and returns the response with the body removed as an array of lines + # + # All methods that process the response have the following optional keyword parameters: + # * `timeout:` - total socket read timeout, defaults to `RESP_READ_TIMEOUT` (`Float`) + # * `len:` - the `read_nonblock` maxlen, defaults to `RESP_READ_LEN` (`Integer`) + # + # #### Methods added to socket instances: + # * `read_response` - reads the response and returns it, uses `READ_RESPONSE` + # * `read_body` - reads the response and returns the body, uses `READ_BODY` + # * `<<` - overrides the standard method, writes to the socket with `syswrite`, returns the socket + # + module PumaSocket + GET_10 = "GET / HTTP/1.0\r\n\r\n" + GET_11 = "GET / HTTP/1.1\r\n\r\n" + + HELLO_11 = "HTTP/1.1 200 OK\r\ncontent-type: text/plain\r\n" \ + "Content-Length: 11\r\n\r\nHello World" + + RESP_READ_LEN = 65_536 + RESP_READ_TIMEOUT = 10 + 
NO_ENTITY_BODY = Puma::STATUS_WITH_NO_ENTITY_BODY + EMPTY_200 = [200, {}, ['']] + + UTF8 = ::Encoding::UTF_8 + + SET_TCP_NODELAY = Socket.const_defined?(:IPPROTO_TCP) && ::Socket.const_defined?(:TCP_NODELAY) + + def before_setup + @ios_to_close ||= [] + @bind_port = nil + @bind_path = nil + @control_port = nil + @control_path = nil + super + end + + # Closes all io's in `@ios_to_close`, also deletes them if they are files + def after_teardown + return if skipped? + super + # Errno::EBADF raised on macOS + @ios_to_close.each do |io| + begin + if io.respond_to? :sysclose + io.sync_close = true + io.sysclose unless io.closed? + else + io.close if io.respond_to?(:close) && !io.closed? + if io.is_a?(File) && (path = io&.path) && File.exist?(path) + File.unlink path + end + end + rescue Errno::EBADF, Errno::ENOENT, IOError + ensure + io = nil + end + end + # not sure about below, may help with gc... + @ios_to_close.clear + @ios_to_close = nil + end + + # rubocop: disable Metrics/ParameterLists + + # Sends a request and returns the response header lines as an array of strings. + # Includes the status line. + # @!macro req + # @!macro skt + # @!macro resp + # @return [Array] array of header lines in the response + def send_http_read_resp_headers(req = GET_11, host: nil, port: nil, path: nil, ctx: nil, + session: nil, len: nil, timeout: nil) + skt = send_http req, host: host, port: port, path: path, ctx: ctx, session: session + resp = skt.read_response timeout: timeout, len: len + resp.split(RESP_SPLIT, 2).first.split "\r\n" + end + + # Sends a request and returns the HTTP response body. 
+ # @!macro req + # @!macro skt + # @!macro resp + # @return [Response] the body portion of the HTTP response + def send_http_read_resp_body(req = GET_11, host: nil, port: nil, path: nil, ctx: nil, + session: nil, len: nil, timeout: nil) + skt = send_http req, host: host, port: port, path: path, ctx: ctx, session: session + skt.read_body timeout: timeout, len: len + end + + # Sends a request and returns whatever can be read. Use when multiple + # responses are sent by the server + # @!macro req + # @!macro skt + # @return [String] socket read string + def send_http_read_all(req = GET_11, host: nil, port: nil, path: nil, ctx: nil, + session: nil, len: nil, timeout: nil) + skt = send_http req, host: host, port: port, path: path, ctx: ctx, session: session + read = String.new # rubocop: disable Performance/UnfreezeString + counter = 0 + prev_size = 0 + loop do + raise(Timeout::Error, 'Client Read Timeout') if counter > 5 + if skt.wait_readable 1 + read << skt.sysread(RESP_READ_LEN) + end + ttl_read = read.bytesize + return read if prev_size == ttl_read && !ttl_read.zero? + prev_size = ttl_read + counter += 1 + end + rescue EOFError + return read + rescue => e + raise e + end + + # Sends a request and returns the HTTP response. Assumes one response is sent + # @!macro req + # @!macro skt + # @!macro resp + # @return [Response] the HTTP response + def send_http_read_response(req = GET_11, host: nil, port: nil, path: nil, ctx: nil, + session: nil, len: nil, timeout: nil) + skt = send_http req, host: host, port: port, path: path, ctx: ctx, session: session + skt.read_response timeout: timeout, len: len + end + + # Sends a request and returns the socket + # @param req [String, nil] The request stirng. 
+ # @!macro req + # @!macro skt + # @return [OpenSSL::SSL::SSLSocket, TCPSocket, UNIXSocket] the created socket + def send_http(req = GET_11, host: nil, port: nil, path: nil, ctx: nil, session: nil) + skt = new_socket host: host, port: port, path: path, ctx: ctx, session: session + skt.syswrite req + skt + end + + # Determines whether the socket has been closed by the server. Only works when + # `Socket::TCP_INFO is defined`, linux/Ubuntu + # @param socket [OpenSSL::SSL::SSLSocket, TCPSocket, UNIXSocket] + # @return [Boolean] true if closed by server, false is indeterminate, as + # it may not be writable + # + def skt_closed_by_server(socket) + skt = socket.to_io + return false unless skt.kind_of?(TCPSocket) + + begin + tcp_info = skt.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_INFO) + rescue IOError, SystemCallError + false + else + state = tcp_info.unpack('C')[0] + # TIME_WAIT: 6, CLOSE: 7, CLOSE_WAIT: 8, LAST_ACK: 9, CLOSING: 11 + (state >= 6 && state <= 9) || state == 11 + end + end + + READ_BODY = -> (timeout: nil, len: nil) { + self.read_response(timeout: nil, len: nil) + .split(RESP_SPLIT, 2).last + } + + READ_RESPONSE = -> (timeout: nil, len: nil) do + content_length = nil + chunked = nil + status = nil + no_body = nil + response = Response.new + read_len = len || RESP_READ_LEN + + timeout ||= RESP_READ_TIMEOUT + time_start = Process.clock_gettime(Process::CLOCK_MONOTONIC) + time_end = time_start + timeout + times = [] + time_read = nil + + loop do + begin + self.to_io.wait_readable timeout + time_read ||= Process.clock_gettime(Process::CLOCK_MONOTONIC) + part = self.read_nonblock(read_len, exception: false) + case part + when String + times << (Process.clock_gettime(Process::CLOCK_MONOTONIC) - time_read).round(4) + status ||= part[/\AHTTP\/1\.[01] (\d{3})/, 1] + if status + no_body ||= NO_ENTITY_BODY.key? 
status.to_i || status.to_i < 200 + end + if no_body && part.end_with?(RESP_SPLIT) + response.times = times + return response << part + end + + unless content_length || chunked + chunked ||= part.downcase.include? "\r\ntransfer-encoding: chunked\r\n" + content_length = (t = part[/^Content-Length: (\d+)/i , 1]) ? t.to_i : nil + end + response << part + hdrs, body = response.split RESP_SPLIT, 2 + unless body.nil? + # below could be simplified, but allows for debugging... + finished = + if content_length + body.bytesize == content_length + elsif chunked + body.end_with? "0\r\n\r\n" + elsif !hdrs.empty? && !body.empty? + true + else + false + end + response.times = times + return response if finished + end + sleep 0.000_1 + when :wait_readable + # continue loop + when :wait_writable # :wait_writable for ssl + to = time_end - Process.clock_gettime(Process::CLOCK_MONOTONIC) + self.to_io.wait_writable to + when nil + if response.empty? + raise EOFError + else + response.times = times + return response + end + end + timeout = time_end - Process.clock_gettime(Process::CLOCK_MONOTONIC) + if timeout <= 0 + raise Timeout::Error, 'Client Read Timeout' + end + end + end + end + + # @todo verify whole string is written + REQ_WRITE = -> (str) { self.syswrite str; self } + + # Helper for creating an `OpenSSL::SSL::SSLContext`. + # @param &blk [Block] Passed the SSLContext. + # @yield [OpenSSL::SSL::SSLContext] + # @return [OpenSSL::SSL::SSLContext] The new socket + def new_ctx(&blk) + ctx = OpenSSL::SSL::SSLContext.new + if blk + yield ctx + else + ctx.verify_mode = OpenSSL::SSL::VERIFY_NONE + end + ctx + end + + # Creates a new client socket. 
TCP, SSL, and UNIX are supported + # @!macro req + # @return [OpenSSL::SSL::SSLSocket, TCPSocket, UNIXSocket] the created socket + # + def new_socket(host: nil, port: nil, path: nil, ctx: nil, session: nil) + port ||= @bind_port + path ||= @bind_path + ip ||= (host || HOST.ip).gsub RE_HOST_TO_IP, '' # in case a URI style IPv6 is passed + + skt = + if path && !port && !ctx + UNIXSocket.new path.sub(/\A@/, "\0") # sub is for abstract + elsif port # && !path + tcp = TCPSocket.new ip, port.to_i + tcp.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) if SET_TCP_NODELAY + if ctx + ::OpenSSL::SSL::SSLSocket.new tcp, ctx + else + tcp + end + else + raise 'port or path must be set!' + end + + skt.define_singleton_method :read_response, READ_RESPONSE + skt.define_singleton_method :read_body, READ_BODY + skt.define_singleton_method :<<, REQ_WRITE + skt.define_singleton_method :req_write, REQ_WRITE # used for chaining + @ios_to_close << skt + if ctx + @ios_to_close << tcp + skt.session = session if session + skt.sync_close = true + skt.connect + end + skt + end + + # Creates an array of sockets, sending a request on each + # @param req [String] the request + # @param len [Integer] the number of requests to send + # @return [Array] + # + def send_http_array(req = GET_11, len, dly: 0.000_1, max_retries: 5) + Array.new(len) { + retries = 0 + begin + skt = send_http req + sleep dly + skt + rescue Errno::ECONNREFUSED + retries += 1 + if retries < max_retries + retry + else + flunk 'Generate requests failed from Errno::ECONNREFUSED' + end + end + } + end + + # Reads an array of sockets that have already had requests sent. + # @param skts [Array] an array matching the order of the parameter + # `skts`, contains the response or the error class generated by the socket. + # + def read_response_array(skts, resp_count: nil, body_only: nil) + results = Array.new skts.length + Thread.new do + until skts.compact.empty? + skts.each_with_index do |skt, idx| + next if skt.nil? 
+ begin + next unless skt.wait_readable 0.000_5 + if resp_count + resp = skt.read_response.dup + cntr = 0 + until resp.split(RESP_SPLIT).length == resp_count + 1 || cntr > 20 + cntr += 1 + Thread.pass + if skt.wait_readable 0.001 + begin + resp << skt.read_response + rescue EOFError + break + end + end + end + results[idx] = resp + else + results[idx] = body_only ? skt.read_body : skt.read_response + end + rescue StandardError => e + results[idx] = e.class + end + begin + skt.close unless skt.closed? # skt.close may return Errno::EBADF + rescue StandardError => e + results[idx] ||= e.class + end + skts[idx] = nil + end + end + end.join 15 + results + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/helpers/test_puma/response.rb b/vendor/cache/puma-fba741b91780/test/helpers/test_puma/response.rb new file mode 100644 index 000000000..0907c4eb4 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/helpers/test_puma/response.rb @@ -0,0 +1,56 @@ +module TestPuma + + # A subclass of String, allows processing the response returned by + # `PumaSocket#send_http_read_response` and the `read_response` method added + # to native socket instances (created with `PumaSocket#new_socket` and + # `PumaSocket#send_http`. + # + class Response < String + + attr_accessor :times + + # Returns response headers as an array of lines + # @return [Array] + def headers + @headers ||= begin + ary = self.split(RESP_SPLIT, 2).first.split LINE_SPLIT + @status = ary.shift + ary + end + end + + # Returns response headers as a hash. All keys and values are strings. 
+ # @return [Hash] + def headers_hash + @headers_hash ||= headers.map { |hdr| hdr.split ': ', 2 }.to_h + end + + def status + headers + @status + end + + def body + self.split(RESP_SPLIT, 2).last + end + + # Decodes a chunked body + # @return [String] the decoded body + def decode_body + decoded = String.new # rubocop: disable Performance/UnfreezeString + + body = self.split(RESP_SPLIT, 2).last + body = body.byteslice 0, body.bytesize - 5 # remove terminating bytes + + loop do + size, body = body.split LINE_SPLIT, 2 + size = size.to_i 16 + + decoded << body.byteslice(0, size) + body = body.byteslice (size+2)..-1 # remove segment ending "\r\n" + break if body.empty? || body.nil? + end + decoded + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/helpers/tmp_path.rb b/vendor/cache/puma-fba741b91780/test/helpers/tmp_path.rb new file mode 100644 index 000000000..0e2f73a07 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/helpers/tmp_path.rb @@ -0,0 +1,43 @@ +# frozen_string_literal: true + +module TmpPath + def clean_tmp_paths + while path = tmp_paths.pop + delete_tmp_path(path) + end + end + + private + + # With some macOS configurations, the following error may be raised when + # creating a UNIXSocket: + # + # too long unix socket path (106 bytes given but 104 bytes max) (ArgumentError) + # + PUMA_TMPDIR = + begin + if RUBY_DESCRIPTION.include? 'darwin' + # adds subdirectory 'tmp' in repository folder + dir_temp = File.absolute_path("#{__dir__}/../../tmp") + Dir.mkdir dir_temp unless Dir.exist? 
dir_temp + './tmp' + else + nil + end + end + + def tmp_path(extension=nil) + path = Tempfile.create(['', extension], PUMA_TMPDIR) { |f| f.path } + tmp_paths << path + path + end + + def tmp_paths + @tmp_paths ||= [] + end + + def delete_tmp_path(path) + File.unlink(path) + rescue Errno::ENOENT + end +end diff --git a/vendor/cache/puma-fba741b91780/test/minitest/verbose.rb b/vendor/cache/puma-fba741b91780/test/minitest/verbose.rb new file mode 100644 index 000000000..0f4747fac --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/minitest/verbose.rb @@ -0,0 +1,5 @@ +require "minitest" +require_relative "verbose_progress_plugin" + +Minitest.load_plugins +Minitest.extensions << 'verbose_progress' unless Minitest.extensions.include?('verbose_progress') diff --git a/vendor/cache/puma-fba741b91780/test/minitest/verbose_progress_plugin.rb b/vendor/cache/puma-fba741b91780/test/minitest/verbose_progress_plugin.rb new file mode 100644 index 000000000..372511e75 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/minitest/verbose_progress_plugin.rb @@ -0,0 +1,34 @@ +module Minitest + # Adds minimal support for parallel tests to the default verbose progress reporter. + def self.plugin_verbose_progress_init(options) + if options[:verbose] + self.reporter.reporters. + delete_if {|r| r.is_a?(ProgressReporter)}. + push(VerboseProgressReporter.new(options[:io], options)) + end + end + + # Verbose progress reporter that supports parallel test execution. 
+ class VerboseProgressReporter < Reporter + def prerecord(klass, name) + @current ||= nil + @current = [klass.name, name].tap { |t| print_start t } + end + + def record(result) + print_start [result.klass, result.name] + @current = nil + io.print "%.2f s = " % [result.time] + io.print result.result_code + io.puts + end + + def print_start(test) + unless @current == test + io.puts '…' if @current + io.print "%s#%s = " % test + io.flush + end + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/rackup/big_file.ru b/vendor/cache/puma-fba741b91780/test/rackup/big_file.ru new file mode 100644 index 000000000..88abcc472 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/big_file.ru @@ -0,0 +1,7 @@ +static_file_path = File.join(Dir.tmpdir, "puma-static.txt") +File.write(static_file_path, "Hello World" * 100_000) + +run lambda { |env| + f = File.open(static_file_path) + [200, {"Content-Type" => "text/plain", "Content-Length" => f.size.to_s}, f] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/big_response.ru b/vendor/cache/puma-fba741b91780/test/rackup/big_response.ru new file mode 100644 index 000000000..8d5dee625 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/big_response.ru @@ -0,0 +1 @@ +run lambda { |env| [200, {"Content-Type" => "text/plain"}, ["Hello World" * 100_000]] } diff --git a/vendor/cache/puma-fba741b91780/test/rackup/ci_array.ru b/vendor/cache/puma-fba741b91780/test/rackup/ci_array.ru new file mode 100644 index 000000000..36e93dede --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/ci_array.ru @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +# Generates a response with array bodies, size set via ENV['CI_BODY_CONF'] or +# `Body-Conf` request header. 
+# See 'CI - test/rackup/ci-*.ru files' or docs/test_rackup_ci_files.md + +require 'securerandom' + +headers = {} +headers['Content-Type'] = 'text/plain; charset=utf-8'.freeze +25.times { |i| headers["X-My-Header-#{i}"] = SecureRandom.hex(25) } + +hdr_dly = 'HTTP_DLY' +hdr_body_conf = 'HTTP_BODY_CONF' + +# length = 1018 bytesize = 1024 +str_1kb = "──#{SecureRandom.hex 507}─\n".freeze + +env_len = (t = ENV['CI_BODY_CONF']) ? t[/\d+\z/].to_i : 10 + +cache_array = {} + +run lambda { |env| + info = if (dly = env[hdr_dly]) + hash_key = +"#{dly}," + sleep dly.to_f + "#{Process.pid}\nHello World\nSlept #{dly}\n" + else + hash_key = +"," + "#{Process.pid}\nHello World\n" + end + info_len_adj = 1023 - info.bytesize + + len = (t = env[hdr_body_conf]) ? t[/\d+\z/].to_i : env_len + + hash_key << len.to_s + + headers[hdr_content_length] = (1_024 * len).to_s + body = cache_array[hash_key] ||= begin + temp = Array.new len, str_1kb + temp[0] = info + str_1kb.byteslice(0, info_len_adj) + "\n" + temp + end + [200, headers, body] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/ci_chunked.ru b/vendor/cache/puma-fba741b91780/test/rackup/ci_chunked.ru new file mode 100644 index 000000000..fb389359c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/ci_chunked.ru @@ -0,0 +1,44 @@ +# frozen_string_literal: true + +# Generates a response with chunked bodies, size set via ENV['CI_BODY_CONF'] or +# `Body-Conf` request header. +# See 'CI - test/rackup/ci-*.ru files' or docs/test_rackup_ci_files.md + +require 'securerandom' + +headers = {} +headers['Content-Type'] = 'text/plain; charset=utf-8'.freeze +25.times { |i| headers["X-My-Header-#{i}"] = SecureRandom.hex(25) } + +hdr_dly = 'HTTP_DLY' +hdr_body_conf = 'HTTP_BODY_CONF' + +# length = 1018 bytesize = 1024 +str_1kb = "──#{SecureRandom.hex 507}─\n".freeze + +env_len = (t = ENV['CI_BODY_CONF']) ? 
t[/\d+\z/].to_i : 10 + +cache_chunked = {} + +run lambda { |env| + info = if (dly = env[hdr_dly]) + hash_key = +"#{dly}," + sleep dly.to_f + "#{Process.pid}\nHello World\nSlept #{dly}\n" + else + hash_key = +"," + "#{Process.pid}\nHello World\n" + end + info_len_adj = 1023 - info.bytesize + + len = (t = env[hdr_body_conf]) ? t[/\d+\z/].to_i : env_len + + hash_key << len.to_s + + body = cache_chunked[hash_key] ||= begin + temp = Array.new len, str_1kb + temp[0] = info + str_1kb.byteslice(0, info_len_adj) + "\n" + temp.to_enum + end + [200, headers, body] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/ci_io.ru b/vendor/cache/puma-fba741b91780/test/rackup/ci_io.ru new file mode 100644 index 000000000..01ae9011f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/ci_io.ru @@ -0,0 +1,39 @@ +# frozen_string_literal: true + +# Generates a response with File/IO bodies, size set via ENV['CI_BODY_CONF'] or +# `Body-Conf` request header. +# See 'CI - test/rackup/ci-*.ru files' or docs/test_rackup_ci_files.md + +require 'securerandom' +require 'tmpdir' + +headers = {} +headers['Content-Type'] = 'text/plain; charset=utf-8' +25.times { |i| headers["X-My-Header-#{i}"] = SecureRandom.hex(25) } + +hdr_dly = 'HTTP_DLY' +hdr_body_conf = 'HTTP_BODY_CONF' +hdr_content_length = 'Content-Length' + +env_len = (t = ENV['CI_BODY_CONF']) ? t[/\d+\z/].to_i : 10 + +tmp_folder = "#{Dir.tmpdir}/.puma_response_body_io" + +unless Dir.exist? tmp_folder + STDOUT.syswrite "\nNeeded files do not exist. Run `TestPuma.create_io_files" \ + " contained in benchmarks/local/bench_base.rb\n" + exit 1 +end + +fn_format = "#{tmp_folder}/body_io_%04d.txt" + +run lambda { |env| + if (dly = env[hdr_dly]) + sleep dly.to_f + end + len = (t = env[hdr_body_conf]) ? 
t[/\d+\z/].to_i : env_len + headers[hdr_content_length] = (1024*len).to_s + fn = format fn_format, len + body = File.open fn + [200, headers, body] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/ci_select.ru b/vendor/cache/puma-fba741b91780/test/rackup/ci_select.ru new file mode 100644 index 000000000..3f847f5e5 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/ci_select.ru @@ -0,0 +1,82 @@ +# frozen_string_literal: true + +# Generates a response with various body types and sizes, set via ENV['CI_BODY_CONF'] or +# `Body-Conf` request header. +# See 'CI - test/rackup/ci-*.ru files' or docs/test_rackup_ci_files.md + +require 'securerandom' +require 'tmpdir' + +headers = {} +headers['Content-Type'] = 'text/plain; charset=utf-8'.freeze +25.times { |i| headers["X-My-Header-#{i}"] = SecureRandom.hex(25) } + +hdr_dly = 'HTTP_DLY' +hdr_body_conf = 'HTTP_BODY_CONF' +hdr_content_length = 'Content-Length' + +# length = 1018 bytesize = 1024 +str_1kb = "──#{SecureRandom.hex 507}─\n".freeze + +fn_format = "#{Dir.tmpdir}/.puma_response_body_io/body_io_%04d.txt".freeze + +body_types = %w[a c i s].freeze + +cache_array = {} +cache_chunked = {} +cache_string = {} + +run lambda { |env| + info = if (dly = env[hdr_dly]) + hash_key = +"#{dly}," + sleep dly.to_f + +"#{Process.pid}\nHello World\nSlept #{dly}\n" + else + hash_key = +"," + +"#{Process.pid}\nHello World\n" + end + info_len_adj = 1023 - info.bytesize + + body_conf = env[hdr_body_conf] + + if body_conf && body_conf.start_with?(*body_types) + type = body_conf.slice!(0).to_sym + len = body_conf.to_i + elsif body_conf + type = :s + len = body_conf[/\d+\z/].to_i + else # default + type = :s + len = 1 + end + + hash_key << len.to_s + + case type + when :a # body is an array + headers[hdr_content_length] = (1_024 * len).to_s + body = cache_array[hash_key] ||= begin + temp = Array.new len, str_1kb + temp[0] = info + str_1kb.byteslice(0, info_len_adj) + "\n" + temp + end + when :c # body is chunked + 
headers.delete hdr_content_length + body = cache_chunked[hash_key] ||= begin + temp = Array.new len, str_1kb + temp[0] = info + str_1kb.byteslice(0, info_len_adj) + "\n" + temp.to_enum + end + when :i # body is an io + headers[hdr_content_length] = (1_024 * len).to_s + fn = format fn_format, len + body = File.open fn, 'rb' + when :s # body is a single string in an array + headers[hdr_content_length] = (1_024 * len).to_s + body = cache_string[hash_key] ||= begin + info << str_1kb.byteslice(0, info_len_adj) << "\n" << (str_1kb * (len-1)) + [info] + end + end + [200, headers, body] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/ci_string.ru b/vendor/cache/puma-fba741b91780/test/rackup/ci_string.ru new file mode 100644 index 000000000..2195830d2 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/ci_string.ru @@ -0,0 +1,47 @@ +# frozen_string_literal: true + +# Generates a response with single string bodies, size set via ENV['CI_BODY_CONF'] or +# `Body-Conf` request header. +# See 'CI - test/rackup/ci-*.ru files' or docs/test_rackup_ci_files.md + +require 'securerandom' + +env_len = (t = ENV['CI_BODY_CONF']) ? t[/\d+\z/].to_i : 10 + +headers = {} +headers['Content-Type'] = 'text/plain; charset=utf-8'.freeze +25.times { |i| headers["X-My-Header-#{i}"] = SecureRandom.hex(25) } + +hdr_dly = 'HTTP_DLY' +hdr_body_conf = 'HTTP_BODY_CONF' +hdr_content_length = 'Content-Length' + +# length = 1018 bytesize = 1024 +str_1kb = "──#{SecureRandom.hex 507}─\n".freeze + +env_len = (t = ENV['CI_BODY_CONF']) ? t[/\d+\z/].to_i : 10 + +cache_string = {} + +run lambda { |env| + info = if (dly = env[hdr_dly]) + +hash_key = "#{dly}," + sleep dly.to_f + +"#{Process.pid}\nHello World\nSlept #{dly}\n" + else + +hash_key = "," + +"#{Process.pid}\nHello World\n" + end + info_len_adj = 1023 - info.bytesize + + len = (t = env[hdr_body_conf]) ? 
t[/\d+\z/].to_i : env_len + + hash_key << len.to_s + + headers[hdr_content_length] = (1_024 * len).to_s + body = cache_string[hash_key] ||= begin + info << str_1kb.byteslice(0, info_len_adj) << "\n" << (str_1kb * (len-1)) + [info] + end + [200, headers, body] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/close_listeners.ru b/vendor/cache/puma-fba741b91780/test/rackup/close_listeners.ru new file mode 100644 index 000000000..d7d37225f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/close_listeners.ru @@ -0,0 +1,6 @@ +require 'objspace' + +run lambda { |env| + ios = ObjectSpace.each_object(::TCPServer).to_a.tap { |a| a.each(&:close) } + [200, [], ["#{ios.inspect}\n"]] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/env-dump.ru b/vendor/cache/puma-fba741b91780/test/rackup/env-dump.ru new file mode 100644 index 000000000..e7f28da76 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/env-dump.ru @@ -0,0 +1,6 @@ +run lambda { |env| + body = +"#{'─' * 70} Headers\n" + env.sort.each { |k,v| body << "#{k.ljust 30} #{v}\n" } + body << "#{'─' * 78}\n" + [200, {"Content-Type" => "text/plain"}, [body]] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/hello-bind.ru b/vendor/cache/puma-fba741b91780/test/rackup/hello-bind.ru new file mode 100644 index 000000000..483eb9d46 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/hello-bind.ru @@ -0,0 +1,2 @@ +#\ -O bind=tcp://127.0.0.1:9292 +run lambda { |env| [200, {"Content-Type" => "text/plain"}, ["Hello World"]] } diff --git a/vendor/cache/puma-fba741b91780/test/rackup/hello-bind_rack3.ru b/vendor/cache/puma-fba741b91780/test/rackup/hello-bind_rack3.ru new file mode 100644 index 000000000..deb8fc822 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/hello-bind_rack3.ru @@ -0,0 +1 @@ +run lambda { |env| [200, {"Content-Type" => "text/plain"}, ["Hello World"]] } diff --git a/vendor/cache/puma-fba741b91780/test/rackup/hello-env.ru 
b/vendor/cache/puma-fba741b91780/test/rackup/hello-env.ru new file mode 100644 index 000000000..4b5c8d3c2 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/hello-env.ru @@ -0,0 +1,2 @@ +ENV["RAND"] ||= rand.to_s +run lambda { |env| [200, {"Content-Type" => "text/plain"}, ["Hello RAND #{ENV["RAND"]}"]] } diff --git a/vendor/cache/puma-fba741b91780/test/rackup/hello.ru b/vendor/cache/puma-fba741b91780/test/rackup/hello.ru new file mode 100644 index 000000000..86e78f2c2 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/hello.ru @@ -0,0 +1,3 @@ +hdrs = {'Content-Type'.freeze => 'text/plain'.freeze}.freeze +body = ['Hello World'.freeze].freeze +run lambda { |env| [200, hdrs.dup, body] } diff --git a/vendor/cache/puma-fba741b91780/test/rackup/hello_with_delay.ru b/vendor/cache/puma-fba741b91780/test/rackup/hello_with_delay.ru new file mode 100644 index 000000000..2417badf6 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/hello_with_delay.ru @@ -0,0 +1,4 @@ +run lambda { |env| + sleep 0.001 + [200, {"Content-Type" => "text/plain"}, ["Hello World"]] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/lobster.ru b/vendor/cache/puma-fba741b91780/test/rackup/lobster.ru new file mode 100644 index 000000000..cc7ffcae8 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/lobster.ru @@ -0,0 +1,4 @@ +require 'rack/lobster' + +use Rack::ShowExceptions +run Rack::Lobster.new diff --git a/vendor/cache/puma-fba741b91780/test/rackup/many_long_headers.ru b/vendor/cache/puma-fba741b91780/test/rackup/many_long_headers.ru new file mode 100644 index 000000000..f9c5f53e8 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/many_long_headers.ru @@ -0,0 +1,9 @@ +require 'securerandom' + +long_header_hash = {} + +30.times do |i| + long_header_hash["X-My-Header-#{i}"] = SecureRandom.hex(1000) +end + +run lambda { |env| [200, long_header_hash, ["Hello World"]] } diff --git 
a/vendor/cache/puma-fba741b91780/test/rackup/realistic_response.ru b/vendor/cache/puma-fba741b91780/test/rackup/realistic_response.ru new file mode 100644 index 000000000..8b6ede20d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/realistic_response.ru @@ -0,0 +1,11 @@ +require 'securerandom' + +long_header_hash = {} + +25.times do |i| + long_header_hash["X-My-Header-#{i}"] = SecureRandom.hex(25) +end + +response = SecureRandom.hex(100_000) # A 100kb document + +run lambda { |env| [200, long_header_hash.dup, [response.dup]] } diff --git a/vendor/cache/puma-fba741b91780/test/rackup/sleep.ru b/vendor/cache/puma-fba741b91780/test/rackup/sleep.ru new file mode 100644 index 000000000..020090b5b --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/sleep.ru @@ -0,0 +1,10 @@ +# call with "GET /sleep HTTP/1.1\r\n\r\n", where is the number of +# seconds to sleep, can be a float or an int +# same as TestApps::SLEEP + +regex_delay = /\A\/sleep(\d+(?:\.\d+)?)/ +run lambda { |env| + delay = (env['REQUEST_PATH'][regex_delay,1] || '0').to_f + sleep delay + [200, {"Content-Type" => "text/plain"}, ["Slept #{delay}"]] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/sleep_pid.ru b/vendor/cache/puma-fba741b91780/test/rackup/sleep_pid.ru new file mode 100644 index 000000000..3e18574b2 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/sleep_pid.ru @@ -0,0 +1,9 @@ +# call with "GET /sleep HTTP/1.1\r\n\r\n", where is the number of +# seconds to sleep, can be a float or an int, returns process pid + +regex_delay = /\A\/sleep(\d+(?:\.\d+)?)/ +run lambda { |env| + delay = (env['REQUEST_PATH'][regex_delay,1] || '0').to_f + sleep delay + [200, {"Content-Type" => "text/plain"}, ["Slept #{delay} #{Process.pid}"]] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/sleep_step.ru b/vendor/cache/puma-fba741b91780/test/rackup/sleep_step.ru new file mode 100644 index 000000000..88eb747c8 --- /dev/null +++ 
b/vendor/cache/puma-fba741b91780/test/rackup/sleep_step.ru @@ -0,0 +1,11 @@ +# call with "GET /sleep- HTTP/1.1\r\n\r\n", where is the number of +# seconds to sleep (can be a float or an int) and is the step + +regex_delay = /\A\/sleep(\d+(?:\.\d+)?)/ +run lambda { |env| + p = env['REQUEST_PATH'] + delay = (p[regex_delay,1] || '0').to_f + step = p[/(\d+)\z/,1].to_i + sleep delay + [200, {"Content-Type" => "text/plain"}, ["Slept #{delay} #{step}"]] +} diff --git a/vendor/cache/puma-fba741b91780/test/rackup/url_scheme.ru b/vendor/cache/puma-fba741b91780/test/rackup/url_scheme.ru new file mode 100644 index 000000000..add5d3309 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/url_scheme.ru @@ -0,0 +1 @@ +run lambda { |env| [200, {"Content-Type" => "text/plain"}, [env["rack.url_scheme"]]] } diff --git a/vendor/cache/puma-fba741b91780/test/rackup/write_to_stdout.ru b/vendor/cache/puma-fba741b91780/test/rackup/write_to_stdout.ru new file mode 100644 index 000000000..880d3d2ec --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/write_to_stdout.ru @@ -0,0 +1,6 @@ +app = lambda do |env| + $stdout.write "hello\n" + [200, {"Content-Type" => "text/plain"}, ["Hello World"]] +end + +run app diff --git a/vendor/cache/puma-fba741b91780/test/rackup/write_to_stdout_on_boot.ru b/vendor/cache/puma-fba741b91780/test/rackup/write_to_stdout_on_boot.ru new file mode 100644 index 000000000..faf29d5ff --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/rackup/write_to_stdout_on_boot.ru @@ -0,0 +1,2 @@ +puts "Loading app" +run lambda { |env| [200, {"Content-Type" => "text/plain"}, ["Hello World"]] } diff --git a/vendor/cache/puma-fba741b91780/test/runner b/vendor/cache/puma-fba741b91780/test/runner new file mode 100755 index 000000000..d7bb6a6a4 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/runner @@ -0,0 +1,53 @@ +#!/usr/bin/env ruby + +=begin +A simplified test runner; it runs all tests by default. 
+It assumes that "bundle exec rake compile" has been run. + +It can also be passed a glob or test file names. If multiple test file names +are used, separate them by the File::PATH_SEPARATOR character with no spaces. +The file extension is optional. + +If arguments are used that take values (eg seed), use the 'no space' version, +like -s33388 or --seed=33388 + +Finally, to keep the code simple, if you pass an invalid argument for file +filtering it will either error or run minitest with no tests loaded + +Examples, run from the top Puma repo folder: +test/runner +test/runner -v +test/runner -v test_puma_server +test/runner --verbose test_puma_server* +test/runner --verbose test_integration_cluster:test_integration_single +test/runner --verbose test*ssl* + +Note that on macOS, globs (using '*') need to be quoted, as in: +test/runner -v 'test_integration_*.rb' +=end + +require 'bundler/setup' + +if ARGV.empty? || ARGV.last.start_with?('-') + if RUBY_VERSION >= '2.5' + Dir['test_*.rb', base: __dir__].sort.each { |tf| require_relative tf } + else + Dir["#{__dir__}/test_*.rb"].sort.each { |tf| require tf } + end +else + file_arg = ARGV.pop.sub(/\.rb\z/, '') + if file_arg.include? File::PATH_SEPARATOR + file_args = file_arg.split(File::PATH_SEPARATOR).map { |fn| fn.sub(/\.rb\z/, '') } + file_args.each { |tf| require_relative "#{tf}.rb" } + elsif file_arg.include? 
'*' + if RUBY_VERSION >= '2.5' + Dir["#{file_arg}.rb", base: __dir__].sort.each { |tf| require_relative tf } + else + Dir["#{__dir__}/#{file_arg}.rb"].sort.each { |tf| require tf } + end + else + require_relative "#{file_arg}.rb" + end +end + +require 'minitest' diff --git a/vendor/cache/puma-fba741b91780/test/runner.cmd b/vendor/cache/puma-fba741b91780/test/runner.cmd new file mode 100644 index 000000000..9f5876fb6 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/runner.cmd @@ -0,0 +1,2 @@ +@ECHO OFF +@ruby.exe -x "%~dpn0" %* diff --git a/vendor/cache/puma-fba741b91780/test/test_app_status.rb b/vendor/cache/puma-fba741b91780/test/test_app_status.rb new file mode 100644 index 000000000..770c825dc --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_app_status.rb @@ -0,0 +1,90 @@ +# frozen_string_literal: true + +require_relative "helper" + +require "puma/app/status" +require "rack" + +class TestAppStatus < Minitest::Test + + class FakeServer + def initialize + @status = :running + end + + attr_reader :status + + def stop + @status = :stop + end + + def halt + @status = :halt + end + + def stats + {} + end + end + + def setup + @server = FakeServer.new + @app = Puma::App::Status.new(@server) + end + + def lint(uri) + app = Rack::Lint.new @app + mock_env = Rack::MockRequest.env_for uri + app.call mock_env + end + + def test_bad_token + @app.instance_variable_set(:@auth_token, "abcdef") + + status, _, _ = lint('/whatever') + + assert_equal 403, status + end + + def test_good_token + @app.instance_variable_set(:@auth_token, "abcdef") + + status, _, _ = lint('/whatever?token=abcdef') + + assert_equal 404, status + end + + def test_unsupported + status, _, _ = lint('/not-real') + + assert_equal 404, status + end + + def test_stop + status, _ , app = lint('/stop') + + assert_equal :stop, @server.status + assert_equal 200, status + assert_equal ['{ "status": "ok" }'], app.enum_for.to_a + end + + def test_halt + status, _ , app = lint('/halt') + + 
assert_equal :halt, @server.status + assert_equal 200, status + assert_equal ['{ "status": "ok" }'], app.enum_for.to_a + end + + def test_stats + status, _ , app = lint('/stats') + + assert_equal 200, status + assert_equal ['{}'], app.enum_for.to_a + end + + def test_alternate_location + status, _ , _ = lint('__alternatE_location_/stats') + assert_equal 200, status + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_binder.rb b/vendor/cache/puma-fba741b91780/test/test_binder.rb new file mode 100644 index 000000000..cbdbdc3ca --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_binder.rb @@ -0,0 +1,574 @@ +# frozen_string_literal: true + +require_relative "helper" +require_relative "helpers/ssl" if ::Puma::HAS_SSL +require_relative "helpers/tmp_path" + +require "puma/binder" +require "puma/events" +require "puma/configuration" + +class TestBinderBase < Minitest::Test + include SSLHelper if ::Puma::HAS_SSL + include TmpPath + + def setup + @log_writer = Puma::LogWriter.strings + @binder = Puma::Binder.new(@log_writer) + end + + def teardown + @binder.ios.reject! { |io| Minitest::Mock === io || io.to_io.closed? } + @binder.close + @binder.unix_paths.select! { |path| File.exist? path } + @binder.close_listeners + end + + private + + def ssl_context_for_binder(binder = @binder) + binder.ios[0].instance_variable_get(:@ctx) + end +end + +class TestBinderParallel < TestBinderBase + parallelize_me! 
+ + def test_synthesize_binds_from_activated_fds_no_sockets + binds = ['tcp://0.0.0.0:3000'] + result = @binder.synthesize_binds_from_activated_fs(binds, true) + + assert_equal ['tcp://0.0.0.0:3000'], result + end + + def test_synthesize_binds_from_activated_fds_non_matching_together + binds = ['tcp://0.0.0.0:3000'] + sockets = {['tcp', '0.0.0.0', '5000'] => nil} + @binder.instance_variable_set(:@activated_sockets, sockets) + result = @binder.synthesize_binds_from_activated_fs(binds, false) + + assert_equal ['tcp://0.0.0.0:3000', 'tcp://0.0.0.0:5000'], result + end + + def test_synthesize_binds_from_activated_fds_non_matching_only + binds = ['tcp://0.0.0.0:3000'] + sockets = {['tcp', '0.0.0.0', '5000'] => nil} + @binder.instance_variable_set(:@activated_sockets, sockets) + result = @binder.synthesize_binds_from_activated_fs(binds, true) + + assert_equal ['tcp://0.0.0.0:5000'], result + end + + def test_synthesize_binds_from_activated_fds_complex_binds + binds = [ + 'tcp://0.0.0.0:3000', + 'ssl://192.0.2.100:5000', + 'ssl://192.0.2.101:5000?no_tlsv1=true', + 'unix:///run/puma.sock' + ] + sockets = { + ['tcp', '0.0.0.0', '5000'] => nil, + ['tcp', '192.0.2.100', '5000'] => nil, + ['tcp', '192.0.2.101', '5000'] => nil, + ['unix', '/run/puma.sock'] => nil + } + @binder.instance_variable_set(:@activated_sockets, sockets) + result = @binder.synthesize_binds_from_activated_fs(binds, false) + + expected = ['tcp://0.0.0.0:3000', 'ssl://192.0.2.100:5000', 'ssl://192.0.2.101:5000?no_tlsv1=true', 'unix:///run/puma.sock', 'tcp://0.0.0.0:5000'] + assert_equal expected, result + end + + def test_localhost_addresses_dont_alter_listeners_for_tcp_addresses + @binder.parse ["tcp://localhost:0"], @log_writer + + assert_empty @binder.listeners + end + + def test_home_alters_listeners_for_tcp_addresses + port = UniquePort.call + @binder.parse ["tcp://127.0.0.1:#{port}"], @log_writer + + assert_equal "tcp://127.0.0.1:#{port}", @binder.listeners[0][0] + assert_kind_of TCPServer, 
@binder.listeners[0][1] + end + + def test_connected_ports + ports = (1..3).map { |_| UniquePort.call } + + @binder.parse(ports.map { |p| "tcp://localhost:#{p}" }, @log_writer) + + assert_equal ports, @binder.connected_ports + end + + def test_localhost_addresses_dont_alter_listeners_for_ssl_addresses + skip_unless :ssl + @binder.parse ["ssl://localhost:0?#{ssl_query}"], @log_writer + + assert_empty @binder.listeners + end + + def test_home_alters_listeners_for_ssl_addresses + skip_unless :ssl + port = UniquePort.call + @binder.parse ["ssl://127.0.0.1:#{port}?#{ssl_query}"], @log_writer + + assert_equal "ssl://127.0.0.1:#{port}?#{ssl_query}", @binder.listeners[0][0] + assert_kind_of TCPServer, @binder.listeners[0][1] + end + + def test_correct_zero_port + @binder.parse ["tcp://localhost:0"], @log_writer + + m = %r!http://127.0.0.1:(\d+)!.match(@log_writer.stdout.string) + port = m[1].to_i + + refute_equal 0, port + end + + def test_correct_zero_port_ssl + skip_unless :ssl + + ssl_regex = %r!ssl://127.0.0.1:(\d+)! + + @binder.parse ["ssl://localhost:0?#{ssl_query}"], @log_writer + + port = ssl_regex.match(@log_writer.stdout.string)[1].to_i + + refute_equal 0, port + end + + def test_logs_all_localhost_bindings + @binder.parse ["tcp://localhost:0"], @log_writer + + assert_match %r!http://127.0.0.1:(\d+)!, @log_writer.stdout.string + if Socket.ip_address_list.any? {|i| i.ipv6_loopback? } + assert_match %r!http://\[::1\]:(\d+)!, @log_writer.stdout.string + end + end + + def test_logs_all_localhost_bindings_ssl + skip_unless :ssl + + @binder.parse ["ssl://localhost:0?#{ssl_query}"], @log_writer + + assert_match %r!ssl://127.0.0.1:(\d+)!, @log_writer.stdout.string + if Socket.ip_address_list.any? {|i| i.ipv6_loopback? } + assert_match %r!ssl://\[::1\]:(\d+)!, @log_writer.stdout.string + end + end + + def test_allows_both_ssl_and_tcp + assert_parsing_logs_uri [:ssl, :tcp] + end + + def test_allows_both_unix_and_tcp + skip_if :jruby # Undiagnosed thread race. 
TODO fix + assert_parsing_logs_uri [:unix, :tcp] + end + + def test_allows_both_tcp_and_unix + assert_parsing_logs_uri [:tcp, :unix] + end + + def test_pre_existing_unix + skip_unless :unix + + unix_path = tmp_path('.sock') + File.open(unix_path, mode: 'wb') { |f| f.puts 'pre existing' } + @binder.parse ["unix://#{unix_path}"], @log_writer + + assert_match %r!unix://#{unix_path}!, @log_writer.stdout.string + + refute_includes @binder.unix_paths, unix_path + + @binder.close_listeners + + assert File.exist?(unix_path) + + ensure + if UNIX_SKT_EXIST + File.unlink unix_path if File.exist? unix_path + end + end + + def test_binder_tcp_defaults_to_low_latency_off + skip_if :jruby + @binder.parse ["tcp://0.0.0.0:0"], @log_writer + + socket = @binder.listeners.first.last + + refute socket.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY).bool + end + + def test_binder_tcp_parses_nil_low_latency + skip_if :jruby + @binder.parse ["tcp://0.0.0.0:0?low_latency"], @log_writer + + socket = @binder.listeners.first.last + + assert socket.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY).bool + end + + def test_binder_tcp_parses_true_low_latency + skip_if :jruby + @binder.parse ["tcp://0.0.0.0:0?low_latency=true"], @log_writer + + socket = @binder.listeners.first.last + + assert socket.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY).bool + end + + def test_binder_parses_false_low_latency + skip_if :jruby + @binder.parse ["tcp://0.0.0.0:0?low_latency=false"], @log_writer + + socket = @binder.listeners.first.last + + refute socket.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY).bool + end + + def test_binder_ssl_defaults_to_true_low_latency + skip_unless :ssl + skip_if :jruby + @binder.parse ["ssl://0.0.0.0:0?#{ssl_query}"], @log_writer + + socket = @binder.listeners.first.last + + assert socket.to_io.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY).bool + end + + def test_binder_ssl_parses_false_low_latency + skip_unless :ssl + skip_if :jruby + @binder.parse 
["ssl://0.0.0.0:0?#{ssl_query}&low_latency=false"], @log_writer + + socket = @binder.listeners.first.last + + refute socket.to_io.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY).bool + end + + def test_binder_parses_tlsv1_disabled + skip_unless :ssl + @binder.parse ["ssl://0.0.0.0:0?#{ssl_query}&no_tlsv1=true"], @log_writer + + assert ssl_context_for_binder.no_tlsv1 + end + + def test_binder_parses_tlsv1_enabled + skip_unless :ssl + @binder.parse ["ssl://0.0.0.0:0?#{ssl_query}&no_tlsv1=false"], @log_writer + + refute ssl_context_for_binder.no_tlsv1 + end + + def test_binder_parses_tlsv1_tlsv1_1_unspecified_defaults_to_enabled + skip_unless :ssl + @binder.parse ["ssl://0.0.0.0:0?#{ssl_query}"], @log_writer + + refute ssl_context_for_binder.no_tlsv1 + refute ssl_context_for_binder.no_tlsv1_1 + end + + def test_binder_parses_tlsv1_1_disabled + skip_unless :ssl + @binder.parse ["ssl://0.0.0.0:0?#{ssl_query}&no_tlsv1_1=true"], @log_writer + + assert ssl_context_for_binder.no_tlsv1_1 + end + + def test_binder_parses_tlsv1_1_enabled + skip_unless :ssl + @binder.parse ["ssl://0.0.0.0:0?#{ssl_query}&no_tlsv1_1=false"], @log_writer + + refute ssl_context_for_binder.no_tlsv1_1 + end + + def test_env_contains_protoenv + skip_unless :ssl + @binder.parse ["ssl://localhost:0?#{ssl_query}"], @log_writer + + env_hash = @binder.envs[@binder.ios.first] + + @binder.proto_env.each do |k,v| + assert env_hash[k] == v + end + end + + def test_env_contains_stderr + skip_unless :ssl + @binder.parse ["ssl://localhost:0?#{ssl_query}"], @log_writer + + env_hash = @binder.envs[@binder.ios.first] + + assert_equal @log_writer.stderr, env_hash["rack.errors"] + end + + def test_close_calls_close_on_ios + @mocked_ios = [Minitest::Mock.new, Minitest::Mock.new] + @mocked_ios.each { |m| m.expect(:close, true) } + @binder.ios = @mocked_ios + + @binder.close + + assert @mocked_ios.map(&:verify).all? 
+ end + + def test_redirects_for_restart_creates_a_hash + @binder.parse ["tcp://127.0.0.1:0"], @log_writer + + result = @binder.redirects_for_restart + ios = @binder.listeners.map { |_l, io| io.to_i } + + ios.each { |int| assert_equal int, result[int] } + assert result[:close_others] + end + + def test_redirects_for_restart_env + @binder.parse ["tcp://127.0.0.1:0"], @log_writer + + result = @binder.redirects_for_restart_env + + @binder.listeners.each_with_index do |l, i| + assert_equal "#{l[1].to_i}:#{l[0]}", result["PUMA_INHERIT_#{i}"] + end + end + + def test_close_listeners_closes_ios + @binder.parse ["tcp://127.0.0.1:#{UniquePort.call}"], @log_writer + + refute @binder.listeners.any? { |_l, io| io.closed? } + + @binder.close_listeners + + assert @binder.listeners.all? { |_l, io| io.closed? } + end + + def test_close_listeners_closes_ios_unless_closed? + @binder.parse ["tcp://127.0.0.1:0"], @log_writer + + bomb = @binder.listeners.first[1] + bomb.close + def bomb.close; raise "Boom!"; end # the bomb has been planted + + assert @binder.listeners.any? { |_l, io| io.closed? } + + @binder.close_listeners + + assert @binder.listeners.all? { |_l, io| io.closed? } + end + + def test_listeners_file_unlink_if_unix_listener + skip_unless :unix + + unix_path = tmp_path('.sock') + @binder.parse ["unix://#{unix_path}"], @log_writer + assert File.socket?(unix_path) + + @binder.close_listeners + refute File.socket?(unix_path) + end + + def test_import_from_env_listen_inherit + @binder.parse ["tcp://127.0.0.1:0"], @log_writer + removals = @binder.create_inherited_fds(@binder.redirects_for_restart_env) + + @binder.listeners.each do |l, io| + assert_equal io.to_i, @binder.inherited_fds[l] + end + assert_includes removals, "PUMA_INHERIT_0" + end + + # Socket activation tests. We have to skip all of these on non-UNIX platforms + # because the check that we do in the code only works if you support UNIX sockets. + # This is OK, because systemd obviously only works on Linux. 
+ def test_socket_activation_tcp + skip_unless :unix + url = "127.0.0.1" + port = UniquePort.call + sock = Addrinfo.tcp(url, port).listen + assert_activates_sockets(url: url, port: port, sock: sock) + end + + def test_socket_activation_tcp_ipv6 + skip_unless :unix + url = "::" + port = UniquePort.call + sock = Addrinfo.tcp(url, port).listen + assert_activates_sockets(url: url, port: port, sock: sock) + end + + def test_socket_activation_unix + skip_if :jruby # Failing with what I think is a JRuby bug + skip_unless :unix + + state_path = tmp_path('.state') + sock = Addrinfo.unix(state_path).listen + assert_activates_sockets(path: state_path, sock: sock) + ensure + File.unlink(state_path) rescue nil # JRuby race? + end + + def test_rack_multithread_default_configuration + binder = Puma::Binder.new(@log_writer) + + assert binder.proto_env["rack.multithread"] + end + + def test_rack_multithread_custom_configuration + conf = Puma::Configuration.new(max_threads: 1) + + binder = Puma::Binder.new(@log_writer, conf) + + refute binder.proto_env["rack.multithread"] + end + + def test_rack_multiprocess_default_configuration + binder = Puma::Binder.new(@log_writer) + + refute binder.proto_env["rack.multiprocess"] + end + + def test_rack_multiprocess_custom_configuration + conf = Puma::Configuration.new(workers: 1) + + binder = Puma::Binder.new(@log_writer, conf) + + assert binder.proto_env["rack.multiprocess"] + end + + private + + def assert_activates_sockets(path: nil, port: nil, url: nil, sock: nil) + hash = { "LISTEN_FDS" => 1, "LISTEN_PID" => $$ } + @log_writer.instance_variable_set(:@debug, true) + + @binder.instance_variable_set(:@sock_fd, sock.fileno) + def @binder.socket_activation_fd(int); @sock_fd; end + @result = @binder.create_activated_fds(hash) + + url = "[::]" if url == "::" + ary = path ? 
[:unix, path] : [:tcp, url, port] + + assert_kind_of TCPServer, @binder.activated_sockets[ary] + assert_match "Registered #{ary.join(":")} for activation from LISTEN_FDS", @log_writer.stdout.string + assert_equal ["LISTEN_FDS", "LISTEN_PID"], @result + end + + def assert_parsing_logs_uri(order = [:unix, :tcp]) + skip MSG_UNIX if order.include?(:unix) && !UNIX_SKT_EXIST + skip_unless :ssl + + unix_path = tmp_path('.sock') + prepared_paths = { + ssl: "ssl://127.0.0.1:#{UniquePort.call}?#{ssl_query}", + tcp: "tcp://127.0.0.1:#{UniquePort.call}", + unix: "unix://#{unix_path}" + } + + expected_logs = prepared_paths.dup.tap do |logs| + logs[:tcp] = logs[:tcp].gsub('tcp://', 'http://') + end + + tested_paths = [prepared_paths[order[0]], prepared_paths[order[1]]] + + @binder.parse tested_paths, @log_writer + stdout = @log_writer.stdout.string + + order.each do |prot| + assert_match expected_logs[prot], stdout + end + ensure + @binder.close_listeners if order.include?(:unix) && UNIX_SKT_EXIST + end +end + +class TestBinderSingle < TestBinderBase + def test_ssl_binder_sets_backlog + skip_unless :ssl + + host = '127.0.0.1' + port = UniquePort.call + tcp_server = TCPServer.new(host, port) + tcp_server.define_singleton_method(:listen) do |backlog| + Thread.current[:backlog] = backlog + super(backlog) + end + + TCPServer.stub(:new, tcp_server) do + @binder.parse ["ssl://#{host}:#{port}?#{ssl_query}&backlog=2048"], @log_writer + end + + assert_equal 2048, Thread.current[:backlog] + end +end + +class TestBinderJRuby < TestBinderBase + def test_binder_parses_jruby_ssl_options + skip_unless :ssl + + cipher_suites = ['TLS_DHE_RSA_WITH_AES_128_CBC_SHA', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256'] + + @binder.parse ["ssl://0.0.0.0:8080?#{ssl_query}"], @log_writer + + assert_equal @keystore, ssl_context_for_binder.keystore + assert_equal cipher_suites, ssl_context_for_binder.cipher_suites + assert_equal cipher_suites, ssl_context_for_binder.ssl_cipher_list + end + + def 
test_binder_parses_jruby_ssl_protocols_and_cipher_suites_options + skip_unless :ssl + + keystore = File.expand_path "../../examples/puma/keystore.jks", __FILE__ + cipher = "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + ssl_query = "keystore=#{keystore}&keystore-pass=jruby_puma&cipher_suites=#{cipher}&protocols=TLSv1.3,TLSv1.2" + + @binder.parse ["ssl://0.0.0.0:8080?#{ssl_query}"], @log_writer + + assert_equal [ 'TLSv1.3', 'TLSv1.2' ], ssl_context_for_binder.protocols + assert_equal [ cipher ], ssl_context_for_binder.cipher_suites + end +end if ::Puma::IS_JRUBY + +class TestBinderMRI < TestBinderBase + def test_binder_parses_ssl_cipher_filter + skip_unless :ssl + + ssl_cipher_filter = "AES@STRENGTH" + + @binder.parse ["ssl://0.0.0.0?#{ssl_query}&ssl_cipher_filter=#{ssl_cipher_filter}"], @log_writer + + assert_equal ssl_cipher_filter, ssl_context_for_binder.ssl_cipher_filter + end + + def test_binder_parses_ssl_ciphersuites + skip_unless :ssl + skip('Requires TLSv1.3') unless Puma::MiniSSL::HAS_TLS1_3 + + ssl_ciphersuites = "TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256" + + @binder.parse ["ssl://0.0.0.0?#{ssl_query}&ssl_ciphersuites=#{ssl_ciphersuites}"], @log_writer + + assert_equal ssl_ciphersuites, ssl_context_for_binder.ssl_ciphersuites + end + + def test_binder_parses_ssl_verification_flags_one + skip_unless :ssl + + input = "&verification_flags=TRUSTED_FIRST" + + @binder.parse ["ssl://0.0.0.0?#{ssl_query}#{input}"], @log_writer + + assert_equal 0x8000, ssl_context_for_binder.verification_flags + end + + def test_binder_parses_ssl_verification_flags_multiple + skip_unless :ssl + + input = "&verification_flags=TRUSTED_FIRST,NO_CHECK_TIME" + + @binder.parse ["ssl://0.0.0.0?#{ssl_query}#{input}"], @log_writer + + assert_equal 0x8000 | 0x200000, ssl_context_for_binder.verification_flags + end +end unless ::Puma::IS_JRUBY diff --git a/vendor/cache/puma-fba741b91780/test/test_bundle_pruner.rb b/vendor/cache/puma-fba741b91780/test/test_bundle_pruner.rb new file mode 100644 
index 000000000..af69ef8b9 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_bundle_pruner.rb @@ -0,0 +1,58 @@ +require_relative 'helper' + +require 'puma/events' + +class TestBundlePruner < Minitest::Test + + PUMA_VERS = "puma-#{Puma::Const::PUMA_VERSION}" + + def test_paths_to_require_after_prune_is_correctly_built_for_no_extra_deps + skip_if :no_bundler + + dirs = bundle_pruner.send(:paths_to_require_after_prune) + + assert_equal(2, dirs.length) + assert_equal(File.join(PROJECT_ROOT, "lib"), dirs[0]) # lib dir + assert_operator dirs[1], :end_with?, PUMA_VERS # native extension dir + refute_match(%r{gems/minitest-[\d.]+/lib$}, dirs[2]) + end + + def test_paths_to_require_after_prune_is_correctly_built_with_extra_deps + skip_if :no_bundler + + dirs = bundle_pruner([], ['minitest']).send(:paths_to_require_after_prune) + + assert_equal(3, dirs.length) + assert_equal(File.join(PROJECT_ROOT, "lib"), dirs[0]) # lib dir + assert_operator dirs[1], :end_with?, PUMA_VERS # native extension dir + assert_match(%r{gems/minitest-[\d.]+/lib$}, dirs[2]) # minitest dir + end + + def test_extra_runtime_deps_paths_is_empty_for_no_config + assert_equal([], bundle_pruner.send(:extra_runtime_deps_paths)) + end + + def test_extra_runtime_deps_paths_is_correctly_built + skip_if :no_bundler + + dep_dirs = bundle_pruner([], ['minitest']).send(:extra_runtime_deps_paths) + + assert_equal(1, dep_dirs.length) + assert_match(%r{gems/minitest-[\d.]+/lib$}, dep_dirs.first) + end + + def test_puma_wild_path_is_an_absolute_path + skip_if :no_bundler + puma_wild_path = bundle_pruner.send(:puma_wild_path) + + assert_match(%r{bin/puma-wild$}, puma_wild_path) + # assert no "/../" in path + refute_match(%r{/\.\./}, puma_wild_path) + end + + private + + def bundle_pruner(original_argv = nil, extra_runtime_dependencies = nil) + @bundle_pruner ||= Puma::Launcher::BundlePruner.new(original_argv, extra_runtime_dependencies, Puma::LogWriter.null) + end +end diff --git 
a/vendor/cache/puma-fba741b91780/test/test_busy_worker.rb b/vendor/cache/puma-fba741b91780/test/test_busy_worker.rb new file mode 100644 index 000000000..fc4f3f812 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_busy_worker.rb @@ -0,0 +1,110 @@ +require_relative "helper" +require_relative "helpers/test_puma/puma_socket" + +class TestBusyWorker < Minitest::Test + + include ::TestPuma::PumaSocket + + def setup + skip_unless :mri # This feature only makes sense on MRI + @server = nil + end + + def teardown + return if skipped? + @server&.stop true + end + + def with_server(**options, &app) + @requests_count = 0 # number of requests processed + @requests_running = 0 # current number of requests running + @requests_max_running = 0 # max number of requests running in parallel + @mutex = Mutex.new + + request_handler = ->(env) do + @mutex.synchronize do + @requests_count += 1 + @requests_running += 1 + if @requests_running > @requests_max_running + @requests_max_running = @requests_running + end + end + + begin + yield(env) + ensure + @mutex.synchronize do + @requests_running -= 1 + end + end + end + + options[:min_threads] ||= 1 + options[:max_threads] ||= 10 + options[:log_writer] ||= Puma::LogWriter.strings + + @server = Puma::Server.new request_handler, nil, **options + @bind_port = (@server.add_tcp_listener '127.0.0.1', 0).addr[1] + @server.run + end + + # Multiple concurrent requests are not processed + # sequentially as a small delay is introduced + def test_multiple_requests_waiting_on_less_busy_worker + with_server(wait_for_less_busy_worker: 1.0, workers: 2) do |_| + sleep(0.1) + + [200, {}, [""]] + end + + n = 2 + + sockets = send_http_array GET_10, n + + read_response_array(sockets) + + assert_equal n, @requests_count, "number of requests needs to match" + assert_equal 0, @requests_running, "none of requests needs to be running" + assert_equal 1, @requests_max_running, "maximum number of concurrent requests needs to be 1" + end + + # Multiple 
concurrent requests are processed + # in parallel as a delay is disabled + def test_multiple_requests_processing_in_parallel + with_server(wait_for_less_busy_worker: 0.0, workers: 2) do |_| + sleep(0.1) + + [200, {}, [""]] + end + + n = 4 + + sockets = send_http_array GET_10, n + + read_response_array(sockets) + + assert_equal n, @requests_count, "number of requests needs to match" + assert_equal 0, @requests_running, "none of requests needs to be running" + assert_equal n, @requests_max_running, "maximum number of concurrent requests needs to match" + end + + def test_not_wait_for_less_busy_worker + with_server do + [200, {}, [""]] + end + + assert_not_called_on_instance_of(Puma::ThreadPool, :wait_for_less_busy_worker) do + send_http_read_response "GET / HTTP/1.0\r\n\r\n" + end + end + + def test_wait_for_less_busy_worker + with_server(workers: 2) do + [200, {}, [""]] + end + + assert_called_on_instance_of(Puma::ThreadPool, :wait_for_less_busy_worker) do + send_http_read_response "GET / HTTP/1.0\r\n\r\n" + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_cli.rb b/vendor/cache/puma-fba741b91780/test/test_cli.rb new file mode 100644 index 000000000..f0c97ef0a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_cli.rb @@ -0,0 +1,359 @@ +require_relative "helper" +require_relative "helpers/ssl" if ::Puma::HAS_SSL +require_relative "helpers/tmp_path" +require_relative "helpers/test_puma/puma_socket" + +require "puma/cli" +require "json" +require "psych" + +class TestCLI < Minitest::Test + include SSLHelper if ::Puma::HAS_SSL + include TmpPath + include TestPuma::PumaSocket + + def setup + @environment = 'production' + + @tmp_path = tmp_path('puma-test') + @tmp_path2 = "#{@tmp_path}2" + + File.unlink @tmp_path if File.exist? @tmp_path + File.unlink @tmp_path2 if File.exist? @tmp_path2 + + @wait, @ready = IO.pipe + + @log_writer = Puma::LogWriter.strings + + @events = Puma::Events.new + @events.on_booted { @ready << "!" 
} + + @puma_version_pattern = "\\d+.\\d+.\\d+(\\.[a-z\\d]+)?" + end + + def wait_booted + @wait.sysread 1 + rescue Errno::EAGAIN + sleep 0.001 + retry + end + + def teardown + File.unlink @tmp_path if File.exist? @tmp_path + File.unlink @tmp_path2 if File.exist? @tmp_path2 + + @wait.close + @ready.close + end + + def test_control_for_tcp + control_port = UniquePort.call + url = "tcp://127.0.0.1:#{control_port}/" + + cli = Puma::CLI.new ["-b", "tcp://127.0.0.1:0", + "--control-url", url, + "--control-token", "", + "test/rackup/hello.ru"], @log_writer, @events + + t = Thread.new { cli.run } + + wait_booted + + body = send_http_read_resp_body "GET /stats HTTP/1.0\r\n\r\n", port: control_port + + assert_equal Puma.stats_hash, JSON.parse(Puma.stats, symbolize_names: true) + + dmt = Puma::Configuration::DEFAULTS[:max_threads] + expected_stats = /\{"started_at":"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z","backlog":0,"running":0,"pool_capacity":#{dmt},"max_threads":#{dmt},"requests_count":0,"versions":\{"puma":"#{@puma_version_pattern}","ruby":\{"engine":"\w+","version":"\d+.\d+.\d+","patchlevel":-?\d+\}\}\}/ + assert_match(expected_stats, body) + ensure + cli.launcher.stop + t.join + end + + def test_control_for_ssl + skip_unless :ssl + + require "net/http" + control_port = UniquePort.call + control_host = "127.0.0.1" + control_url = "ssl://#{control_host}:#{control_port}?#{ssl_query}" + token = "token" + + cli = Puma::CLI.new ["-b", "tcp://127.0.0.1:0", + "--control-url", control_url, + "--control-token", token, + "test/rackup/hello.ru"], @log_writer, @events + + t = Thread.new { cli.run } + + wait_booted + + body = send_http_read_resp_body "GET /stats?token=#{token} HTTP/1.0\r\n\r\n", + port: control_port, ctx: new_ctx + + dmt = Puma::Configuration::DEFAULTS[:max_threads] + expected_stats = /{"started_at":"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z","backlog":0,"running":0,"pool_capacity":#{dmt},"max_threads":#{dmt}/ + assert_match(expected_stats, body) + ensure + # always called, 
even if skipped + cli&.launcher&.stop + t&.join + end + + def test_control + skip_unless :unix + url = "unix://#{@tmp_path}" + + cli = Puma::CLI.new ["-b", "unix://#{@tmp_path2}", + "--control-url", url, + "--control-token", "", + "test/rackup/hello.ru"], @log_writer, @events + + t = Thread.new { cli.run } + + wait_booted + + body = send_http_read_resp_body "GET /stats HTTP/1.0\r\n\r\n", path: @tmp_path + + dmt = Puma::Configuration::DEFAULTS[:max_threads] + expected_stats = /{"started_at":"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z","backlog":0,"running":0,"pool_capacity":#{dmt},"max_threads":#{dmt},"requests_count":0,"versions":\{"puma":"#{@puma_version_pattern}","ruby":\{"engine":"\w+","version":"\d+.\d+.\d+","patchlevel":-?\d+\}\}\}/ + assert_match(expected_stats, body) + ensure + if UNIX_SKT_EXIST + cli.launcher.stop + t.join + end + end + + def test_control_stop + skip_unless :unix + url = "unix://#{@tmp_path}" + + cli = Puma::CLI.new ["-b", "unix://#{@tmp_path2}", + "--control-url", url, + "--control-token", "", + "test/rackup/hello.ru"], @log_writer, @events + + t = Thread.new { cli.run } + + wait_booted + + body = send_http_read_resp_body "GET /stop HTTP/1.0\r\n\r\n", path: @tmp_path + + assert_equal '{ "status": "ok" }', body + ensure + t.join if UNIX_SKT_EXIST + end + + def test_control_requests_count + @bind_port = UniquePort.call + control_port = UniquePort.call + url = "tcp://127.0.0.1:#{control_port}/" + + cli = Puma::CLI.new ["-b", "tcp://127.0.0.1:#{@bind_port}", + "--control-url", url, + "--control-token", "", + "test/rackup/hello.ru"], @log_writer, @events + + t = Thread.new { cli.run } + + wait_booted + + body = send_http_read_resp_body "GET /stats HTTP/1.0\r\n\r\n", port: control_port + + assert_equal 0, JSON.parse(body)['requests_count'] + + # send real requests to server + 3.times { send_http_read_resp_body GET_10 } + + body = send_http_read_resp_body "GET /stats HTTP/1.0\r\n\r\n", port: control_port + + assert_equal 3, 
JSON.parse(body)['requests_count'] + ensure + cli.launcher.stop + t.join + end + + def test_control_thread_backtraces + skip_unless :unix + url = "unix://#{@tmp_path}" + + cli = Puma::CLI.new ["-b", "unix://#{@tmp_path2}", + "--control-url", url, + "--control-token", "", + "test/rackup/hello.ru"], @log_writer, @events + + t = Thread.new { cli.run } + + wait_booted + + if TRUFFLE + Thread.pass + sleep 0.2 + end + + # All thread backtraces may be very large, just get a chunk + socket = send_http "GET /thread-backtraces HTTP/1.0\r\n\r\n", path: @tmp_path + socket.wait_readable 3 + body = socket.sysread 32_768 + assert_match %r{Thread: TID-}, body + ensure + cli.launcher.stop if cli + t.join if UNIX_SKT_EXIST + end + + + def test_tmp_control + skip_if :jruby, suffix: " - Unknown issue" + + cli = Puma::CLI.new ["--state", @tmp_path, "--control-url", "auto"] + cli.launcher.write_state + + opts = cli.launcher.instance_variable_get(:@options) + + data = Psych.load_file @tmp_path + + Puma::StateFile::ALLOWED_FIELDS.each do |key| + val = + case key + when 'pid' then Process.pid + when 'running_from' then File.expand_path('.') # same as Launcher + else opts[key.to_sym] + end + assert_equal val, data[key] + end + + assert_equal (Puma::StateFile::ALLOWED_FIELDS & data.keys).sort, data.keys.sort + + url = data["control_url"] + + assert_operator url, :start_with?, "unix://", "'#{url}' is not a URL" + end + + def test_state_file_callback_filtering + skip_unless :fork + cli = Puma::CLI.new [ "--config", "test/config/state_file_testing_config.rb", + "--state", @tmp_path ] + cli.launcher.write_state + + data = Psych.load_file @tmp_path + + assert_equal (Puma::StateFile::ALLOWED_FIELDS & data.keys).sort, data.keys.sort + end + + def test_log_formatter_default_single + cli = Puma::CLI.new [ ] + assert_instance_of Puma::LogWriter::DefaultFormatter, cli.launcher.log_writer.formatter + end + + def test_log_formatter_default_clustered + skip_unless :fork + + cli = Puma::CLI.new [ "-w 2" ] 
+ assert_instance_of Puma::LogWriter::PidFormatter, cli.launcher.log_writer.formatter + end + + def test_log_formatter_custom_single + cli = Puma::CLI.new [ "--config", "test/config/custom_log_formatter.rb" ] + assert_instance_of Proc, cli.launcher.log_writer.formatter + assert_match(/^\[.*\] \[.*\] .*: test$/, cli.launcher.log_writer.format('test')) + end + + def test_log_formatter_custom_clustered + skip_unless :fork + + cli = Puma::CLI.new [ "--config", "test/config/custom_log_formatter.rb", "-w 2" ] + assert_instance_of Proc, cli.launcher.log_writer.formatter + assert_match(/^\[.*\] \[.*\] .*: test$/, cli.launcher.log_writer.format('test')) + end + + def test_state + url = "tcp://127.0.0.1:#{UniquePort.call}" + cli = Puma::CLI.new ["--state", @tmp_path, "--control-url", url] + cli.launcher.write_state + + data = Psych.load_file @tmp_path + + assert_equal Process.pid, data["pid"] + assert_equal url, data["control_url"] + end + + def test_load_path + Puma::CLI.new ["--include", 'foo/bar'] + + assert_equal 'foo/bar', $LOAD_PATH[0] + $LOAD_PATH.shift + + Puma::CLI.new ["--include", 'foo/bar:baz/qux'] + + assert_equal 'foo/bar', $LOAD_PATH[0] + $LOAD_PATH.shift + assert_equal 'baz/qux', $LOAD_PATH[0] + $LOAD_PATH.shift + end + + def test_extra_runtime_dependencies + cli = Puma::CLI.new ['--extra-runtime-dependencies', 'a,b'] + extra_dependencies = cli.instance_variable_get(:@conf) + .instance_variable_get(:@options)[:extra_runtime_dependencies] + + assert_equal %w[a b], extra_dependencies + end + + def test_environment_app_env + ENV['RACK_ENV'] = @environment + ENV['RAILS_ENV'] = @environment + ENV['APP_ENV'] = 'test' + + cli = Puma::CLI.new [] + cli.send(:setup_options) + + assert_equal 'test', cli.instance_variable_get(:@conf).environment + ensure + ENV.delete 'APP_ENV' + ENV.delete 'RAILS_ENV' + end + + def test_environment_rack_env + ENV['RACK_ENV'] = @environment + + cli = Puma::CLI.new [] + cli.send(:setup_options) + + assert_equal @environment, 
cli.instance_variable_get(:@conf).environment + end + + def test_environment_rails_env + ENV.delete 'RACK_ENV' + ENV['RAILS_ENV'] = @environment + + cli = Puma::CLI.new [] + cli.send(:setup_options) + + assert_equal @environment, cli.instance_variable_get(:@conf).environment + ensure + ENV.delete 'RAILS_ENV' + end + + def test_silent + cli = Puma::CLI.new ['--silent'] + cli.send(:setup_options) + + log_writer = cli.instance_variable_get(:@log_writer) + + assert_equal log_writer.class, Puma::LogWriter.null.class + assert_equal log_writer.stdout.class, Puma::NullIO + assert_equal log_writer.stderr, $stderr + end + + def test_plugins + assert_empty Puma::Plugins.instance_variable_get(:@plugins) + + cli = Puma::CLI.new ['--plugin', 'tmp_restart', '--plugin', 'systemd'] + cli.send(:setup_options) + + assert Puma::Plugins.find("tmp_restart") + assert Puma::Plugins.find("systemd") + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_config.rb b/vendor/cache/puma-fba741b91780/test/test_config.rb new file mode 100644 index 000000000..08ecbebb4 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_config.rb @@ -0,0 +1,763 @@ +# frozen_string_literal: true + +require_relative "helper" + +require "puma/configuration" +require 'puma/log_writer' +require 'rack' + +class TestConfigFile < Minitest::Test + parallelize_me! + + def test_default_max_threads + max_threads = 16 + max_threads = 5 if RUBY_ENGINE.nil? 
|| RUBY_ENGINE == 'ruby' + assert_equal max_threads, Puma::Configuration.new.options.default_options[:max_threads] + end + + def test_app_from_rackup + if Rack.release >= '3' + fn = "test/rackup/hello-bind_rack3.ru" + bind = "tcp://0.0.0.0:9292" + else + fn = "test/rackup/hello-bind.ru" + bind = "tcp://127.0.0.1:9292" + end + + conf = Puma::Configuration.new do |c| + c.rackup fn + end + conf.load + + # suppress deprecation warning of Rack (>= 2.2.0) + # > Parsing options from the first comment line is deprecated!\n + assert_output(nil, nil) do + conf.app + end + + assert_equal [200, {"Content-Type"=>"text/plain"}, ["Hello World"]], conf.app.call({}) + + assert_equal [bind], conf.options[:binds] + end + + def test_app_from_app_DSL + conf = Puma::Configuration.new do |c| + c.load "test/config/app.rb" + end + conf.load + + app = conf.app + + assert_equal [200, {}, ["embedded app"]], app.call({}) + end + + def test_ssl_configuration_from_DSL + skip_unless :ssl + conf = Puma::Configuration.new do |config| + config.load "test/config/ssl_config.rb" + end + + conf.load + + bind_configuration = conf.options.file_options[:binds].first + app = conf.app + + assert bind_configuration =~ %r{ca=.*ca.crt} + assert bind_configuration&.include?('verify_mode=peer') + + assert_equal [200, {}, ["embedded app"]], app.call({}) + end + + def test_ssl_self_signed_configuration_from_DSL + skip_if :jruby + skip_unless :ssl + conf = Puma::Configuration.new do |config| + config.load "test/config/ssl_self_signed_config.rb" + end + + conf.load + + ssl_binding = "ssl://0.0.0.0:9292?&verify_mode=none" + assert_equal [ssl_binding], conf.options[:binds] + end + + def test_ssl_bind + skip_if :jruby + skip_unless :ssl + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + cert: "/path/to/cert", + key: "/path/to/key", + verify_mode: "the_verify_mode", + } + end + + conf.load + + ssl_binding = 
"ssl://0.0.0.0:9292?cert=%2Fpath%2Fto%2Fcert&key=%2Fpath%2Fto%2Fkey&verify_mode=the_verify_mode" + assert_equal [ssl_binding], conf.options[:binds] + end + + def test_ssl_bind_with_escaped_filenames + skip_if :jruby + skip_unless :ssl + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + cert: "/path/to/cert+1", + ca: "/path/to/ca+1", + key: "/path/to/key+1", + verify_mode: :peer + } + end + + conf.load + + ssl_binding = "ssl://0.0.0.0:9292?cert=%2Fpath%2Fto%2Fcert%2B1&key=%2Fpath%2Fto%2Fkey%2B1&verify_mode=peer&ca=%2Fpath%2Fto%2Fca%2B1" + assert_equal [ssl_binding], conf.options[:binds] + end + + def test_ssl_bind_with_cert_and_key_pem + skip_if :jruby + skip_unless :ssl + + cert_path = File.expand_path "../examples/puma/client_certs", __dir__ + cert_pem = File.read("#{cert_path}/server.crt") + key_pem = File.read("#{cert_path}/server.key") + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + cert_pem: cert_pem, + key_pem: key_pem, + verify_mode: "the_verify_mode", + } + end + + conf.load + + ssl_binding = "ssl://0.0.0.0:9292?cert=store%3A0&key=store%3A1&verify_mode=the_verify_mode" + assert_equal [ssl_binding], conf.options[:binds] + end + + def test_ssl_bind_with_backlog + skip_unless :ssl + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + backlog: "2048", + } + end + + conf.load + + ssl_binding = conf.options[:binds].first + assert ssl_binding.include?('&backlog=2048') + end + + def test_ssl_bind_with_low_latency_true + skip_unless :ssl + skip_if :jruby + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + low_latency: true + } + end + + conf.load + + ssl_binding = conf.options[:binds].first + assert ssl_binding.include?('&low_latency=true') + end + + def test_ssl_bind_with_low_latency_false + skip_unless :ssl + skip_if :jruby + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + low_latency: false + } + end + + conf.load + + ssl_binding = 
conf.options[:binds].first + assert ssl_binding.include?('&low_latency=false') + end + + def test_ssl_bind_jruby + skip_unless :jruby + skip_unless :ssl + + ciphers = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + keystore: "/path/to/keystore", + keystore_pass: "password", + cipher_suites: ciphers, + protocols: 'TLSv1.2', + verify_mode: "the_verify_mode" + } + end + + conf.load + + ssl_binding = "ssl://0.0.0.0:9292?keystore=/path/to/keystore" \ + "&keystore-pass=password&cipher_suites=#{ciphers}&protocols=TLSv1.2" \ + "&verify_mode=the_verify_mode" + assert_equal [ssl_binding], conf.options[:binds] + end + + def test_ssl_bind_jruby_with_ssl_cipher_list + skip_unless :jruby + skip_unless :ssl + + cipher_list = "TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256" + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + keystore: "/path/to/keystore", + keystore_pass: "password", + ssl_cipher_list: cipher_list, + verify_mode: "the_verify_mode" + } + end + + conf.load + + ssl_binding = "ssl://0.0.0.0:9292?keystore=/path/to/keystore" \ + "&keystore-pass=password&ssl_cipher_list=#{cipher_list}" \ + "&verify_mode=the_verify_mode" + assert_equal [ssl_binding], conf.options[:binds] + end + + def test_ssl_bind_jruby_with_truststore + skip_unless :jruby + skip_unless :ssl + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + keystore: "/path/to/keystore", + keystore_type: "pkcs12", + keystore_pass: "password", + truststore: "default", + truststore_type: "jks", + verify_mode: "none" + } + end + + conf.load + + ssl_binding = "ssl://0.0.0.0:9292?keystore=/path/to/keystore" \ + "&keystore-pass=password&keystore-type=pkcs12" \ + "&truststore=default&truststore-type=jks" \ + "&verify_mode=none" + assert_equal [ssl_binding], conf.options[:binds] + end + + def test_ssl_bind_no_tlsv1_1 + skip_if :jruby + skip_unless 
:ssl + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + cert: "/path/to/cert", + key: "/path/to/key", + verify_mode: "the_verify_mode", + no_tlsv1_1: true + } + end + + conf.load + + ssl_binding = "ssl://0.0.0.0:9292?cert=%2Fpath%2Fto%2Fcert&key=%2Fpath%2Fto%2Fkey&verify_mode=the_verify_mode&no_tlsv1_1=true" + assert_equal [ssl_binding], conf.options[:binds] + end + + def test_ssl_bind_with_cipher_filter + skip_if :jruby + skip_unless :ssl + + cipher_filter = "!aNULL:AES+SHA" + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + cert: "cert", + key: "key", + ssl_cipher_filter: cipher_filter, + } + end + + conf.load + + ssl_binding = conf.options[:binds].first + assert ssl_binding.include?("&ssl_cipher_filter=#{cipher_filter}") + end + + def test_ssl_bind_with_ciphersuites + skip_if :jruby + skip_unless :ssl + skip('Requires TLSv1.3') unless Puma::MiniSSL::HAS_TLS1_3 + + ciphersuites = "TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256" + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + cert: "cert", + key: "key", + ssl_ciphersuites: ciphersuites, + } + end + + conf.load + + ssl_binding = conf.options[:binds].first + assert ssl_binding.include?("&ssl_ciphersuites=#{ciphersuites}") + end + + def test_ssl_bind_with_verification_flags + skip_if :jruby + skip_unless :ssl + + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + cert: "cert", + key: "key", + verification_flags: ["TRUSTED_FIRST", "NO_CHECK_TIME"] + } + end + + conf.load + + ssl_binding = conf.options[:binds].first + assert ssl_binding.include?("&verification_flags=TRUSTED_FIRST,NO_CHECK_TIME") + end + + def test_ssl_bind_with_ca + skip_unless :ssl + conf = Puma::Configuration.new do |c| + c.ssl_bind "0.0.0.0", "9292", { + cert: "/path/to/cert", + ca: "/path/to/ca", + key: "/path/to/key", + verify_mode: :peer, + } + end + + conf.load + + ssl_binding = conf.options[:binds].first + assert_includes ssl_binding, 
Puma::Util.escape("/path/to/ca") + assert_includes ssl_binding, "verify_mode=peer" + end + + def test_lowlevel_error_handler_DSL + conf = Puma::Configuration.new do |c| + c.load "test/config/app.rb" + end + conf.load + + app = conf.options[:lowlevel_error_handler] + + assert_equal [200, {}, ["error page"]], app.call({}) + end + + def test_allow_users_to_override_default_options + conf = Puma::Configuration.new(restart_cmd: 'bin/rails server') + + assert_equal 'bin/rails server', conf.options[:restart_cmd] + end + + def test_overwrite_options + conf = Puma::Configuration.new do |c| + c.workers 3 + end + conf.load + + assert_equal conf.options[:workers], 3 + conf.options[:workers] += 1 + assert_equal conf.options[:workers], 4 + end + + def test_explicit_config_files + conf = Puma::Configuration.new(config_files: ['test/config/settings.rb']) do |c| + end + conf.load + assert_match(/:3000$/, conf.options[:binds].first) + end + + def test_parameters_overwrite_files + conf = Puma::Configuration.new(config_files: ['test/config/settings.rb']) do |c| + c.port 3030 + end + conf.load + + assert_match(/:3030$/, conf.options[:binds].first) + assert_equal 3, conf.options[:min_threads] + assert_equal 5, conf.options[:max_threads] + end + + def test_config_files_default + conf = Puma::Configuration.new do + end + + assert_equal [nil], conf.config_files + end + + def test_config_files_with_dash + conf = Puma::Configuration.new(config_files: ['-']) do + end + + assert_equal [], conf.config_files + end + + def test_config_files_with_existing_path + conf = Puma::Configuration.new(config_files: ['test/config/settings.rb']) do + end + + assert_equal ['test/config/settings.rb'], conf.config_files + end + + def test_config_files_with_non_existing_path + conf = Puma::Configuration.new(config_files: ['test/config/typo/settings.rb']) do + end + + assert_equal ['test/config/typo/settings.rb'], conf.config_files + end + + def test_config_files_with_integer_convert + conf = 
Puma::Configuration.new(config_files: ['test/config/with_integer_convert.rb']) do + end + conf.load + + assert_equal 6, conf.options[:persistent_timeout] + assert_equal 3, conf.options[:first_data_timeout] + assert_equal 2, conf.options[:workers] + assert_equal 4, conf.options[:min_threads] + assert_equal 8, conf.options[:max_threads] + assert_equal 90, conf.options[:worker_timeout] + assert_equal 120, conf.options[:worker_boot_timeout] + assert_equal 150, conf.options[:worker_shutdown_timeout] + end + + def test_config_files_with_float_convert + conf = Puma::Configuration.new(config_files: ['test/config/with_float_convert.rb']) do + end + conf.load + + assert_equal Float::INFINITY, conf.options[:max_fast_inline] + end + + def test_config_files_with_symbol_convert + conf = Puma::Configuration.new(config_files: ['test/config/with_symbol_convert.rb']) do + end + conf.load + + assert_equal :ruby, conf.options[:io_selector_backend] + end + + def test_config_raise_exception_on_sigterm + conf = Puma::Configuration.new do |c| + c.raise_exception_on_sigterm false + end + conf.load + + assert_equal conf.options[:raise_exception_on_sigterm], false + conf.options[:raise_exception_on_sigterm] = true + assert_equal conf.options[:raise_exception_on_sigterm], true + end + + def test_run_hooks_on_restart_hook + assert_run_hooks :on_restart + end + + def test_run_hooks_before_worker_fork + assert_run_hooks :before_worker_fork, configured_with: :on_worker_fork + + assert_warning_for_hooks_defined_in_single_mode :on_worker_fork + end + + def test_run_hooks_after_worker_fork + assert_run_hooks :after_worker_fork + + assert_warning_for_hooks_defined_in_single_mode :after_worker_fork + end + + def test_run_hooks_before_worker_boot + assert_run_hooks :before_worker_boot, configured_with: :on_worker_boot + + assert_warning_for_hooks_defined_in_single_mode :on_worker_boot + end + + def test_run_hooks_before_worker_shutdown + assert_run_hooks :before_worker_shutdown, configured_with: 
:on_worker_shutdown + + assert_warning_for_hooks_defined_in_single_mode :on_worker_shutdown + end + + def test_run_hooks_before_fork + assert_run_hooks :before_fork + + assert_warning_for_hooks_defined_in_single_mode :before_fork + end + + def test_run_hooks_before_thread_start + assert_run_hooks :before_thread_start, configured_with: :on_thread_start + end + + def test_run_hooks_before_thread_exit + assert_run_hooks :before_thread_exit, configured_with: :on_thread_exit + end + + def test_run_hooks_and_exception + conf = Puma::Configuration.new do |c| + c.on_restart do |a| + raise RuntimeError, 'Error from hook' + end + end + conf.load + log_writer = Puma::LogWriter.strings + + conf.run_hooks(:on_restart, 'ARG', log_writer) + expected = /WARNING hook on_restart failed with exception \(RuntimeError\) Error from hook/ + assert_match expected, log_writer.stdout.string + end + + def test_config_does_not_load_workers_by_default + assert_equal 0, Puma::Configuration.new.options.default_options[:workers] + end + + def test_final_options_returns_merged_options + conf = Puma::Configuration.new({ min_threads: 1, max_threads: 2 }, { min_threads: 2 }) + + assert_equal 1, conf.final_options[:min_threads] + assert_equal 2, conf.final_options[:max_threads] + end + + def test_silence_single_worker_warning_default + conf = Puma::Configuration.new + conf.load + + assert_equal false, conf.options[:silence_single_worker_warning] + end + + def test_silence_single_worker_warning_overwrite + conf = Puma::Configuration.new do |c| + c.silence_single_worker_warning + end + conf.load + + assert_equal true, conf.options[:silence_single_worker_warning] + end + + def test_silence_fork_callback_warning_default + conf = Puma::Configuration.new + conf.load + + assert_equal false, conf.options[:silence_fork_callback_warning] + end + + def test_silence_fork_callback_warning_overwrite + conf = Puma::Configuration.new do |c| + c.silence_fork_callback_warning + end + conf.load + + assert_equal true, 
conf.options[:silence_fork_callback_warning] + end + + def test_http_content_length_limit + assert_nil Puma::Configuration.new.options.default_options[:http_content_length_limit] + + conf = Puma::Configuration.new({ http_content_length_limit: 10000}) + + assert_equal 10000, conf.final_options[:http_content_length_limit] + end + + private + + def assert_run_hooks(hook_name, options = {}) + configured_with = options[:configured_with] || hook_name + + # test single, not an array + messages = [] + conf = Puma::Configuration.new do |c| + c.silence_fork_callback_warning + end + conf.options[hook_name] = -> (a) { + messages << "#{hook_name} is called with #{a}" + } + + conf.run_hooks(hook_name, 'ARG', Puma::LogWriter.strings) + assert_equal messages, ["#{hook_name} is called with ARG"] + + # test multiple + messages = [] + conf = Puma::Configuration.new do |c| + c.silence_fork_callback_warning + + c.send(configured_with) do |a| + messages << "#{hook_name} is called with #{a} one time" + end + + c.send(configured_with) do |a| + messages << "#{hook_name} is called with #{a} a second time" + end + end + conf.load + + conf.run_hooks(hook_name, 'ARG', Puma::LogWriter.strings) + assert_equal messages, ["#{hook_name} is called with ARG one time", "#{hook_name} is called with ARG a second time"] + end + + def assert_warning_for_hooks_defined_in_single_mode(hook_name) + out, _ = capture_io do + Puma::Configuration.new do |c| + c.send(hook_name) + end + end + + assert_match "your `#{hook_name}` block did not run\n", out + end +end + +# contains tests that cannot run parallel +class TestConfigFileSingle < Minitest::Test + def test_custom_logger_from_DSL + conf = Puma::Configuration.new { |c| c.load 'test/config/custom_logger.rb' } + + conf.load + out, _ = capture_subprocess_io { conf.options[:custom_logger].write 'test' } + + assert_equal "Custom logging: test\n", out + end +end + +# Thread unsafe modification of ENV +class TestEnvModifificationConfig < Minitest::Test + def 
test_double_bind_port + port = (rand(10_000) + 30_000).to_s + env = { "PORT" => port } + conf = Puma::Configuration.new({}, {}, env) do |user_config, file_config, default_config| + user_config.bind "tcp://#{Puma::Configuration::DEFAULTS[:tcp_host]}:#{port}" + file_config.load "test/config/app.rb" + end + + conf.load + assert_equal ["tcp://0.0.0.0:#{port}"], conf.options[:binds] + end +end + +class TestConfigEnvVariables < Minitest::Test + def test_config_loads_correct_min_threads + assert_equal 0, Puma::Configuration.new.options.default_options[:min_threads] + + env = { "MIN_THREADS" => "7" } + conf = Puma::Configuration.new({}, {}, env) + assert_equal 7, conf.options.default_options[:min_threads] + + env = { "PUMA_MIN_THREADS" => "8" } + conf = Puma::Configuration.new({}, {}, env) + assert_equal 8, conf.options.default_options[:min_threads] + end + + def test_config_loads_correct_max_threads + conf = Puma::Configuration.new + + env = { "MAX_THREADS" => "7" } + conf = Puma::Configuration.new({}, {}, env) + assert_equal 7, conf.options.default_options[:max_threads] + + env = { "PUMA_MAX_THREADS" => "8" } + conf = Puma::Configuration.new({}, {}, env) + assert_equal 8, conf.options.default_options[:max_threads] + end + + def test_config_loads_workers_from_env + env = { "WEB_CONCURRENCY" => "9" } + conf = Puma::Configuration.new({}, {}, env) + assert_equal 9, conf.options.default_options[:workers] + end + + def test_config_does_not_preload_app_if_not_using_workers + env = { "WEB_CONCURRENCY" => "0" } + conf = Puma::Configuration.new({}, {}, env) + assert_equal false, conf.options.default_options[:preload_app] + end + + def test_config_preloads_app_if_using_workers + env = { "WEB_CONCURRENCY" => "2" } + preload = Puma.forkable? 
+ conf = Puma::Configuration.new({}, {}, env) + assert_equal preload, conf.options.default_options[:preload_app] + end +end + +class TestConfigFileWithFakeEnv < Minitest::Test + def setup + FileUtils.mkpath("config/puma") + File.write("config/puma/fake-env.rb", "") + end + + def teardown + FileUtils.rm_r("config/puma") + end + + def test_config_files_with_app_env + env = { 'APP_ENV' => 'fake-env' } + + conf = Puma::Configuration.new({}, {}, env) + + assert_equal ['config/puma/fake-env.rb'], conf.config_files + end + + def test_config_files_with_rack_env + env = { 'RACK_ENV' => 'fake-env' } + + conf = Puma::Configuration.new({}, {}, env) + + assert_equal ['config/puma/fake-env.rb'], conf.config_files + end + + def test_config_files_with_rails_env + env = { 'RAILS_ENV' => 'fake-env', 'RACK_ENV' => nil } + + conf = Puma::Configuration.new({}, {}, env) + + assert_equal ['config/puma/fake-env.rb'], conf.config_files + end + + def test_config_files_with_specified_environment + conf = Puma::Configuration.new + + conf.options[:environment] = 'fake-env' + + assert_equal ['config/puma/fake-env.rb'], conf.config_files + end + + def test_enable_keep_alives_by_default + conf = Puma::Configuration.new + conf.load + + assert_equal conf.options[:enable_keep_alives], true + end + + def test_enable_keep_alives_true + conf = Puma::Configuration.new do |c| + c.enable_keep_alives true + end + conf.load + + assert_equal conf.options[:enable_keep_alives], true + end + + def test_enable_keep_alives_false + conf = Puma::Configuration.new do |c| + c.enable_keep_alives false + end + conf.load + + assert_equal conf.options[:enable_keep_alives], false + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_error_logger.rb b/vendor/cache/puma-fba741b91780/test/test_error_logger.rb new file mode 100644 index 000000000..533c970f6 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_error_logger.rb @@ -0,0 +1,95 @@ +require 'puma/error_logger' +require_relative "helper" + +class 
TestErrorLogger < Minitest::Test + Req = Struct.new(:env, :body) + + def test_stdio + error_logger = Puma::ErrorLogger.stdio + + assert_equal STDERR, error_logger.ioerr + end + + + def test_stdio_respects_sync + error_logger = Puma::ErrorLogger.stdio + + assert_equal STDERR.sync, error_logger.ioerr.sync + assert_equal STDERR, error_logger.ioerr + end + + def test_info_with_only_error + _, err = capture_io do + Puma::ErrorLogger.stdio.info(error: StandardError.new('ready')) + end + + assert_match %r!#!, err + end + + def test_info_with_request + env = { + 'REQUEST_METHOD' => 'GET', + 'PATH_INFO' => '/debug', + 'HTTP_X_FORWARDED_FOR' => '8.8.8.8' + } + req = Req.new(env, '{"hello":"world"}') + + _, err = capture_io do + Puma::ErrorLogger.stdio.info(error: StandardError.new, req: req) + end + + assert_match %r!\("GET /debug" - \(8\.8\.8\.8\)\)!, err + end + + def test_info_with_text + _, err = capture_io do + Puma::ErrorLogger.stdio.info(text: 'The client disconnected while we were reading data') + end + + assert_match %r!The client disconnected while we were reading data!, err + end + + def test_debug_without_debug_mode + _, err = capture_io do + Puma::ErrorLogger.stdio.debug(text: 'blank') + end + + assert_empty err + end + + def test_debug_with_debug_mode + with_debug_mode do + _, err = capture_io do + Puma::ErrorLogger.stdio.debug(text: 'non-blank') + end + + assert_match %r!non-blank!, err + end + end + + def test_debug_backtrace_logging + with_debug_mode do + def dummy_error + raise StandardError.new('non-blank') + rescue => e + Puma::ErrorLogger.stdio.debug(error: e) + end + + _, err = capture_io do + dummy_error + end + + assert_match %r!non-blank!, err + assert_match %r!:in [`'](TestErrorLogger#)?dummy_error'!, err + end + end + + private + + def with_debug_mode + original_debug, ENV["PUMA_DEBUG"] = ENV["PUMA_DEBUG"], "1" + yield + ensure + ENV["PUMA_DEBUG"] = original_debug + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_events.rb 
b/vendor/cache/puma-fba741b91780/test/test_events.rb new file mode 100644 index 000000000..4bf8fd836 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_events.rb @@ -0,0 +1,60 @@ +require 'puma/events' +require_relative "helper" + +class TestEvents < Minitest::Test + def test_register_callback_with_block + res = false + + events = Puma::Events.new + + events.register(:exec) { res = true } + + events.fire(:exec) + + assert_equal true, res + end + + def test_register_callback_with_object + obj = Object.new + + def obj.res + @res || false + end + + def obj.call + @res = true + end + + events = Puma::Events.new + + events.register(:exec, obj) + + events.fire(:exec) + + assert_equal true, obj.res + end + + def test_fire_callback_with_multiple_arguments + res = [] + + events = Puma::Events.new + + events.register(:exec) { |*args| res.concat(args) } + + events.fire(:exec, :foo, :bar, :baz) + + assert_equal [:foo, :bar, :baz], res + end + + def test_on_booted_callback + res = false + + events = Puma::Events.new + + events.on_booted { res = true } + + events.fire_on_booted! + + assert res + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_example_cert_expiration.rb b/vendor/cache/puma-fba741b91780/test/test_example_cert_expiration.rb new file mode 100644 index 000000000..d4f30e0d9 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_example_cert_expiration.rb @@ -0,0 +1,47 @@ +require_relative 'helper' +require 'openssl' + +# +# These are tests to ensure that the checked in certs in the ./examples/ +# directory are valid and work as expected. 
+# +# These tests will start to fail 1 month before the certs expire +# +class TestExampleCertExpiration < Minitest::Test + EXAMPLES_DIR = File.expand_path '../examples/puma', __dir__ + EXPIRE_THRESHOLD = Time.now.utc + (60 * 60 * 24 * 30) # 30 days from now: flag certs expiring within a month + + # Explicitly list the files to test + TEST_FILES = %w[ + cert_puma.pem + client_certs/ca.crt + client_certs/client.crt + client_certs/client_unknown.crt + client_certs/server.crt + client_certs/unknown_ca.crt + chain_cert/ca.crt + chain_cert/cert.crt + chain_cert/intermediate.crt + ] + + def test_certs_not_expired + expiration_data = TEST_FILES.map do |path| + full_path = File.join(EXAMPLES_DIR, path) + not_after = OpenSSL::X509::Certificate.new(File.read(full_path)).not_after + [not_after, path] + end + + failed = expiration_data.select { |ary| ary[0] <= EXPIRE_THRESHOLD } + + if failed.empty? + assert true + else + msg = +"\n** The below certs in the 'examples/puma' folder are expiring soon.\n" \ + " See 'examples/generate_all_certs.md' for instructions on how to regenerate.\n\n" + failed.each do |ary| + msg << " #{ary[1]}\n" + end + assert false, msg + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_http10.rb b/vendor/cache/puma-fba741b91780/test/test_http10.rb new file mode 100644 index 000000000..5448a51c4 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_http10.rb @@ -0,0 +1,27 @@ +require_relative "helper" + +require "puma/puma_http11" + +class Http10ParserTest < Minitest::Test + def test_parse_simple + parser = Puma::HttpParser.new + req = {} + http = "GET / HTTP/1.0\r\n\r\n" + nread = parser.execute(req, http, 0) + + assert nread == http.length, "Failed to parse the full HTTP request" + assert parser.finished?, "Parser didn't finish" + assert !parser.error?, "Parser had error" + assert nread == parser.nread, "Number read returned from execute does not match" + + assert_equal '/', req['REQUEST_PATH'] + assert_equal 'HTTP/1.0', req['SERVER_PROTOCOL'] + assert_equal '/', 
req['REQUEST_URI'] + assert_equal 'GET', req['REQUEST_METHOD'] + assert_nil req['FRAGMENT'] + assert_nil req['QUERY_STRING'] + + parser.reset + assert parser.nread == 0, "Number read after reset should be 0" + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_http11.rb b/vendor/cache/puma-fba741b91780/test/test_http11.rb new file mode 100644 index 000000000..f7741f717 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_http11.rb @@ -0,0 +1,285 @@ +# Copyright (c) 2011 Evan Phoenix +# Copyright (c) 2005 Zed A. Shaw + +require_relative "helper" +require_relative "helpers/integration" +require "digest" + +require "puma/puma_http11" + +class Http11ParserTest < TestIntegration + + parallelize_me! + + def test_parse_simple + parser = Puma::HttpParser.new + req = {} + http = "GET /?a=1 HTTP/1.1\r\n\r\n" + nread = parser.execute(req, http, 0) + + assert nread == http.length, "Failed to parse the full HTTP request" + assert parser.finished?, "Parser didn't finish" + assert !parser.error?, "Parser had error" + assert nread == parser.nread, "Number read returned from execute does not match" + + assert_equal '/', req['REQUEST_PATH'] + assert_equal 'HTTP/1.1', req['SERVER_PROTOCOL'] + assert_equal '/?a=1', req['REQUEST_URI'] + assert_equal 'GET', req['REQUEST_METHOD'] + assert_nil req['FRAGMENT'] + assert_equal "a=1", req['QUERY_STRING'] + + parser.reset + assert parser.nread == 0, "Number read after reset should be 0" + end + + def test_parse_escaping_in_query + parser = Puma::HttpParser.new + req = {} + http = "GET /admin/users?search=%27%%27 HTTP/1.1\r\n\r\n" + nread = parser.execute(req, http, 0) + + assert nread == http.length, "Failed to parse the full HTTP request" + assert parser.finished?, "Parser didn't finish" + assert !parser.error?, "Parser had error" + assert nread == parser.nread, "Number read returned from execute does not match" + + assert_equal '/admin/users?search=%27%%27', req['REQUEST_URI'] + assert_equal "search=%27%%27", 
req['QUERY_STRING'] + + parser.reset + assert parser.nread == 0, "Number read after reset should be 0" + end + + def test_parse_absolute_uri + parser = Puma::HttpParser.new + req = {} + http = "GET http://192.168.1.96:3000/api/v1/matches/test?1=1 HTTP/1.1\r\n\r\n" + nread = parser.execute(req, http, 0) + + assert nread == http.length, "Failed to parse the full HTTP request" + assert parser.finished?, "Parser didn't finish" + assert !parser.error?, "Parser had error" + assert nread == parser.nread, "Number read returned from execute does not match" + + assert_equal "GET", req['REQUEST_METHOD'] + assert_equal 'http://192.168.1.96:3000/api/v1/matches/test?1=1', req['REQUEST_URI'] + assert_equal 'HTTP/1.1', req['SERVER_PROTOCOL'] + + assert_nil req['REQUEST_PATH'] + assert_nil req['FRAGMENT'] + assert_nil req['QUERY_STRING'] + + parser.reset + assert parser.nread == 0, "Number read after reset should be 0" + + end + + def test_parse_dumbfuck_headers + parser = Puma::HttpParser.new + req = {} + should_be_good = "GET / HTTP/1.1\r\naaaaaaaaaaaaa:++++++++++\r\n\r\n" + nread = parser.execute(req, should_be_good, 0) + assert_equal should_be_good.length, nread + assert parser.finished? + assert !parser.error? + end + + def test_parse_error + parser = Puma::HttpParser.new + req = {} + bad_http = "GET / SsUTF/1.1" + + error = false + begin + parser.execute(req, bad_http, 0) + rescue + error = true + end + + assert error, "failed to throw exception" + assert !parser.finished?, "Parser shouldn't be finished" + assert parser.error?, "Parser SHOULD have error" + end + + def test_fragment_in_uri + parser = Puma::HttpParser.new + req = {} + get = "GET /forums/1/topics/2375?page=1#posts-17408 HTTP/1.1\r\n\r\n" + + parser.execute(req, get, 0) + + assert parser.finished? 
+ assert_equal '/forums/1/topics/2375?page=1', req['REQUEST_URI'] + assert_equal 'posts-17408', req['FRAGMENT'] + end + + def test_semicolon_in_path + parser = Puma::HttpParser.new + req = {} + get = "GET /forums/1/path;stillpath/2375?page=1 HTTP/1.1\r\n\r\n" + + parser.execute(req, get, 0) + + assert parser.finished? + assert_equal '/forums/1/path;stillpath/2375?page=1', req['REQUEST_URI'] + assert_equal '/forums/1/path;stillpath/2375', req['REQUEST_PATH'] + end + + # lame random garbage maker + def rand_data(min, max, readable=true) + count = min + ((rand(max)+1) *10).to_i + res = count.to_s + "/" + + if readable + res << Digest(:SHA1).hexdigest(rand(count * 100).to_s) * (count / 40) + else + res << Digest(:SHA1).digest(rand(count * 100).to_s) * (count / 20) + end + + res + end + + def test_get_const_length + skip_unless :jruby + + envs = %w[PUMA_REQUEST_URI_MAX_LENGTH PUMA_REQUEST_PATH_MAX_LENGTH PUMA_QUERY_STRING_MAX_LENGTH] + default_exp = [1024 * 12, 8192, 10 * 1024] + tests = [{ envs: %w[60000 61000 62000], exp: [60000, 61000, 62000], error_indexes: [] }, + { envs: ['', 'abc', nil], exp: default_exp, error_indexes: [1] }, + { envs: %w[-4000 0 3000.45], exp: default_exp, error_indexes: [0, 1, 2] }] + cli_config = <<~CONFIG + app do |_| + require 'json' + [200, {}, [{ MAX_REQUEST_URI_LENGTH: org.jruby.puma.Http11::MAX_REQUEST_URI_LENGTH, + MAX_REQUEST_PATH_LENGTH: org.jruby.puma.Http11::MAX_REQUEST_PATH_LENGTH, + MAX_QUERY_STRING_LENGTH: org.jruby.puma.Http11::MAX_QUERY_STRING_LENGTH, + MAX_REQUEST_URI_LENGTH_ERR: org.jruby.puma.Http11::MAX_REQUEST_URI_LENGTH_ERR, + MAX_REQUEST_PATH_LENGTH_ERR: org.jruby.puma.Http11::MAX_REQUEST_PATH_LENGTH_ERR, + MAX_QUERY_STRING_LENGTH_ERR: org.jruby.puma.Http11::MAX_QUERY_STRING_LENGTH_ERR }.to_json]] + end + CONFIG + + tests.each do |conf| + cli_server 'test/rackup/hello.ru', + env: {envs[0] => conf[:envs][0], envs[1] => conf[:envs][1], envs[2] => conf[:envs][2]}, + merge_err: true, + config: cli_config + result = 
JSON.parse read_body(connect) + + assert_equal conf[:exp][0], result['MAX_REQUEST_URI_LENGTH'] + assert_equal conf[:exp][1], result['MAX_REQUEST_PATH_LENGTH'] + assert_equal conf[:exp][2], result['MAX_QUERY_STRING_LENGTH'] + + assert_includes result['MAX_REQUEST_URI_LENGTH_ERR'], "longer than the #{conf[:exp][0]} allowed length" + assert_includes result['MAX_REQUEST_PATH_LENGTH_ERR'], "longer than the #{conf[:exp][1]} allowed length" + assert_includes result['MAX_QUERY_STRING_LENGTH_ERR'], "longer than the #{conf[:exp][2]} allowed length" + + conf[:error_indexes].each do |index| + assert_includes @server_log, "The value #{conf[:envs][index]} for #{envs[index]} is invalid. "\ + "Using default value #{default_exp[index]} instead" + end + + stop_server + end + end + + def test_max_uri_path_length + parser = Puma::HttpParser.new + req = {} + + # Support URI path length to a max of 8192 + path = "/" + rand_data(7000, 100) + http = "GET #{path} HTTP/1.1\r\n\r\n" + parser.execute(req, http, 0) + assert_equal path, req['REQUEST_PATH'] + parser.reset + + # Raise exception if URI path length > 8192 + path = "/" + rand_data(9000, 100) + http = "GET #{path} HTTP/1.1\r\n\r\n" + assert_raises Puma::HttpParserError do + parser.execute(req, http, 0) + parser.reset + end + end + + def test_horrible_queries + parser = Puma::HttpParser.new + + # then that large header names are caught + 10.times do |c| + get = "GET /#{rand_data(10,120)} HTTP/1.1\r\nX-#{rand_data(1024, 1024+(c*1024))}: Test\r\n\r\n" + assert_raises Puma::HttpParserError do + parser.execute({}, get, 0) + parser.reset + end + end + + # then that large mangled field values are caught + 10.times do |c| + get = "GET /#{rand_data(10,120)} HTTP/1.1\r\nX-Test: #{rand_data(1024, 1024+(c*1024), false)}\r\n\r\n" + assert_raises Puma::HttpParserError do + parser.execute({}, get, 0) + parser.reset + end + end + + # then large headers are rejected too + get = "GET /#{rand_data(10,120)} HTTP/1.1\r\n" + get += "X-Test: test\r\n" * 
(80 * 1024) + assert_raises Puma::HttpParserError do + parser.execute({}, get, 0) + parser.reset + end + + # finally just that random garbage gets blocked all the time + 10.times do |c| + get = "GET #{rand_data(1024, 1024+(c*1024), false)} #{rand_data(1024, 1024+(c*1024), false)}\r\n\r\n" + assert_raises Puma::HttpParserError do + parser.execute({}, get, 0) + parser.reset + end + end + end + + def test_trims_whitespace_from_headers + parser = Puma::HttpParser.new + req = {} + http = "GET / HTTP/1.1\r\nX-Strip-Me: Strip This \r\n\r\n" + + parser.execute(req, http, 0) + + assert_equal "Strip This", req["HTTP_X_STRIP_ME"] + end + + def test_newline_smuggler + parser = Puma::HttpParser.new + req = {} + http = "GET / HTTP/1.1\r\nHost: localhost:8080\r\nDummy: x\nDummy2: y\r\n\r\n" + + parser.execute(req, http, 0) rescue nil # We test the raise elsewhere. + + assert parser.error?, "Parser SHOULD have error" + end + + def test_newline_smuggler_two + parser = Puma::HttpParser.new + req = {} + http = "GET / HTTP/1.1\r\nHost: localhost:8080\r\nDummy: x\r\nDummy: y\nDummy2: z\r\n\r\n" + + parser.execute(req, http, 0) rescue nil + + assert parser.error?, "Parser SHOULD have error" + end + + def test_htab_in_header_val + parser = Puma::HttpParser.new + req = {} + http = "GET / HTTP/1.1\r\nHost: localhost:8080\r\nDummy: Valid\tValue\r\n\r\n" + + parser.execute(req, http, 0) + + assert_equal "Valid\tValue", req['HTTP_DUMMY'] + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_integration_cluster.rb b/vendor/cache/puma-fba741b91780/test/test_integration_cluster.rb new file mode 100644 index 000000000..46e0e6ca3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_integration_cluster.rb @@ -0,0 +1,777 @@ +require_relative "helper" +require_relative "helpers/integration" + +require "puma/configuration" + +require "time" + +class TestIntegrationCluster < TestIntegration + parallelize_me! if ::Puma.mri? 
+ + def workers ; 2 ; end + + def setup + skip_unless :fork + super + end + + def teardown + return if skipped? + super + end + + def test_hot_restart_does_not_drop_connections_threads + hot_restart_does_not_drop_connections num_threads: 10, total_requests: 3_000 + end + + def test_hot_restart_does_not_drop_connections + hot_restart_does_not_drop_connections num_threads: 1, total_requests: 1_000 + end + + def test_pre_existing_unix + skip_unless :unix + + File.open(@bind_path, mode: 'wb') { |f| f.puts 'pre existing' } + + cli_server "-w #{workers} -q test/rackup/sleep_step.ru", unix: :unix + + stop_server + + assert File.exist?(@bind_path) + + ensure + if UNIX_SKT_EXIST + File.unlink @bind_path if File.exist? @bind_path + end + end + + def test_pre_existing_unix_stop_after_restart + skip_unless :unix + + File.open(@bind_path, mode: 'wb') { |f| f.puts 'pre existing' } + + cli_server "-w #{workers} -q test/rackup/sleep_step.ru", unix: :unix + connection = connect(nil, unix: true) + restart_server connection + + connect(nil, unix: true) + stop_server + + assert File.exist?(@bind_path) + + ensure + if UNIX_SKT_EXIST + File.unlink @bind_path if File.exist? @bind_path + end + end + + def test_siginfo_thread_print + skip_unless_signal_exist? :INFO + + cli_server "-w #{workers} -q test/rackup/hello.ru" + worker_pids = get_worker_pids + output = [] + t = Thread.new { output << @server.readlines } + Process.kill :INFO, worker_pids.first + Process.kill :INT , @pid + t.join + + assert_match "Thread: TID", output.join + end + + def test_usr2_restart + _, new_reply = restart_server_and_listen("-q -w #{workers} test/rackup/hello.ru") + assert_equal "Hello World", new_reply + end + + # Next two tests, one tcp, one unix + # Send requests 10 per second. Send 10, then :TERM server, then send another 30. + # No more than 10 should throw Errno::ECONNRESET. + + def test_term_closes_listeners_tcp + skip_unless_signal_exist? 
:TERM + term_closes_listeners unix: false + end + + def test_term_closes_listeners_unix + skip_unless_signal_exist? :TERM + term_closes_listeners unix: true + end + + # Next two tests, one tcp, one unix + # Send requests 1 per second. Send 1, then :USR1 server, then send another 24. + # All should be responded to, and at least three workers should be used + + def test_usr1_all_respond_tcp + skip_unless_signal_exist? :USR1 + usr1_all_respond unix: false + end + + def test_usr1_fork_worker + skip_unless_signal_exist? :USR1 + usr1_all_respond config: '--fork-worker' + end + + def test_usr1_all_respond_unix + skip_unless_signal_exist? :USR1 + usr1_all_respond unix: true + end + + def test_term_exit_code + skip_unless_signal_exist? :TERM + + cli_server "-w #{workers} test/rackup/hello.ru" + _, status = stop_server + + assert_equal 15, status + end + + def test_term_suppress + skip_unless_signal_exist? :TERM + + cli_server "-w #{workers} -C test/config/suppress_exception.rb test/rackup/hello.ru" + + _, status = stop_server + + assert_equal 0, status + end + + def test_on_booted_and_on_stopped + skip_unless_signal_exist? :TERM + cli_server "-w #{workers} -C test/config/event_on_booted_and_on_stopped.rb -C test/config/event_on_booted_exit.rb test/rackup/hello.ru" + + # above checks 'Ctrl-C', below is logged after workers boot + assert wait_for_server_to_include('on_booted called') + assert wait_for_server_to_include('Goodbye!') + # below logged after workers are stopped + assert wait_for_server_to_include('on_stopped called') + end + + def test_term_worker_clean_exit + skip_unless_signal_exist? :TERM + cli_server "-w #{workers} test/rackup/hello.ru" + + # Get the PIDs of the child workers. + worker_pids = get_worker_pids + + # Signal the workers to terminate, and wait for them to die. 
+ Process.kill :TERM, @pid + wait_server 15 + + zombies = bad_exit_pids worker_pids + + assert_empty zombies, "Process ids #{zombies} became zombies" + end + + # mimicking stuck workers, test respawn with external TERM + def test_stuck_external_term_spawn + skip_unless_signal_exist? :TERM + + worker_respawn(0) do |phase0_worker_pids| + last = phase0_worker_pids.last + # test is tricky if only one worker is TERM'd, so kill all but + # spread out, so all aren't killed at once + phase0_worker_pids.each do |pid| + Process.kill :TERM, pid + sleep 4 unless pid == last + end + end + end + + # From Ruby 2.6 to 3.2, `Process.detach` can delay or prevent + # `Process.wait2(-1)` from detecting a terminated child: + # https://bugs.ruby-lang.org/issues/19837. However, + # `Process.wait2()` still works properly. This bug has + # been fixed in Ruby 3.3. + def test_workers_respawn_with_process_detach + skip_unless_signal_exist? :KILL + + config = 'test/config/process_detach_before_fork.rb' + + worker_respawn(0, workers, config) do |phase0_worker_pids| + phase0_worker_pids.each do |pid| + Process.kill :KILL, pid + end + end + + # `test/config/process_detach_before_fork.rb` forks and detaches a + # process. Since MiniTest attempts to join all threads before + # finishing, terminate the process so that the test can end quickly + # if it passes. + pid_filename = File.join(Dir.tmpdir, 'process_detach_test.pid') + if File.exist?(pid_filename) + pid = File.read(pid_filename).chomp.to_i + File.unlink(pid_filename) + Process.kill :TERM, pid if pid > 0 + end + end + + # mimicking stuck workers, test restart + def test_stuck_phased_restart + skip_unless_signal_exist? 
:USR1 + worker_respawn { |phase0_worker_pids| Process.kill :USR1, @pid } + end + + def test_worker_check_interval + # iso8601 2022-12-14T00:05:49Z + re_8601 = /\A\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z\z/ + @control_tcp_port = UniquePort.call + worker_check_interval = 1 + + cli_server "-w 1 -t 1:1 --control-url tcp://#{HOST}:#{@control_tcp_port} --control-token #{TOKEN} test/rackup/hello.ru", config: "worker_check_interval #{worker_check_interval}" + + sleep worker_check_interval + 1 + checkin_1 = get_stats["worker_status"].first["last_checkin"] + assert_match re_8601, checkin_1 + + sleep worker_check_interval + 1 + checkin_2 = get_stats["worker_status"].first["last_checkin"] + assert_match re_8601, checkin_2 + + # iso8601 sorts as a string + assert_operator(checkin_2, :>, checkin_1) + end + + def test_worker_boot_timeout + timeout = 1 + worker_timeout(timeout, 2, "failed to boot within \\\d+ seconds", "worker_boot_timeout #{timeout}; on_worker_boot { sleep #{timeout + 1} }") + end + + def test_worker_timeout + skip 'Thread#name not available' unless Thread.current.respond_to?(:name) + timeout = Puma::Configuration::DEFAULTS[:worker_check_interval] + 1 + config = <<~CONFIG + worker_timeout #{timeout} + on_worker_boot do + Thread.new do + sleep 1 + Thread.list.find {|t| t.name == 'puma stat pld'}.kill + end + end + CONFIG + + worker_timeout(timeout, 1, "failed to check in within \\\d+ seconds", config) + end + + def test_idle_timeout + cli_server "-w #{workers} test/rackup/hello.ru", config: "idle_timeout 1" + + get_worker_pids # wait for workers to boot + + 10.times { + fast_connect + sleep 0.5 + } + + sleep 1.15 + + assert_raises Errno::ECONNREFUSED, "Connection refused" do + connect + end + end + + def test_worker_index_is_with_in_options_limit + skip_unless_signal_exist? :TERM + + cli_server "-C test/config/t3_conf.rb test/rackup/hello.ru" + + get_worker_pids(0, 3) # this will wait till all the processes are up + + worker_pid_was_present = File.file? 
"t3-worker-2-pid" + + stop_server(Integer(File.read("t3-worker-2-pid"))) + + worker_index_within_number_of_workers = !File.file?("t3-worker-3-pid") + + stop_server(Integer(File.read("t3-pid"))) + + assert(worker_pid_was_present) + assert(worker_index_within_number_of_workers) + ensure + File.unlink "t3-pid" if File.file? "t3-pid" + File.unlink "t3-worker-0-pid" if File.file? "t3-worker-0-pid" + File.unlink "t3-worker-1-pid" if File.file? "t3-worker-1-pid" + File.unlink "t3-worker-2-pid" if File.file? "t3-worker-2-pid" + File.unlink "t3-worker-3-pid" if File.file? "t3-worker-3-pid" + end + + # use three workers to keep accepting clients + def test_fork_worker_on_refork + refork = Tempfile.new 'refork' + wrkrs = 3 + cli_server "-w #{wrkrs} test/rackup/hello_with_delay.ru", config: <<~CONFIG + fork_worker 20 + on_refork { File.write '#{refork.path}', 'Reforked' } + CONFIG + + pids = get_worker_pids 0, wrkrs + + socks = [] + until refork.read == 'Reforked' + socks << fast_connect + sleep 0.004 + end + + 100.times { + socks << fast_connect + sleep 0.004 + } + + socks.each { |s| read_body s } + + refute_includes pids, get_worker_pids(1, wrkrs - 1) + end + + def test_fork_worker_spawn + cli_server '', config: <<~CONFIG + workers 1 + fork_worker 0 + app do |_| + pid = spawn('ls', [:out, :err]=>'/dev/null') + sleep 0.01 + exitstatus = Process.detach(pid).value.exitstatus + [200, {}, [exitstatus.to_s]] + end + CONFIG + assert_equal '0', read_body(connect) + end + + def test_fork_worker_phased_restart_with_high_worker_count + worker_count = 10 + + cli_server "test/rackup/hello.ru", config: <<~CONFIG + fork_worker 0 + worker_check_interval 1 + # lower worker timeout from default (60) to avoid test timeout + worker_timeout 2 + # to simulate worker 0 timeout, total boot time for all workers + # needs to exceed single worker timeout + workers #{worker_count} + CONFIG + + # workers is the default + get_worker_pids 0, worker_count + + Process.kill :USR1, @pid + + get_worker_pids 1, 
worker_count + + # below is so all of @server_log isn't output for failure + refute @server_log[/.*Terminating timed out worker.*/] + end + + def test_prune_bundler_with_multiple_workers + cli_server "-C test/config/prune_bundler_with_multiple_workers.rb" + reply = read_body(connect) + + assert reply, "embedded app" + end + + def test_load_path_includes_extra_deps + cli_server "-w #{workers} -C test/config/prune_bundler_with_deps.rb test/rackup/hello.ru" + + assert wait_for_server_to_match(/^LOAD_PATH: .+?\/gems\/minitest-[\d.]+\/lib$/) + end + + def test_load_path_does_not_include_nio4r + cli_server "-w #{workers} -C test/config/prune_bundler_with_deps.rb test/rackup/hello.ru" + + get_worker_pids # reads thru 'LOAD_PATH:' data + + # make sure we're seeing LOAD_PATH: logging + assert_match(/^LOAD_PATH: .+\/gems\/minitest-[\d.]+\/lib$/, @server_log) + refute_match(%r{gems/nio4r-[\d.]+/lib$}, @server_log) + end + + def test_json_gem_not_required_in_master_process + cli_server "-w #{workers} -C test/config/prune_bundler_print_json_defined.rb test/rackup/hello.ru" + + assert wait_for_server_to_include('defined?(::JSON): nil') + end + + def test_nio4r_gem_not_required_in_master_process + cli_server "-w #{workers} -C test/config/prune_bundler_print_nio_defined.rb test/rackup/hello.ru" + + assert wait_for_server_to_include('defined?(::NIO): nil') + end + + def test_nio4r_gem_not_required_in_master_process_when_using_control_server + @control_tcp_port = UniquePort.call + control_opts = "--control-url tcp://#{HOST}:#{@control_tcp_port} --control-token #{TOKEN}" + cli_server "-w #{workers} #{control_opts} -C test/config/prune_bundler_print_nio_defined.rb test/rackup/hello.ru" + + assert wait_for_server_to_include('Starting control server') + + assert wait_for_server_to_include('defined?(::NIO): nil') + end + + def test_application_is_loaded_exactly_once_if_using_preload_app + cli_server "-w #{workers} --preload test/rackup/write_to_stdout_on_boot.ru" + + get_worker_pids + 
loading_app_count = @server_log.scan('Loading app').length + assert_equal 1, loading_app_count + end + + def test_warning_message_outputted_when_single_worker + cli_server "-w 1 test/rackup/hello.ru" + + assert wait_for_server_to_include('Worker 0 (PID') + assert_match(/WARNING: Detected running cluster mode with 1 worker/, @server_log) + end + + def test_warning_message_not_outputted_when_single_worker_silenced + cli_server "-w 1 test/rackup/hello.ru", config: "silence_single_worker_warning" + + assert wait_for_server_to_include('Worker 0 (PID') + refute_match(/WARNING: Detected running cluster mode with 1 worker/, @server_log) + end + + def test_signal_ttin + cli_server "-w 2 test/rackup/hello.ru" + get_worker_pids # to consume server logs + + Process.kill :TTIN, @pid + + assert wait_for_server_to_match(/Worker 2 \(PID: \d+\) booted in/) + end + + def test_signal_ttou + cli_server "-w 2 test/rackup/hello.ru" + get_worker_pids # to consume server logs + + Process.kill :TTOU, @pid + + assert wait_for_server_to_match(/Worker 1 \(PID: \d+\) terminating/) + end + + def test_culling_strategy_youngest + cli_server "-w 2 test/rackup/hello.ru", config: "worker_culling_strategy :youngest" + get_worker_pids # to consume server logs + + Process.kill :TTIN, @pid + + assert wait_for_server_to_match(/Worker 2 \(PID: \d+\) booted in/) + + Process.kill :TTOU, @pid + + assert wait_for_server_to_match(/Worker 2 \(PID: \d+\) terminating/) + end + + def test_culling_strategy_oldest + cli_server "-w 2 test/rackup/hello.ru", config: "worker_culling_strategy :oldest" + get_worker_pids # to consume server logs + + Process.kill :TTIN, @pid + + assert wait_for_server_to_match(/Worker 2 \(PID: \d+\) booted in/) + + Process.kill :TTOU, @pid + + assert wait_for_server_to_match(/Worker 0 \(PID: \d+\) terminating/) + end + + def test_culling_strategy_oldest_fork_worker + cli_server "-w 2 test/rackup/hello.ru", config: <<~CONFIG + worker_culling_strategy :oldest + fork_worker + CONFIG + + 
get_worker_pids # to consume server logs + + Process.kill :TTIN, @pid + + assert wait_for_server_to_match(/Worker 2 \(PID: \d+\) booted in/) + + Process.kill :TTOU, @pid + + assert wait_for_server_to_match(/Worker 1 \(PID: \d+\) terminating/) + end + + def test_hook_data + skip_unless_signal_exist? :TERM + + cli_server "-C test/config/hook_data.rb test/rackup/hello.ru" + get_worker_pids 0, 2 # make sure workers are booted + stop_server + + ary = Array.new(2) do |_index| + wait_for_server_to_match(/(index \d data \d)/, 1) + end.sort + + assert 'index 0 data 0', ary[0] + assert 'index 1 data 1', ary[1] + end + + def test_worker_hook_warning_cli + cli_server "-w2 test/rackup/hello.ru", config: <<~CONFIG + on_worker_boot(:test) do |index, data| + data[:test] = index + end + CONFIG + + get_worker_pids + line = @server_log[/.+on_worker_boot.+/] + refute line, "Warning below should not be shown!\n#{line}" + end + + def test_worker_hook_warning_web_concurrency + cli_server "test/rackup/hello.ru", + env: { 'WEB_CONCURRENCY' => '2'}, + config: <<~CONFIG + on_worker_boot(:test) do |index, data| + data[:test] = index + end + CONFIG + + get_worker_pids + line = @server_log[/.+on_worker_boot.+/] + refute line, "Warning below should not be shown!\n#{line}" + end + + def test_puma_debug_loaded_exts + cli_server "-w #{workers} test/rackup/hello.ru", puma_debug: true + + assert wait_for_server_to_include('Loaded Extensions - worker 0:') + assert wait_for_server_to_include('Loaded Extensions - master:') + @pid = @server.pid + end + + private + + def worker_timeout(timeout, iterations, details, config, log: nil) + cli_server "-w #{workers} -t 1:1 test/rackup/hello.ru", config: config + + pids = [] + re = /Terminating timed out worker \(Worker \d+ #{details}\): (\d+)/ + + Timeout.timeout(iterations * (timeout + 1)) do + while (pids.size < workers * iterations) + idx = wait_for_server_to_match(re, 1).to_i + pids << idx + end + end + + assert_equal pids, pids.uniq + end + + # Send 
requests 10 per second. Send 10, then :TERM server, then send another 30. + # No more than 10 should throw Errno::ECONNRESET. + def term_closes_listeners(unix: false) + skip_unless_signal_exist? :TERM + + cli_server "-w #{workers} -t 0:6 -q test/rackup/sleep_step.ru", unix: unix + threads = [] + replies = [] + mutex = Mutex.new + div = 10 + + refused = thread_run_refused unix: unix + + 41.times.each do |i| + if i == 10 + threads << Thread.new do + sleep i.to_f/div + Process.kill :TERM, @pid + mutex.synchronize { replies[i] = :term_sent } + end + else + threads << Thread.new do + thread_run_step replies, i.to_f/div, 1, i, mutex, refused, unix: unix + end + end + end + + threads.each(&:join) + + failures = replies.count(:failure) + successes = replies.count(:success) + resets = replies.count(:reset) + refused = replies.count(:refused) + read_timeouts = replies.count(:read_timeout) + + r_success = replies.rindex(:success) + l_reset = replies.index(:reset) + r_reset = replies.rindex(:reset) + l_refused = replies.index(:refused) + + msg = "#{successes} successes, #{resets} resets, #{refused} refused, #{failures} failures, #{read_timeouts} read timeouts" + + assert_equal 0, failures, msg + assert_equal 0, read_timeouts, msg + + assert_operator 9, :<=, successes, msg + + assert_operator 10, :>=, resets , msg + + assert_operator 20, :<=, refused , msg + + # Interleaved asserts + # UNIX binders do not generate :reset items + if l_reset + assert_operator r_success, :<, l_reset , "Interleaved success and reset" + assert_operator r_reset , :<, l_refused, "Interleaved reset and refused" + else + assert_operator r_success, :<, l_refused, "Interleaved success and refused" + end + + ensure + if passed? + $debugging_info << "#{full_name}\n #{msg}\n" + else + $debugging_info << "#{full_name}\n #{msg}\n#{replies.inspect}\n" + end + end + + # Send requests 1 per second. Send 1, then :USR1 server, then send another 24. 
+ # All should be responded to, and at least three workers should be used + def usr1_all_respond(unix: false, config: '') + cli_server "-w #{workers} -t 0:5 -q test/rackup/sleep_pid.ru #{config}", unix: unix + threads = [] + replies = [] + mutex = Mutex.new + + s = connect "sleep1", unix: unix + replies << read_body(s) + + Process.kill :USR1, @pid + + refused = thread_run_refused unix: unix + + 24.times do |delay| + threads << Thread.new do + thread_run_pid replies, delay, 1, mutex, refused, unix: unix + end + end + + threads.each(&:join) + + responses = replies.count { |r| r[/\ASlept 1/] } + resets = replies.count { |r| r == :reset } + refused = replies.count { |r| r == :refused } + read_timeouts = replies.count { |r| r == :read_timeout } + + # get pids from replies, generate uniq array + t = replies.map { |body| body[/\d+\z/] } + t.uniq!; t.compact! + qty_pids = t.length + + msg = "#{responses} responses, #{qty_pids} uniq pids" + + assert_equal 25, responses, msg + assert_operator qty_pids, :>, 2, msg + + msg = "#{responses} responses, #{resets} resets, #{refused} refused, #{read_timeouts} read timeouts" + + assert_equal 0, refused, msg + + assert_equal 0, resets, msg + + assert_equal 0, read_timeouts, msg + ensure + unless passed? 
+ $debugging_info << "#{full_name}\n #{msg}\n#{replies.inspect}\n" + end + end + + def worker_respawn(phase = 1, size = workers, config = 'test/config/worker_shutdown_timeout_2.rb') + threads = [] + + cli_server "-w #{workers} -t 1:1 -C #{config} test/rackup/sleep_pid.ru" + + # make sure two workers have booted + phase0_worker_pids = get_worker_pids + + [35, 40].each do |sleep_time| + threads << Thread.new do + begin + connect "sleep#{sleep_time}" + # stuck connections will raise IOError or Errno::ECONNRESET + # when shutdown + rescue IOError, Errno::ECONNRESET + end + end + end + + @start_time = Time.now.to_f + + # below should 'cancel' the phase 0 workers, either via phased_restart or + # externally TERM'ing them + yield phase0_worker_pids + + # wait for new workers to boot + phase1_worker_pids = get_worker_pids phase + + # should be empty if all phase 0 workers cleanly exited + phase0_exited = bad_exit_pids phase0_worker_pids + + # Since 35 is the shorter of the two requests, server should restart + # and cancel both requests + assert_operator (Time.now.to_f - @start_time).round(2), :<, 35 + + msg = "phase0_worker_pids #{phase0_worker_pids.inspect} phase1_worker_pids #{phase1_worker_pids.inspect} phase0_exited #{phase0_exited.inspect}" + assert_equal workers, phase0_worker_pids.length, msg + + assert_equal workers, phase1_worker_pids.length, msg + assert_empty phase0_worker_pids & phase1_worker_pids, "#{msg}\nBoth workers should be replaced with new" + + assert_empty phase0_exited, msg + + threads.each { |th| Thread.kill th } + end + + # Returns an array of pids still in the process table, so it should + # be empty for a clean exit. + # Process.kill should raise the Errno::ESRCH exception, indicating the + # process is dead and has been reaped. 
+ def bad_exit_pids(pids) + t = pids.map do |pid| + begin + pid if Process.kill 0, pid + rescue Errno::ESRCH + nil + end + end + t.compact!; t + end + + # used in loop to create several 'requests' + def thread_run_pid(replies, delay, sleep_time, mutex, refused, unix: false) + begin + sleep delay + s = fast_connect "sleep#{sleep_time}", unix: unix + body = read_body(s, 20) + mutex.synchronize { replies << body } + rescue Errno::ECONNRESET + # connection was accepted but then closed + # client would see an empty response + mutex.synchronize { replies << :reset } + rescue *refused + mutex.synchronize { replies << :refused } + rescue Timeout::Error + mutex.synchronize { replies << :read_timeout } + end + end + + # used in loop to create several 'requests' + def thread_run_step(replies, delay, sleep_time, step, mutex, refused, unix: false) + begin + sleep delay + s = connect "sleep#{sleep_time}-#{step}", unix: unix + body = read_body(s, 20) + if body[/\ASlept /] + mutex.synchronize { replies[step] = :success } + else + mutex.synchronize { replies[step] = :failure } + end + rescue Errno::ECONNRESET + # connection was accepted but then closed + # client would see an empty response + mutex.synchronize { replies[step] = :reset } + rescue *refused + mutex.synchronize { replies[step] = :refused } + rescue Timeout::Error + mutex.synchronize { replies[step] = :read_timeout } + end + end +end if ::Process.respond_to?(:fork) diff --git a/vendor/cache/puma-fba741b91780/test/test_integration_pumactl.rb b/vendor/cache/puma-fba741b91780/test/test_integration_pumactl.rb new file mode 100644 index 000000000..5dfe6cc18 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_integration_pumactl.rb @@ -0,0 +1,259 @@ +require_relative "helper" +require_relative "helpers/integration" + +class TestIntegrationPumactl < TestIntegration + include TmpPath + parallelize_me! if ::Puma.mri? 
+ + def workers ; 2 ; end + + def setup + super + @control_path = nil + @state_path = tmp_path('.state') + end + + def teardown + super + + refute @control_path && File.exist?(@control_path), "Control path must be removed after stop" + ensure + [@state_path, @control_path].each { |p| File.unlink(p) rescue nil } + end + + def test_stop_tcp + skip_if :jruby, :truffleruby # Undiagnose thread race. TODO fix + @control_tcp_port = UniquePort.call + cli_server "-q test/rackup/sleep.ru #{set_pumactl_args} -S #{@state_path}" + + cli_pumactl "stop" + + wait_server + end + + def test_stop_unix + ctl_unix + end + + def test_halt_unix + ctl_unix 'halt' + end + + def ctl_unix(signal='stop') + skip_unless :unix + stderr = Tempfile.new(%w(stderr .log)) + + cli_server "-q test/rackup/sleep.ru #{set_pumactl_args unix: true} -S #{@state_path}", + config: "stdout_redirect nil, '#{stderr.path}'", + unix: true + + cli_pumactl signal, unix: true + + wait_server + + refute_match 'error', File.read(stderr.path) + end + + def test_phased_restart_cluster + skip_unless :fork + cli_server "-q -w #{workers} test/rackup/sleep.ru #{set_pumactl_args unix: true} -S #{@state_path}", unix: true + + start = Process.clock_gettime(Process::CLOCK_MONOTONIC) + + s = UNIXSocket.new @bind_path + @ios_to_close << s + s << "GET /sleep1 HTTP/1.0\r\n\r\n" + + # Get the PIDs of the phase 0 workers. + phase0_worker_pids = get_worker_pids 0 + assert File.exist? @bind_path + + # Phased restart + cli_pumactl "phased-restart", unix: true + + # Get the PIDs of the phase 1 workers. 
+ phase1_worker_pids = get_worker_pids 1 + + msg = "phase 0 pids #{phase0_worker_pids.inspect} phase 1 pids #{phase1_worker_pids.inspect}" + + assert_equal workers, phase0_worker_pids.length, msg + assert_equal workers, phase1_worker_pids.length, msg + assert_empty phase0_worker_pids & phase1_worker_pids, "#{msg}\nBoth workers should be replaced with new" + assert File.exist?(@bind_path), "Bind path must exist after phased restart" + + cli_pumactl "stop", unix: true + + wait_server + assert_operator Process.clock_gettime(Process::CLOCK_MONOTONIC) - start, :<, (DARWIN ? 8 : 7) + end + + def test_refork_cluster + skip_unless :fork + wrkrs = 3 + cli_server "-q -w #{wrkrs} test/rackup/sleep.ru #{set_pumactl_args unix: true} -S #{@state_path}", + config: 'fork_worker 50', + unix: true + + start = Time.now + + fast_connect("sleep1", unix: true) + + # Get the PIDs of the phase 0 workers. + phase0_worker_pids = get_worker_pids 0, wrkrs + assert File.exist? @bind_path + + cli_pumactl "refork", unix: true + + # Get the PIDs of the phase 1 workers. 
+ phase1_worker_pids = get_worker_pids 1, wrkrs - 1 + + msg = "phase 0 pids #{phase0_worker_pids.inspect} phase 1 pids #{phase1_worker_pids.inspect}" + + assert_equal wrkrs , phase0_worker_pids.length, msg + assert_equal wrkrs - 1, phase1_worker_pids.length, msg + assert_empty phase0_worker_pids & phase1_worker_pids, "#{msg}\nBoth workers should be replaced with new" + assert File.exist?(@bind_path), "Bind path must exist after phased refork" + + cli_pumactl "stop", unix: true + + wait_server + assert_operator Time.now - start, :<, 60 + end + + def test_prune_bundler_with_multiple_workers + skip_unless :fork + + cli_server "-q -C test/config/prune_bundler_with_multiple_workers.rb #{set_pumactl_args unix: true} -S #{@state_path}", unix: true + + socket = fast_connect(unix: true) + headers, body = read_response(socket) + + assert_includes headers, "200 OK" + assert_includes body, "embedded app" + + cli_pumactl "stop", unix: true + + wait_server + end + + def test_kill_unknown + skip_if :jruby + + # we run ls to get a 'safe' pid to pass off as puma in cli stop + # do not want to accidentally kill a valid other process + io = IO.popen(windows? ? "dir" : "ls") + safe_pid = io.pid + Process.wait safe_pid + + sout = StringIO.new + + e = assert_raises SystemExit do + Puma::ControlCLI.new(%W!-p #{safe_pid} stop!, sout).run + end + sout.rewind + # windows bad URI(is not URI?) 
+ assert_match(/No pid '\d+' found|bad URI ?\(is not URI\?\)/, sout.readlines.join("")) + assert_equal(1, e.status) + end + + # calls pumactl with both a config file and a state file, making sure that + # puma files are required, see https://github.com/puma/puma/issues/3186 + def test_require_dependencies + skip_if :jruby + conf_path = tmp_path '.config.rb' + @tcp_port = UniquePort.call + @control_tcp_port = UniquePort.call + + File.write conf_path , <<~CONF + state_path "#{@state_path}" + bind "tcp://127.0.0.1:#{@tcp_port}" + + workers 0 + + before_fork do + end + + activate_control_app "tcp://127.0.0.1:#{@control_tcp_port}", auth_token: "#{TOKEN}" + + app do |env| + [200, {}, ["Hello World"]] + end + CONF + + cli_server "-q -C #{conf_path}", no_bind: true, merge_err: true + + out = cli_pumactl_spawn "-F #{conf_path} restart", no_bind: true + + assert_includes out.read, "Command restart sent success" + + sleep 0.5 # give some time to restart + read_response connect + + out = cli_pumactl_spawn "-S #{@state_path} status", no_bind: true + assert_includes out.read, "Puma is started" + end + + def test_clustered_stats + skip_unless :fork + skip_unless :unix + + puma_version_pattern = "\\d+.\\d+.\\d+(\\.[a-z\\d]+)?" + + cli_server "-w2 -t2:2 -q test/rackup/hello.ru #{set_pumactl_args unix: true} -S #{@state_path}" + + get_worker_pids # waits for workers to boot + + resp_io = cli_pumactl "stats", unix: true + + status = JSON.parse resp_io.read.split("\n", 2).last + + assert_equal 2, status["workers"] + + sleep 0.5 # needed for GHA ? 
+ + resp_io = cli_pumactl "stats", unix: true + + body = resp_io.read.split("\n", 2).last + + expected_stats = /\{"started_at":"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z","workers":2,"phase":0,"booted_workers":2,"old_workers":0,"worker_status":\[\{"started_at":"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z","pid":\d+,"index":0,"phase":0,"booted":true,"last_checkin":"[^"]+","last_status":\{"backlog":0,"running":2,"pool_capacity":2,"max_threads":2,"requests_count":0\}\},\{"started_at":"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z","pid":\d+,"index":1,"phase":0,"booted":true,"last_checkin":"[^"]+","last_status":\{"backlog":0,"running":2,"pool_capacity":2,"max_threads":2,"requests_count":0\}\}\],"versions":\{"puma":"#{puma_version_pattern}","ruby":\{"engine":"\w+","version":"\d+.\d+.\d+","patchlevel":-?\d+\}\}\}/ + + assert_match(expected_stats, body) + end + + def control_gc_stats(unix: false) + cli_server "-t1:1 -q test/rackup/hello.ru #{set_pumactl_args unix: unix} -S #{@state_path}" + + key = Puma::IS_MRI || TRUFFLE_HEAD ? 
"count" : "used" + + resp_io = cli_pumactl "gc-stats", unix: unix + before = JSON.parse resp_io.read.split("\n", 2).last + gc_before = before[key].to_i + + 2.times { fast_connect } + + resp_io = cli_pumactl "gc", unix: unix + # below shows gc was called (200 reply) + assert_equal "Command gc sent success", resp_io.read.rstrip + + resp_io = cli_pumactl "gc-stats", unix: unix + after = JSON.parse resp_io.read.split("\n", 2).last + gc_after = after[key].to_i + + # Hitting the /gc route should increment the count by 1 + if key == "count" + assert_operator gc_before, :<, gc_after, "make sure a gc has happened" + elsif !Puma::IS_JRUBY + refute_equal gc_before, gc_after, "make sure a gc has happened" + end + end + + def test_control_gc_stats_tcp + @control_tcp_port = UniquePort.call + control_gc_stats + end + + def test_control_gc_stats_unix + skip_unless :unix + control_gc_stats unix: true + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_integration_single.rb b/vendor/cache/puma-fba741b91780/test/test_integration_single.rb new file mode 100644 index 000000000..caa227f25 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_integration_single.rb @@ -0,0 +1,288 @@ +require_relative "helper" +require_relative "helpers/integration" + +class TestIntegrationSingle < TestIntegration + parallelize_me! if ::Puma.mri? + + def workers ; 0 ; end + + def test_hot_restart_does_not_drop_connections_threads + ttl_reqs = Puma.windows? ? 500 : 1_000 + hot_restart_does_not_drop_connections num_threads: 5, total_requests: ttl_reqs + end + + def test_hot_restart_does_not_drop_connections + if Puma.windows? + hot_restart_does_not_drop_connections total_requests: 300 + else + hot_restart_does_not_drop_connections + end + end + + def test_usr2_restart + skip_unless_signal_exist? 
:USR2 + _, new_reply = restart_server_and_listen("-q test/rackup/hello.ru") + assert_equal "Hello World", new_reply + end + + # It does not share environments between multiple generations, which would break Dotenv + def test_usr2_restart_restores_environment + # jruby has a bug where setting `nil` into the ENV or `delete` do not change the + # next workers ENV + skip_if :jruby + skip_unless_signal_exist? :USR2 + + initial_reply, new_reply = restart_server_and_listen("-q test/rackup/hello-env.ru") + + assert_includes initial_reply, "Hello RAND" + assert_includes new_reply, "Hello RAND" + refute_equal initial_reply, new_reply + end + + def test_term_exit_code + skip_unless_signal_exist? :TERM + skip_if :jruby # JVM does not return correct exit code for TERM + + cli_server "test/rackup/hello.ru" + _, status = stop_server + + assert_equal 15, status + end + + def test_on_booted_and_on_stopped + skip_unless_signal_exist? :TERM + + cli_server "-C test/config/event_on_booted_and_on_stopped.rb -C test/config/event_on_booted_exit.rb test/rackup/hello.ru", + no_wait: true + + assert wait_for_server_to_include('on_booted called') + assert wait_for_server_to_include('on_stopped called') + end + + def test_term_suppress + skip_unless_signal_exist? :TERM + + cli_server "-C test/config/suppress_exception.rb test/rackup/hello.ru" + _, status = stop_server + + assert_equal 0, status + end + + def test_rack_url_scheme_default + skip_unless_signal_exist? :TERM + + cli_server("test/rackup/url_scheme.ru") + + reply = read_body(connect) + stop_server + + assert_match("http", reply) + end + + def test_conf_is_loaded_before_passing_it_to_binder + skip_unless_signal_exist? :TERM + + cli_server("-C test/config/rack_url_scheme.rb test/rackup/url_scheme.ru") + + reply = read_body(connect) + stop_server + + assert_match("https", reply) + end + + def test_prefer_rackup_file_specified_by_cli + skip_unless_signal_exist? 
:TERM + + cli_server "-C test/config/with_rackup_from_dsl.rb test/rackup/hello.ru" + reply = read_body(connect) + stop_server + + assert_match("Hello World", reply) + end + + def test_term_not_accepts_new_connections + skip_unless_signal_exist? :TERM + skip_if :jruby + + cli_server 'test/rackup/sleep.ru' + + _stdin, curl_stdout, _stderr, curl_wait_thread = Open3.popen3({ 'LC_ALL' => 'C' }, "curl http://#{HOST}:#{@tcp_port}/sleep10") + sleep 1 # ensure curl send a request + + Process.kill :TERM, @pid + assert wait_for_server_to_include('Gracefully stopping') # wait for server to begin graceful shutdown + + # Invoke a request which must be rejected + _stdin, _stdout, rejected_curl_stderr, rejected_curl_wait_thread = Open3.popen3("curl #{HOST}:#{@tcp_port}") + + assert nil != Process.getpgid(@server.pid) # ensure server is still running + assert nil != Process.getpgid(curl_wait_thread[:pid]) # ensure first curl invocation still in progress + + curl_wait_thread.join + rejected_curl_wait_thread.join + + assert_match(/Slept 10/, curl_stdout.read) + assert_match(/Connection refused|(Couldn't|Could not) connect to server/, rejected_curl_stderr.read) + + wait_server 15 + end + + def test_int_refuse + skip_unless_signal_exist? :INT + skip_if :jruby # seems to intermittently lockup JRuby CI + + cli_server 'test/rackup/hello.ru' + begin + sock = TCPSocket.new(HOST, @tcp_port) + sock.close + rescue => ex + fail("Port didn't open properly: #{ex.message}") + end + + Process.kill :INT, @pid + wait_server + + assert_raises(Errno::ECONNREFUSED) { TCPSocket.new(HOST, @tcp_port) } + end + + def test_siginfo_thread_print + skip_unless_signal_exist? :INFO + + cli_server 'test/rackup/hello.ru' + output = [] + t = Thread.new { output << @server.readlines } + Process.kill :INFO, @pid + Process.kill :INT , @pid + t.join + + assert_match "Thread: TID", output.join + end + + def test_write_to_log + skip_unless_signal_exist? 
:TERM + + suppress_output = '> /dev/null 2>&1' + + cli_server '-C test/config/t1_conf.rb test/rackup/hello.ru' + + system "curl http://localhost:#{@tcp_port}/ #{suppress_output}" + + stop_server + + log = File.read('t1-stdout') + + assert_match(%r!GET / HTTP/1\.1!, log) + ensure + File.unlink 't1-stdout' if File.file? 't1-stdout' + File.unlink 't1-pid' if File.file? 't1-pid' + end + + def test_puma_started_log_writing + skip_unless_signal_exist? :TERM + + cli_server '-C test/config/t2_conf.rb test/rackup/hello.ru' + + system "curl http://localhost:#{@tcp_port}/ > /dev/null 2>&1" + + out=`#{BASE} bin/pumactl -F test/config/t2_conf.rb status` + + stop_server + + log = File.read('t2-stdout') + + assert_match(%r!GET / HTTP/1\.1!, log) + assert(!File.file?("t2-pid")) + assert_equal("Puma is started\n", out) + ensure + File.unlink 't2-stdout' if File.file? 't2-stdout' + end + + def test_application_logs_are_flushed_on_write + cli_server "#{set_pumactl_args} test/rackup/write_to_stdout.ru" + + read_body connect + + cli_pumactl 'stop' + + assert wait_for_server_to_include("hello\n") + assert wait_for_server_to_include("Goodbye!") + + wait_server + end + + # listener is closed 'externally' while Puma is in the IO.select statement + def test_closed_listener + skip_unless_signal_exist? 
:TERM + + cli_server "test/rackup/close_listeners.ru", merge_err: true + connection = fast_connect + + begin + read_body connection + rescue EOFError + end + + begin + Timeout.timeout(5) do + begin + Process.kill :SIGTERM, @pid + rescue Errno::ESRCH + end + begin + Process.wait2 @pid + rescue Errno::ECHILD + end + end + rescue Timeout::Error + Process.kill :SIGKILL, @pid + assert false, "Process froze" + end + assert true + end + + def test_puma_debug_loaded_exts + cli_server "#{set_pumactl_args} test/rackup/hello.ru", puma_debug: true + + assert wait_for_server_to_include('Loaded Extensions:') + + cli_pumactl 'stop' + assert wait_for_server_to_include('Goodbye!') + wait_server + end + + def test_idle_timeout + cli_server "test/rackup/hello.ru", config: "idle_timeout 1" + + connect + + sleep 1.15 + + assert_raises Errno::ECONNREFUSED, "Connection refused" do + connect + end + end + + def test_pre_existing_unix_after_idle_timeout + skip_unless :unix + + File.open(@bind_path, mode: 'wb') { |f| f.puts 'pre existing' } + + cli_server "-q test/rackup/hello.ru", unix: :unix, config: "idle_timeout 1" + + sock = connection = connect(nil, unix: true) + read_body(connection) + + sleep 1.15 + + assert sock.wait_readable(1), 'Unexpected timeout' + assert_raises Puma.jruby? ? IOError : Errno::ECONNREFUSED, "Connection refused" do + connection = connect(nil, unix: true) + end + + assert File.exist?(@bind_path) + ensure + if UNIX_SKT_EXIST + File.unlink @bind_path if File.exist? 
@bind_path + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_integration_ssl.rb b/vendor/cache/puma-fba741b91780/test/test_integration_ssl.rb new file mode 100644 index 000000000..97391c561 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_integration_ssl.rb @@ -0,0 +1,324 @@ +require_relative 'helper' +require_relative "helpers/integration" + +if ::Puma::HAS_SSL # don't load any files if no ssl support + require "net/http" + require "openssl" + require_relative "helpers/test_puma/puma_socket" +end + +# These tests are used to verify that Puma works with SSL sockets. Only +# integration tests isolate the server from the test environment, so there +# should be a few SSL tests. +# +# For instance, since other tests make use of 'client' SSLSockets created by +# net/http, OpenSSL is loaded in the CI process. By shelling out with IO.popen, +# the server process isn't affected by whatever is loaded in the CI process. + +class TestIntegrationSSL < TestIntegration + parallelize_me! if ::Puma.mri? + + LOCALHOST = ENV.fetch 'PUMA_CI_DFLT_HOST', 'localhost' + + include TestPuma::PumaSocket + + def bind_port + @bind_port ||= UniquePort.call + @tcp_port = @bind_port + end + + def control_tcp_port + @control_tcp_port ||= UniquePort.call + end + + def with_server(config) + cli_server "-t1:1", config: config, no_bind: true + + http = Net::HTTP.new HOST, bind_port + http.use_ssl = true + http.verify_mode = OpenSSL::SSL::VERIFY_NONE + + yield http + end + + def test_ssl_run + cert_path = File.expand_path '../examples/puma', __dir__ + + config = <<~CONFIG + if ::Puma.jruby? 
+ keystore = '#{cert_path}/keystore.jks' + keystore_pass = 'jruby_puma' + + ssl_bind '#{HOST}', '#{bind_port}', { + keystore: keystore, + keystore_pass: keystore_pass, + verify_mode: 'none' + } + else + key = '#{cert_path}/puma_keypair.pem' + cert = '#{cert_path}/cert_puma.pem' + + ssl_bind '#{HOST}', '#{bind_port}', { + cert: cert, + key: key, + verify_mode: 'none' + } + end + + activate_control_app 'tcp://#{HOST}:#{control_tcp_port}', { auth_token: '#{TOKEN}' } + + app do |env| + [200, {}, [env['rack.url_scheme']]] + end + CONFIG + + with_server(config) do |http| + body = nil + http.start do + req = Net::HTTP::Get.new '/', {} + http.request(req) { |resp| body = resp.body } + end + assert_equal 'https', body + end + end + + # should use TLSv1.3 with OpenSSL 1.1 or later + def test_verify_client_cert_roundtrip(tls1_2 = nil) + cert_path = File.expand_path '../examples/puma/client_certs', __dir__ + bind_port + + cli_server "-t1:5 #{set_pumactl_args}", no_bind: true, config: <<~CONFIG + if ::Puma::IS_JRUBY + ssl_bind '#{LOCALHOST}', '#{@bind_port}', { + keystore: '#{cert_path}/keystore.jks', + keystore_pass: 'jruby_puma', + verify_mode: 'force_peer' + } + else + ssl_bind '#{LOCALHOST}', '#{@bind_port}', { + cert: '#{cert_path}/server.crt', + key: '#{cert_path}/server.key', + ca: '#{cert_path}/ca.crt', + verify_mode: 'force_peer' + } + end + threads 1, 5 + + app do |env| + [200, {}, [env['puma.peercert'].to_s]] + end + CONFIG + + client_cert = File.read "#{cert_path}/client.crt" + + body = send_http_read_resp_body host: LOCALHOST, port: @bind_port, ctx: new_ctx { |c| + ca = "#{cert_path}/ca.crt" + key = "#{cert_path}/client.key" + c.ca_file = ca + c.cert = ::OpenSSL::X509::Certificate.new client_cert + c.key = ::OpenSSL::PKey::RSA.new File.read(key) + c.verify_mode = ::OpenSSL::SSL::VERIFY_PEER + if tls1_2 + if c.respond_to? 
:max_version= + c.max_version = :TLS1_2 + else + c.ssl_version = :TLSv1_2 + end + end + } + + assert_equal client_cert, body + end + + def test_verify_client_cert_roundtrip_tls1_2 + test_verify_client_cert_roundtrip true + end + + def test_ssl_run_with_curl_client + skip_if :windows + + require 'stringio' + + cert_path = File.expand_path '../examples/puma/client_certs', __dir__ + bind_port + + cli_server "-t1:1", no_bind: true, config: <<~CONFIG + if ::Puma::IS_JRUBY + ssl_bind '#{LOCALHOST}', '#{@bind_port}', { + keystore: '#{cert_path}/keystore.jks', + keystore_pass: 'jruby_puma', + verify_mode: 'force_peer' + } + else + ssl_bind '#{LOCALHOST}', '#{@bind_port}', { + cert: '#{cert_path}/server.crt', + key: '#{cert_path}/server.key', + ca: '#{cert_path}/ca.crt', + verify_mode: 'force_peer' + } + end + + app { |_| [200, { 'Content-Type' => 'text/plain' }, ["HELLO", ' ', "THERE"]] } + CONFIG + + + ca = "#{cert_path}/ca.crt" + cert = "#{cert_path}/client.crt" + key = "#{cert_path}/client.key" + # NOTE: JRuby used to end up in a hang with TLS peer verification enabled + # it's easier to reproduce using an external client such as CURL (using net/http client the bug isn't triggered) + # also the "hang", being buffering related, seems to showcase better with TLS 1.2 than 1.3 + body = curl_and_get_response "https://#{LOCALHOST}:#{@bind_port}", + args: "--cacert #{ca} --cert #{cert} --key #{key} --tlsv1.2 --tls-max 1.2" + + assert_equal 'HELLO THERE', body + end + + def test_ssl_run_with_pem + skip_if :jruby + + config = <<~CONFIG + key_path = '#{File.expand_path '../examples/puma/puma_keypair.pem', __dir__}' + cert_path = '#{File.expand_path '../examples/puma/cert_puma.pem', __dir__}' + + ssl_bind '#{HOST}', '#{bind_port}', { + cert_pem: File.read(cert_path), + key_pem: File.read(key_path), + verify_mode: 'none' + } + + activate_control_app 'tcp://#{HOST}:#{control_tcp_port}', { auth_token: '#{TOKEN}' } + + app do |env| + [200, {}, [env['rack.url_scheme']]] + end + CONFIG 
+ + with_server(config) do |http| + body = nil + http.start do + req = Net::HTTP::Get.new '/', {} + http.request(req) { |resp| body = resp.body } + end + assert_equal 'https', body + end + end + + def test_ssl_run_with_localhost_authority + skip_if :jruby + + config = <<~CONFIG + require 'localhost' + ssl_bind '#{HOST}', '#{bind_port}' + + activate_control_app 'tcp://#{HOST}:#{control_tcp_port}', { auth_token: '#{TOKEN}' } + + app do |env| + [200, {}, [env['rack.url_scheme']]] + end + CONFIG + + with_server(config) do |http| + body = nil + http.start do + req = Net::HTTP::Get.new '/', {} + http.request(req) { |resp| body = resp.body } + end + assert_equal 'https', body + end + end + + def test_ssl_run_with_encrypted_key + skip_if :jruby + + cert_path = File.expand_path '../examples/puma', __dir__ + + config = <<~CONFIG + key_path = '#{cert_path}/encrypted_puma_keypair.pem' + cert_path = '#{cert_path}/cert_puma.pem' + key_command = ::Puma::IS_WINDOWS ? 'echo hello world' : + '#{cert_path}/key_password_command.sh' + + ssl_bind '#{HOST}', '#{bind_port}', { + cert: cert_path, + key: key_path, + verify_mode: 'none', + key_password_command: key_command + } + + activate_control_app 'tcp://#{HOST}:#{control_tcp_port}', { auth_token: '#{TOKEN}' } + + app do |env| + [200, {}, [env['rack.url_scheme']]] + end + CONFIG + + with_server(config) do |http| + body = nil + http.start do + req = Net::HTTP::Get.new '/', {} + http.request(req) { |resp| body = resp.body } + end + assert_equal 'https', body + end + end + + def test_ssl_run_with_encrypted_pem + skip_if :jruby + + cert_path = File.expand_path '../examples/puma', __dir__ + + config = <<~CONFIG + key_path = '#{cert_path}/encrypted_puma_keypair.pem' + cert_path = '#{cert_path}/cert_puma.pem' + key_command = ::Puma::IS_WINDOWS ? 
'echo hello world' : + '#{cert_path}/key_password_command.sh' + + ssl_bind '#{HOST}', '#{bind_port}', { + cert_pem: File.read(cert_path), + key_pem: File.read(key_path), + verify_mode: 'none', + key_password_command: key_command + } + + activate_control_app 'tcp://#{HOST}:#{control_tcp_port}', { auth_token: '#{TOKEN}' } + + app do |env| + [200, {}, [env['rack.url_scheme']]] + end + CONFIG + + with_server(config) do |http| + body = nil + http.start do + req = Net::HTTP::Get.new '/', {} + http.request(req) { |resp| body = resp.body } + end + assert_equal 'https', body + end + end + + private + + def curl_and_get_response(url, method: :get, args: nil); require 'open3' + cmd = "curl -s -v --show-error #{args} -X #{method.to_s.upcase} -k #{url}" + begin + out, err, status = Open3.capture3(cmd) + rescue Errno::ENOENT + fail "curl not available, make sure curl binary is installed and available on $PATH" + end + + if status.success? + http_status = err.match(/< HTTP\/1.1 (.*?)/)[1] || '0' # < HTTP/1.1 200 OK\r\n + if http_status.strip[0].to_i > 2 + warn out + fail "#{cmd.inspect} unexpected response: #{http_status}\n\n#{err}" + end + return out + else + warn out + fail "#{cmd.inspect} process failed: #{status}\n\n#{err}" + end + end + +end if ::Puma::HAS_SSL diff --git a/vendor/cache/puma-fba741b91780/test/test_integration_ssl_session.rb b/vendor/cache/puma-fba741b91780/test/test_integration_ssl_session.rb new file mode 100644 index 000000000..2b65fc6fb --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_integration_ssl_session.rb @@ -0,0 +1,188 @@ +# frozen_string_literal: true + +require_relative 'helper' +require_relative 'helpers/integration' + +# These tests are used to verify that Puma works with SSL sockets. Only +# integration tests isolate the server from the test environment, so there +# should be a few SSL tests. +# +# For instance, since other tests make use of 'client' SSLSockets created by +# net/http, OpenSSL is loaded in the CI process. 
By shelling out with IO.popen, +# the server process isn't affected by whatever is loaded in the CI process. + +class TestIntegrationSSLSession < TestIntegration + parallelize_me! if Puma::IS_MRI + + require "openssl" unless defined?(::OpenSSL::SSL) + + OSSL = ::OpenSSL::SSL + + CLIENT_HAS_TLS1_3 = OSSL.const_defined? :TLS1_3_VERSION + + GET = "GET / HTTP/1.1\r\nConnection: close\r\n\r\n" + + RESP = "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 5\r\n\r\nhttps" + + CERT_PATH = File.expand_path "../examples/puma/client_certs", __dir__ + + def teardown + return if skipped? + # stop server + sock = TCPSocket.new HOST, control_tcp_port + @ios_to_close << sock + sock.syswrite "GET /stop?token=#{TOKEN} HTTP/1.1\r\n\r\n" + sock.read + assert_match 'Goodbye!', @server.read + + @server.close unless @server&.closed? + @server = nil + super + end + + def bind_port + @bind_port ||= UniquePort.call + end + + def control_tcp_port + @control_tcp_port ||= UniquePort.call + end + + def set_reuse(reuse) + <<~RUBY + key = '#{File.expand_path '../examples/puma/client_certs/server.key', __dir__}' + cert = '#{File.expand_path '../examples/puma/client_certs/server.crt', __dir__}' + ca = '#{File.expand_path '../examples/puma/client_certs/ca.crt', __dir__}' + + ssl_bind '#{HOST}', '#{bind_port}', { + cert: cert, + key: key, + ca: ca, + verify_mode: 'none', + reuse: #{reuse} + } + + activate_control_app 'tcp://#{HOST}:#{control_tcp_port}', { auth_token: '#{TOKEN}' } + + app do |env| + [200, {}, [env['rack.url_scheme']]] + end + RUBY + end + + def with_server(config) + config_file = Tempfile.new %w(config .rb) + config_file.write config + config_file.close + config_file.path + + # start server + cmd = "#{BASE} bin/puma -C #{config_file.path}" + @server = IO.popen cmd, 'r' + wait_for_server_to_boot log: false + @pid = @server.pid + + yield + end + + def run_session(reuse, tls = nil) + config = set_reuse reuse + + with_server(config) { ssl_client tls_vers: tls } + end + + def 
test_dflt + reused = run_session true + assert reused, 'session was not reused' + end + + def test_dflt_tls1_2 + reused = run_session true, :TLS1_2 + assert reused, 'session was not reused' + end + + def test_dflt_tls1_3 + skip 'TLSv1.3 unavailable' unless Puma::MiniSSL::HAS_TLS1_3 && CLIENT_HAS_TLS1_3 + reused = run_session true, :TLS1_3 + assert reused, 'session was not reused' + end + + def test_1000_tls1_2 + reused = run_session '{size: 1_000}', :TLS1_2 + assert reused, 'session was not reused' + end + + def test_1000_10_tls1_2 + reused = run_session '{size: 1000, timeout: 10}', :TLS1_2 + assert reused, 'session was not reused' + end + + def test__10_tls1_2 + reused = run_session '{timeout: 10}', :TLS1_2 + assert reused, 'session was not reused' + end + + def test_off_tls1_2 + ssl_vers = Puma::MiniSSL::OPENSSL_LIBRARY_VERSION + old_ssl = ssl_vers.include?(' 1.0.') || ssl_vers.match?(/ 1\.1\.1[ a-e]/) + skip 'Requires 1.1.1f or later' if old_ssl + reused = run_session 'nil', :TLS1_2 + assert reused, 'session was not reused' + end + + # TLSv1.3 reuse is always on + def test_off_tls1_3 + skip 'TLSv1.3 unavailable' unless Puma::MiniSSL::HAS_TLS1_3 && CLIENT_HAS_TLS1_3 + reused = run_session 'nil' + assert reused, 'TLSv1.3 session was not reused' + end + + def client_skt(tls_vers = nil, session_pems = [], queue = nil) + ctx = OSSL::SSLContext.new + ctx.verify_mode = OSSL::VERIFY_NONE + ctx.session_cache_mode = OSSL::SSLContext::SESSION_CACHE_CLIENT + if tls_vers + if ctx.respond_to? 
:max_version= + ctx.max_version = tls_vers + ctx.min_version = tls_vers + else + ctx.ssl_version = tls_vers.to_s.sub('TLS', 'TLSv').to_sym + end + end + ctx.session_new_cb = ->(ary) { + queue << true if queue + session_pems << ary.last.to_pem + } + + skt = OSSL::SSLSocket.new TCPSocket.new(HOST, bind_port), ctx + skt.sync_close = true + skt + end + + def ssl_client(tls_vers: nil) + queue = Thread::Queue.new + session_pems = [] + skt = client_skt tls_vers, session_pems, queue + skt.connect + + skt.syswrite GET + skt.to_io.wait_readable 2 + assert_equal RESP, skt.sysread(1_024) + skt.sysclose + queue.pop # wait for cb session to be added to first client + + skt = client_skt tls_vers, session_pems + skt.session = OSSL::Session.new(session_pems[0]) + skt.connect + + skt.syswrite GET + skt.to_io.wait_readable 2 + assert_equal RESP, skt.sysread(1_024) + queue.close + queue = nil + + skt.session_reused? + ensure + skt&.sysclose unless skt&.closed? + end +end if Puma::HAS_SSL && Puma::IS_MRI diff --git a/vendor/cache/puma-fba741b91780/test/test_iobuffer.rb b/vendor/cache/puma-fba741b91780/test/test_iobuffer.rb new file mode 100644 index 000000000..f30ad8c57 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_iobuffer.rb @@ -0,0 +1,37 @@ +require_relative "helper" + +require "puma/io_buffer" + +class TestIOBuffer < Minitest::Test + attr_accessor :iobuf + def setup + self.iobuf = Puma::IOBuffer.new + end + + def test_initial_size + assert_equal 0, iobuf.size + end + + def test_append_op + iobuf << "abc" + assert_equal "abc", iobuf.to_s + iobuf << "123" + assert_equal "abc123", iobuf.to_s + assert_equal 6, iobuf.size + end + + def test_append + expected = "mary had a little lamb" + iobuf.append("mary", " ", "had ", "a little", " lamb") + assert_equal expected, iobuf.to_s + assert_equal expected.length, iobuf.size + end + + def test_reset + iobuf << "content" + assert_equal "content", iobuf.to_s + iobuf.reset + assert_equal 0, iobuf.size + assert_equal "", iobuf.to_s 
+ end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_json_serialization.rb b/vendor/cache/puma-fba741b91780/test/test_json_serialization.rb new file mode 100644 index 000000000..3c079aee8 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_json_serialization.rb @@ -0,0 +1,107 @@ +require_relative "helper" +require "json" +require "puma/json_serialization" + +class TestJSONSerialization < Minitest::Test + parallelize_me! unless JRUBY_HEAD + + def test_json_generates_string_for_hash_with_string_keys + value = { "key" => "value" } + assert_puma_json_generates_string '{"key":"value"}', value + end + + def test_json_generates_string_for_hash_with_symbol_keys + value = { key: 'value' } + assert_puma_json_generates_string '{"key":"value"}', value, expected_roundtrip: { "key" => "value" } + end + + def test_generate_raises_error_for_unexpected_key_type + value = { [1] => 'b' } + ex = assert_raises Puma::JSONSerialization::SerializationError do + Puma::JSONSerialization.generate value + end + assert_equal 'Could not serialize object of type Array as object key', ex.message + end + + def test_json_generates_string_for_array_of_integers + value = [1, 2, 3] + assert_puma_json_generates_string '[1,2,3]', value + end + + def test_json_generates_string_for_array_of_strings + value = ["a", "b", "c"] + assert_puma_json_generates_string '["a","b","c"]', value + end + + def test_json_generates_string_for_nested_arrays + value = [1, [2, [3]]] + assert_puma_json_generates_string '[1,[2,[3]]]', value + end + + def test_json_generates_string_for_integer + value = 42 + assert_puma_json_generates_string '42', value + end + + def test_json_generates_string_for_float + value = 1.23 + assert_puma_json_generates_string '1.23', value + end + + def test_json_escapes_strings_with_quotes + value = 'a"' + assert_puma_json_generates_string '"a\""', value + end + + def test_json_escapes_strings_with_backslashes + value = 'a\\' + assert_puma_json_generates_string '"a\\\\"', value 
+ end + + def test_json_escapes_strings_with_null_byte + value = "\x00" + assert_puma_json_generates_string '"\u0000"', value + end + + def test_json_escapes_strings_with_unicode_information_separator_one + value = "\x1f" + assert_puma_json_generates_string '"\u001F"', value + end + + def test_json_generates_string_for_true + value = true + assert_puma_json_generates_string 'true', value + end + + def test_json_generates_string_for_false + value = false + assert_puma_json_generates_string 'false', value + end + + def test_json_generates_string_for_nil + value = nil + assert_puma_json_generates_string 'null', value + end + + def test_generate_raises_error_for_unexpected_value_type + value = /abc/ + ex = assert_raises Puma::JSONSerialization::SerializationError do + Puma::JSONSerialization.generate value + end + assert_equal 'Unexpected value of type Regexp', ex.message + end + + private + + def assert_puma_json_generates_string(expected_output, value_to_serialize, expected_roundtrip: nil) + actual_output = Puma::JSONSerialization.generate(value_to_serialize) + assert_equal expected_output, actual_output + + if value_to_serialize.nil? + assert_nil ::JSON.parse(actual_output) + else + expected_roundtrip ||= value_to_serialize + assert_equal expected_roundtrip, ::JSON.parse(actual_output) + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_launcher.rb b/vendor/cache/puma-fba741b91780/test/test_launcher.rb new file mode 100644 index 000000000..aa8590d09 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_launcher.rb @@ -0,0 +1,173 @@ +require_relative "helper" +require_relative "helpers/tmp_path" + +require "puma/configuration" +require 'puma/log_writer' + +# Intermittent failures & errors when run parallel in GHA, local use may run fine. 
+ + +class TestLauncher < Minitest::Test + include TmpPath + + def test_prints_thread_traces + create_launcher.thread_status do |name, _backtrace| + assert_match "Thread: TID", name + end + end + + def test_pid_file + pid_path = tmp_path('.pid') + + conf = Puma::Configuration.new do |c| + c.pidfile pid_path + end + + create_launcher(conf).write_state + + assert_equal File.read(pid_path).strip.to_i, Process.pid + ensure + File.unlink pid_path + end + + def test_state_permission_0640 + state_path = tmp_path('.state') + state_permission = 0640 + + conf = Puma::Configuration.new do |c| + c.state_path state_path + c.state_permission state_permission + end + + create_launcher(conf).write_state + + assert File.stat(state_path).mode.to_s(8)[-4..-1], state_permission + ensure + File.unlink state_path + end + + def test_state_permission_nil + state_path = tmp_path('.state') + + conf = Puma::Configuration.new do |c| + c.state_path state_path + c.state_permission nil + end + + create_launcher(conf).write_state + + assert File.exist?(state_path) + ensure + File.unlink state_path + end + + def test_no_state_permission + state_path = tmp_path('.state') + + conf = Puma::Configuration.new do |c| + c.state_path state_path + end + + create_launcher(conf).write_state + + assert File.exist?(state_path) + ensure + File.unlink state_path + end + + def test_puma_stats + conf = Puma::Configuration.new do |c| + c.app -> {[200, {}, ['']]} + end + launcher = create_launcher(conf) + launcher.events.on_booted { + sleep 1.1 unless Puma.mri? + launcher.stop + } + launcher.run + sleep 1 unless Puma.mri? 
+ Puma::Server::STAT_METHODS.each do |stat| + assert_includes launcher.stats, stat + end + end + + def test_puma_stats_clustered + skip_unless :fork + + queue_booted = Queue.new + stopped = nil + status = nil + + conf = Puma::Configuration.new do |c| + c.app -> {[200, {}, ['']]} + c.workers 1 + end + launcher = create_launcher(conf) + launcher.events.on_booted { queue_booted << nil } + + th_stats = Thread.new do + queue_booted.pop + sleep Puma::Configuration::DEFAULTS[:worker_check_interval] + 1 + status = launcher.stats[:worker_status]&.first[:last_status] + launcher.stop + stopped = true + end + + launcher.run + assert th_stats.join(10) + + refute_nil status + + Puma::Server::STAT_METHODS.each do |stat| + assert_includes status, stat + end + ensure + launcher&.stop unless stopped + end + + def test_log_config_enabled + env = {'PUMA_LOG_CONFIG' => '1'} + + launcher = create_launcher env: env + + log = launcher.log_writer.stdout.string + + # the below confirms an exact match, allowing for line order differences + launcher.config.final_options.each do |config_key, value| + line = "- #{config_key}: #{value}\n" + assert_includes log, line + log.sub! line, '' + end + assert_equal 'Configuration:', log.strip + end + + def test_log_config_disabled + refute_match(/Configuration:/, create_launcher.log_writer.stdout.string) + end + + def test_fire_on_stopped + conf = Puma::Configuration.new do |c| + c.app -> {[200, {}, ['']]} + end + + is_stopped = nil + + launcher = create_launcher(conf) + launcher.events.on_booted { + sleep 1.1 unless Puma.mri? + launcher.stop + } + launcher.events.on_stopped { is_stopped = true } + + launcher.run + sleep 0.2 unless Puma.mri? 
+ assert is_stopped, "on_stopped not called" + end + + private + + def create_launcher(config = Puma::Configuration.new, lw = Puma::LogWriter.strings, **kw) + config.options[:binds] = ["tcp://127.0.0.1:#{UniquePort.call}"] + Puma::Launcher.new(config, log_writer: lw, **kw) + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_log_writer.rb b/vendor/cache/puma-fba741b91780/test/test_log_writer.rb new file mode 100644 index 000000000..83dcfc54d --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_log_writer.rb @@ -0,0 +1,180 @@ +require 'puma/detect' +require 'puma/log_writer' +require_relative "helper" + +class TestLogWriter < Minitest::Test + def test_null + log_writer = Puma::LogWriter.null + + assert_instance_of Puma::NullIO, log_writer.stdout + assert_instance_of Puma::NullIO, log_writer.stderr + assert_equal log_writer.stdout, log_writer.stderr + end + + def test_strings + log_writer = Puma::LogWriter.strings + + assert_instance_of StringIO, log_writer.stdout + assert_instance_of StringIO, log_writer.stderr + end + + def test_stdio + log_writer = Puma::LogWriter.stdio + + assert_equal STDOUT, log_writer.stdout + assert_equal STDERR, log_writer.stderr + end + + def test_stdio_respects_sync + log_writer = Puma::LogWriter.stdio + + assert_equal STDOUT.sync, log_writer.stdout.sync + assert_equal STDERR.sync, log_writer.stderr.sync + assert_equal STDOUT, log_writer.stdout + assert_equal STDERR, log_writer.stderr + end + + def test_log_writes_to_stdout + out, _ = capture_io do + Puma::LogWriter.stdio.log("ready") + end + + assert_equal "ready\n", out + end + + def test_null_log_does_nothing + out, _ = capture_io do + Puma::LogWriter.null.log("ready") + end + + assert_equal "", out + end + + def test_write_writes_to_stdout + out, _ = capture_io do + Puma::LogWriter.stdio.write("ready") + end + + assert_equal "ready", out + end + + def test_debug_writes_to_stdout_if_env_is_present + original_debug, ENV["PUMA_DEBUG"] = ENV["PUMA_DEBUG"], "1" + + out, 
_ = capture_io do + Puma::LogWriter.stdio.debug("ready") + end + + assert_equal "% ready\n", out + ensure + ENV["PUMA_DEBUG"] = original_debug + end + + def test_debug_not_write_to_stdout_if_env_is_not_present + out, _ = capture_io do + Puma::LogWriter.stdio.debug("ready") + end + + assert_empty out + end + + def test_error_writes_to_stderr_and_exits + did_exit = false + + _, err = capture_io do + begin + Puma::LogWriter.stdio.error("interrupted") + rescue SystemExit + did_exit = true + ensure + assert did_exit + end + end + + assert_match %r!ERROR: interrupted!, err + end + + def test_pid_formatter + pid = Process.pid + + out, _ = capture_io do + log_writer = Puma::LogWriter.stdio + + log_writer.formatter = Puma::LogWriter::PidFormatter.new + + log_writer.write("ready") + end + + assert_equal "[#{ pid }] ready", out + end + + def test_custom_log_formatter + custom_formatter = proc { |str| "-> #{ str }" } + + out, _ = capture_io do + log_writer = Puma::LogWriter.stdio + + log_writer.formatter = custom_formatter + + log_writer.write("ready") + end + + assert_equal "-> ready", out + end + + def test_parse_error + app = proc { |_env| [200, {"Content-Type" => "plain/text"}, ["hello\n"]] } + log_writer = Puma::LogWriter.strings + server = Puma::Server.new app, nil, {log_writer: log_writer} + + host = '127.0.0.1' + port = (server.add_tcp_listener host, 0).addr[1] + server.run + + sock = TCPSocket.new host, port + path = "/" + params = "a"*1024*10 + + sock << "GET #{path}?a=#{params} HTTP/1.1\r\nConnection: close\r\n\r\n" + sock.read + sleep 0.1 # important so that the previous data is sent as a packet + assert_match %r!HTTP parse error, malformed request!, log_writer.stderr.string + assert_match %r!\("GET #{path}" - \(-\)\)!, log_writer.stderr.string + ensure + sock.close if sock && !sock.closed? + server.stop true + end + + # test_puma_server_ssl.rb checks that ssl errors are raised correctly, + # but it mocks the actual error code. 
This test the code, but it will + # break if the logged message changes + def test_ssl_error + log_writer = Puma::LogWriter.strings + + ssl_mock = -> (addr, subj) { + obj = Object.new + obj.define_singleton_method(:peeraddr) { addr } + if subj + cert = Object.new + cert.define_singleton_method(:subject) { subj } + obj.define_singleton_method(:peercert) { cert } + else + obj.define_singleton_method(:peercert) { nil } + end + obj + } + + log_writer.ssl_error OpenSSL::SSL::SSLError, ssl_mock.call(['127.0.0.1'], 'test_cert') + error = log_writer.stderr.string + assert_includes error, "SSL error" + assert_includes error, "peer: 127.0.0.1" + assert_includes error, "cert: test_cert" + + log_writer.ssl_error OpenSSL::SSL::SSLError, ssl_mock.call(nil, nil) + error = log_writer.stderr.string.lines[1] + assert_includes error, "SSL error" + assert_includes error, "peer: " + assert_includes error, "cert: :" + + end if ::Puma::HAS_SSL +end diff --git a/vendor/cache/puma-fba741b91780/test/test_minissl.rb b/vendor/cache/puma-fba741b91780/test/test_minissl.rb new file mode 100644 index 000000000..d27aa8fe8 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_minissl.rb @@ -0,0 +1,94 @@ +require_relative "helper" + +require "puma/minissl" if ::Puma::HAS_SSL + +class TestMiniSSL < Minitest::Test + + if Puma.jruby? 
+ def test_raises_with_invalid_keystore_file + ctx = Puma::MiniSSL::Context.new + + exception = assert_raises(ArgumentError) { ctx.keystore = "/no/such/keystore" } + assert_equal("Keystore file '/no/such/keystore' does not exist", exception.message) + end + + def test_raises_with_unreadable_keystore_file + ctx = Puma::MiniSSL::Context.new + + File.stub(:exist?, true) do + File.stub(:readable?, false) do + exception = assert_raises(ArgumentError) { ctx.keystore = "/unreadable/keystore" } + assert_equal("Keystore file '/unreadable/keystore' is not readable", exception.message) + end + end + end + else + def test_raises_with_invalid_key_file + ctx = Puma::MiniSSL::Context.new + + exception = assert_raises(ArgumentError) { ctx.key = "/no/such/key" } + assert_equal("Key file '/no/such/key' does not exist", exception.message) + end + + def test_raises_with_unreadable_key_file + ctx = Puma::MiniSSL::Context.new + + File.stub(:exist?, true) do + File.stub(:readable?, false) do + exception = assert_raises(ArgumentError) { ctx.key = "/unreadable/key" } + assert_equal("Key file '/unreadable/key' is not readable", exception.message) + end + end + end + + def test_raises_with_invalid_cert_file + ctx = Puma::MiniSSL::Context.new + + exception = assert_raises(ArgumentError) { ctx.cert = "/no/such/cert" } + assert_equal("Cert file '/no/such/cert' does not exist", exception.message) + end + + def test_raises_with_unreadable_cert_file + ctx = Puma::MiniSSL::Context.new + + File.stub(:exist?, true) do + File.stub(:readable?, false) do + exception = assert_raises(ArgumentError) { ctx.key = "/unreadable/cert" } + assert_equal("Key file '/unreadable/cert' is not readable", exception.message) + end + end + end + + def test_raises_with_invalid_key_pem + ctx = Puma::MiniSSL::Context.new + + exception = assert_raises(ArgumentError) { ctx.key_pem = nil } + assert_equal("'key_pem' is not a String", exception.message) + end + + def test_raises_with_unreadable_ca_file + ctx = 
Puma::MiniSSL::Context.new + + File.stub(:exist?, true) do + File.stub(:readable?, false) do + exception = assert_raises(ArgumentError) { ctx.ca = "/unreadable/cert" } + assert_equal("ca file '/unreadable/cert' is not readable", exception.message) + end + end + end + + def test_raises_with_invalid_cert_pem + ctx = Puma::MiniSSL::Context.new + + exception = assert_raises(ArgumentError) { ctx.cert_pem = nil } + assert_equal("'cert_pem' is not a String", exception.message) + end + + def test_raises_with_invalid_key_password_command + ctx = Puma::MiniSSL::Context.new + ctx.key_password_command = '/unreadable/decrypt_command' + + assert_raises(Errno::ENOENT) { ctx.key_password } + end + end +end if ::Puma::HAS_SSL diff --git a/vendor/cache/puma-fba741b91780/test/test_normalize.rb b/vendor/cache/puma-fba741b91780/test/test_normalize.rb new file mode 100644 index 000000000..60e61c3dd --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_normalize.rb @@ -0,0 +1,57 @@ +# frozen_string_literal: true + +require_relative "helper" + +require "puma/request" + +class TestNormalize < Minitest::Test + parallelize_me! 
+ + include Puma::Request + + def test_comma_headers + env = { + "HTTP_X_FORWARDED_FOR" => "1.1.1.1", + "HTTP_X_FORWARDED,FOR" => "2.2.2.2", + } + + req_env_post_parse env + + expected = { + "HTTP_X_FORWARDED_FOR" => "1.1.1.1", + } + + assert_equal expected, env + + # Test that the iteration order doesn't matter + + env = { + "HTTP_X_FORWARDED,FOR" => "2.2.2.2", + "HTTP_X_FORWARDED_FOR" => "1.1.1.1", + } + + req_env_post_parse env + + expected = { + "HTTP_X_FORWARDED_FOR" => "1.1.1.1", + } + + assert_equal expected, env + end + + def test_unmaskable_headers + env = { + "HTTP_CONTENT,LENGTH" => "100000", + "HTTP_TRANSFER,ENCODING" => "chunky" + } + + req_env_post_parse env + + expected = { + "HTTP_CONTENT,LENGTH" => "100000", + "HTTP_TRANSFER,ENCODING" => "chunky" + } + + assert_equal expected, env + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_null_io.rb b/vendor/cache/puma-fba741b91780/test/test_null_io.rb new file mode 100644 index 000000000..c84739bec --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_null_io.rb @@ -0,0 +1,189 @@ +# frozen_string_literal: true + +require_relative "helper" + +require "puma/null_io" + +class TestNullIO < Minitest::Test + parallelize_me! + + attr_accessor :nio + + def setup + self.nio = Puma::NullIO.new + end + + def test_eof_returns_true + assert nio.eof? 
+ end + + def test_gets_returns_nil + assert_nil nio.gets + end + + def test_string_returns_empty_string + assert_equal "", nio.string + end + + def test_each_never_yields + nio.instance_variable_set(:@foo, :baz) + nio.each { @foo = :bar } + assert_equal :baz, nio.instance_variable_get(:@foo) + end + + def test_read_with_no_arguments + assert_equal "", nio.read + end + + def test_read_with_nil_length + assert_equal "", nio.read(nil) + end + + def test_read_with_zero_length + assert_equal "", nio.read(0) + end + + def test_read_with_positive_integer_length + assert_nil nio.read(1) + end + + def test_read_with_negative_length + error = assert_raises ArgumentError do + nio.read(-42) + end + # 2nd match is TruffleRuby + assert_match(/negative length -42 given|length must not be negative/, error.message) + end + + def test_read_with_nil_buffer + assert_equal "", nio.read(nil, nil) + assert_equal "", nio.read(0, nil) + assert_nil nio.read(1, nil) + end + + class ImplicitString + def to_str + "ImplicitString".b + end + end + + def test_read_with_implicit_string_like_buffer + assert_equal "", nio.read(nil, ImplicitString.new) + end + + def test_read_with_invalid_buffer + error = assert_raises TypeError do + nio.read(nil, Object.new) + end + assert_includes error.message, "no implicit conversion of Object into String" + + error = assert_raises TypeError do + nio.read(0, Object.new) + end + + error = assert_raises TypeError do + nio.read(1, Object.new) + end + assert_includes error.message, "no implicit conversion of Object into String" + end + + def test_read_with_frozen_buffer + # Remove when Ruby 2.4 is no longer supported + err = defined? ::FrozenError ? 
::FrozenError : ::RuntimeError + + assert_raises err do + nio.read(nil, "".freeze) + end + + assert_raises err do + nio.read(0, "".freeze) + end + + assert_raises err do + nio.read(20, "".freeze) + end + end + + def test_read_with_length_and_buffer + buf = "random_data".b + assert_nil nio.read(1, buf) + assert_equal "".b, buf + end + + def test_read_with_buffer + buf = "random_data".b + assert_same buf, nio.read(nil, buf) + assert_equal "", buf + end + + def test_size + assert_equal 0, nio.size + end + + def test_pos + assert_equal 0, nio.pos + end + + def test_seek_returns_0 + assert_equal 0, nio.seek(0) + assert_equal 0, nio.seek(100) + end + + def test_seek_negative_raises + error = assert_raises ArgumentError do + nio.read(-1) + end + + # TruffleRuby - length must not be negative + assert_match(/negative length -1 given|length must not be negative/, error.message) + end + + def test_sync_returns_true + assert_equal true, nio.sync + end + + def test_flush_returns_self + assert_equal nio, nio.flush + end + + def test_closed_returns_false + assert_equal false, nio.closed? + end + + def test_set_encoding + assert_equal nio, nio.set_encoding(Encoding::BINARY) + end + + def test_external_encoding + assert_equal Encoding::ASCII_8BIT, nio.external_encoding + end + + def test_binmode + assert_equal nio, nio.binmode + end + + def test_binmode? + assert nio.binmode? + end +end + +# Run the same tests but against an empty file to +# ensure all the test behavior is accurate +class TestNullIOConformance < TestNullIO + def setup + # client.rb sets 'binmode` on all Tempfiles + self.nio = ::Tempfile.create.binmode + nio.sync = true + end + + def teardown + return unless nio.is_a? 
::File + nio.close + File.unlink nio.path + end + + def test_string_returns_empty_string + self.nio = StringIO.new + super + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_out_of_band_server.rb b/vendor/cache/puma-fba741b91780/test/test_out_of_band_server.rb new file mode 100644 index 000000000..5f20b02da --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_out_of_band_server.rb @@ -0,0 +1,169 @@ +require_relative "helper" + +class TestOutOfBandServer < Minitest::Test + parallelize_me! + + def setup + @ios = [] + @server = nil + @oob_finished = ConditionVariable.new + @app_finished = ConditionVariable.new + end + + def teardown + @oob_finished.broadcast + @app_finished.broadcast + @server&.stop true + + @ios.each do |io| + begin + io.close if io.is_a?(IO) && !io.closed? + rescue + ensure + io = nil + end + end + end + + def new_connection + TCPSocket.new('127.0.0.1', @port).tap {|s| @ios << s} + rescue IOError + Puma::Util.purge_interrupt_queue + retry + end + + def send_http(req) + new_connection << req + end + + def send_http_and_read(req) + send_http(req).read + end + + def oob_server(**options) + @request_count = 0 + @oob_count = 0 + in_oob = Mutex.new + @mutex = Mutex.new + oob_wait = options.delete(:oob_wait) + oob = -> do + in_oob.synchronize do + @mutex.synchronize do + @oob_count += 1 + @oob_finished.signal + @oob_finished.wait(@mutex, 1) if oob_wait + end + end + end + app_wait = options.delete(:app_wait) + app = ->(_) do + raise 'OOB conflict' if in_oob.locked? + @mutex.synchronize do + @request_count += 1 + @app_finished.signal + @app_finished.wait(@mutex, 1) if app_wait + end + [200, {}, [""]] + end + + options[:min_threads] ||= 1 + options[:max_threads] ||= 1 + options[:log_writer] ||= Puma::LogWriter.strings + + @server = Puma::Server.new app, nil, out_of_band: [oob], **options + @port = (@server.add_tcp_listener '127.0.0.1', 0).addr[1] + @server.run + sleep 0.15 if Puma.jruby? 
+ end + + # Sequential requests should trigger out_of_band after every request. + def test_sequential + n = 100 + oob_server + n.times do + @mutex.synchronize do + send_http "GET / HTTP/1.0\r\n\r\n" + @oob_finished.wait(@mutex, 1) + end + end + assert_equal n, @request_count + assert_equal n, @oob_count + end + + # Stream of requests on concurrent connections should trigger + # out_of_band hooks only once after the final request. + def test_stream + oob_server app_wait: true, max_threads: 2 + n = 100 + Array.new(n) {send_http("GET / HTTP/1.0\r\n\r\n")} + Thread.pass until @request_count == n + @mutex.synchronize do + @app_finished.signal + @oob_finished.wait(@mutex, 1) + end + assert_equal n, @request_count + assert_equal 1, @oob_count + end + + # New requests should not get processed while OOB is running. + def test_request_overlapping_hook + oob_server oob_wait: true, max_threads: 2 + + # Establish connection for Req2 before OOB + req2 = new_connection + sleep 0.01 + + @mutex.synchronize do + send_http "GET / HTTP/1.0\r\n\r\n" + @oob_finished.wait(@mutex) # enter OOB + + # Send Req2 + req2 << "GET / HTTP/1.0\r\n\r\n" + # If Req2 is processed now it raises 'OOB Conflict' in the response. + sleep 0.01 + + @oob_finished.signal # exit OOB + # Req2 should be processed now. + @oob_finished.wait(@mutex, 1) # enter OOB + @oob_finished.signal # exit OOB + end + + refute_match(/OOB conflict/, req2.read) + end + + # Partial requests should not trigger OOB. + def test_partial_request + oob_server + new_connection.close + sleep 0.01 + assert_equal 0, @oob_count + end + + # OOB should be triggered following a completed request + # concurrent with other partial requests. + def test_partial_concurrent + oob_server max_threads: 2 + @mutex.synchronize do + send_http("GET / HTTP/1.0\r\n\r\n") + 100.times {new_connection.close} + @oob_finished.wait(@mutex, 1) + end + assert_equal 1, @oob_count + end + + # OOB should block new connections from being accepted. 
+ def test_blocks_new_connection + oob_server oob_wait: true, max_threads: 2 + @mutex.synchronize do + send_http("GET / HTTP/1.0\r\n\r\n") + @oob_finished.wait(@mutex) + end + accepted = false + io = @server.binder.ios.last + io.stub(:accept_nonblock, -> {accepted = true; new_connection}) do + new_connection.close + sleep 0.01 + end + refute accepted, 'New connection accepted during out of band' + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_persistent.rb b/vendor/cache/puma-fba741b91780/test/test_persistent.rb new file mode 100644 index 000000000..b7fb4c80e --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_persistent.rb @@ -0,0 +1,240 @@ +# frozen_string_literal: true + +require_relative "helper" +require_relative "helpers/test_puma/puma_socket" + +class TestPersistent < Minitest::Test + parallelize_me! + + include ::TestPuma::PumaSocket + + HOST = "127.0.0.1" + + def setup + @body = ["Hello"] + + @valid_request = "GET / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\n\r\n" + + @valid_response = <<~RESP.gsub("\n", "\r\n").rstrip + HTTP/1.1 200 OK + X-Header: Works + Content-Length: 5 + + Hello + RESP + + @close_request = "GET / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n" + @http10_request = "GET / HTTP/1.0\r\nHost: test.com\r\nContent-Type: text/plain\r\n\r\n" + @keep_request = "GET / HTTP/1.0\r\nHost: test.com\r\nContent-Type: text/plain\r\nConnection: Keep-Alive\r\n\r\n" + + @valid_post = "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 5\r\n\r\nhello" + @valid_no_body = "GET / HTTP/1.1\r\nHost: test.com\r\nX-Status: 204\r\nContent-Type: text/plain\r\n\r\n" + + @headers = { "X-Header" => "Works" } + @inputs = [] + + @simple = lambda do |env| + @inputs << env['rack.input'] + status = Integer(env['HTTP_X_STATUS'] || 200) + [status, @headers, @body] + end + + opts = {min_thread: 1, max_threads: 1} + @server = Puma::Server.new @simple, nil, opts + 
@bind_port = (@server.add_tcp_listener HOST, 0).addr[1] + @server.run + sleep 0.15 if Puma.jruby? + end + + def teardown + @server.stop(true) + end + + def test_one_with_content_length + response = send_http_read_response @valid_request + + assert_equal @valid_response, response + end + + def test_two_back_to_back + socket = send_http @valid_request + response = socket.read_response + + assert_equal @valid_response, response + + response = socket.req_write(@valid_request).read_response + + assert_equal @valid_response, response + end + + def test_post_then_get + socket = send_http @valid_post + response = socket.read_response + + expected = <<~RESP.gsub("\n", "\r\n").rstrip + HTTP/1.1 200 OK + X-Header: Works + Content-Length: 5 + + Hello + RESP + + assert_equal expected, response + + response = socket.req_write(@valid_request).read_response + + assert_equal @valid_response, response + end + + def test_no_body_then_get + socket = send_http @valid_no_body + response = socket.read_response + assert_equal "HTTP/1.1 204 No Content\r\nX-Header: Works\r\n\r\n", response + + response = socket.req_write(@valid_request).read_response + + assert_equal @valid_response, response + end + + def test_chunked + @body << "Chunked" + @body = @body.to_enum + + response = send_http_read_response @valid_request + + assert_equal "HTTP/1.1 200 OK\r\nX-Header: Works\r\nTransfer-Encoding: chunked\r\n\r\n" \ + "5\r\nHello\r\n7\r\nChunked\r\n0\r\n\r\n", response + end + + def test_chunked_with_empty_part + @body << "" + @body << "Chunked" + @body = @body.to_enum + + response = send_http_read_response @valid_request + + assert_equal "HTTP/1.1 200 OK\r\nX-Header: Works\r\nTransfer-Encoding: chunked\r\n\r\n" \ + "5\r\nHello\r\n7\r\nChunked\r\n0\r\n\r\n", response + end + + def test_no_chunked_in_http10 + @body << "Chunked" + @body = @body.to_enum + + socket = send_http GET_10 + + sleep 0.01 if ::Puma::IS_JRUBY + + response = socket.read_response + + assert_equal "HTTP/1.0 200 OK\r\nX-Header: 
Works\r\n\r\n" \ + "HelloChunked", response + end + + def test_hex + str = "This is longer and will be in hex" + @body << str + @body = @body.to_enum + + response = send_http_read_response @valid_request + + assert_equal "HTTP/1.1 200 OK\r\nX-Header: Works\r\nTransfer-Encoding: chunked\r\n\r\n" \ + "5\r\nHello\r\n#{str.size.to_s(16)}\r\n#{str}\r\n0\r\n\r\n", response + end + + def test_client11_close + response = send_http_read_response @close_request + + assert_equal "HTTP/1.1 200 OK\r\nX-Header: Works\r\nConnection: close\r\nContent-Length: 5\r\n\r\n" \ + "Hello", response + end + + def test_client10_close + response = send_http_read_response GET_10 + + assert_equal "HTTP/1.0 200 OK\r\nX-Header: Works\r\nContent-Length: 5\r\n\r\n" \ + "Hello", response + end + + def test_one_with_keep_alive_header + response = send_http_read_response @keep_request + + assert_equal "HTTP/1.0 200 OK\r\nX-Header: Works\r\nConnection: Keep-Alive\r\nContent-Length: 5\r\n\r\n" \ + "Hello", response + end + + def test_persistent_timeout + @server.instance_variable_set(:@persistent_timeout, 1) + + socket = send_http @valid_request + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nX-Header: Works\r\nContent-Length: 5\r\n\r\n" \ + "Hello", response + + sleep 2 + + assert_raises EOFError do + socket.read_nonblock(1) + end + end + + def test_app_sets_content_length + @body = ["hello", " world"] + @headers['Content-Length'] = "11" + + response = send_http_read_response @valid_request + + assert_equal "HTTP/1.1 200 OK\r\nX-Header: Works\r\nContent-Length: 11\r\n\r\n" \ + "hello world", response + end + + def test_allow_app_to_chunk_itself + @headers = {'Transfer-Encoding' => "chunked" } + + @body = ["5\r\nhello\r\n0\r\n\r\n"] + + response = send_http_read_response @valid_request + + assert_equal "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n" \ + "5\r\nhello\r\n0\r\n\r\n", response + end + + def test_two_requests_in_one_chunk + 
@server.instance_variable_set(:@persistent_timeout, 3) + + req = @valid_request.to_s + req += "GET /second HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\n\r\n" + + response = send_http_read_all req + + assert_equal @valid_response * 2, response + end + + def test_second_request_not_in_first_req_body + @server.instance_variable_set(:@persistent_timeout, 3) + + req = @valid_request.to_s + req += "GET /second HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\n\r\n" + + response = send_http_read_all req + + assert_equal @valid_response * 2, response + + assert_kind_of Puma::NullIO, @inputs[0] + assert_kind_of Puma::NullIO, @inputs[1] + end + + def test_keepalive_doesnt_starve_clients + send_http @valid_request + + c2 = send_http @valid_request + + assert c2.wait_readable(1), "2nd request starved" + + response = c2.read_response + + assert_equal "HTTP/1.1 200 OK\r\nX-Header: Works\r\nContent-Length: 5\r\n\r\n" \ + "Hello", response + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_plugin.rb b/vendor/cache/puma-fba741b91780/test/test_plugin.rb new file mode 100644 index 000000000..fd36cea55 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_plugin.rb @@ -0,0 +1,27 @@ +require_relative "helper" +require_relative "helpers/integration" + +class TestPlugin < TestIntegration + def test_plugin + skip "Skipped on Windows Ruby < 2.5.0, Ruby bug" if windows? 
&& RUBY_VERSION < '2.5.0' + @control_tcp_port = UniquePort.call + + Dir.mkdir("tmp") unless Dir.exist?("tmp") + + cli_server "--control-url tcp://#{HOST}:#{@control_tcp_port} --control-token #{TOKEN} test/rackup/hello.ru", + config: "plugin 'tmp_restart'" + + File.open('tmp/restart.txt', mode: 'wb') { |f| f.puts "Restart #{Time.now}" } + + assert wait_for_server_to_include('Restarting...') + + assert wait_for_server_to_boot + + cli_pumactl "stop" + + assert wait_for_server_to_include('Goodbye') + + @server.close + @server = nil + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_plugin_systemd.rb b/vendor/cache/puma-fba741b91780/test/test_plugin_systemd.rb new file mode 100644 index 000000000..c3259b2ab --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_plugin_systemd.rb @@ -0,0 +1,120 @@ +# frozen_string_literal: true + +require_relative "helper" +require_relative "helpers/integration" + +class TestPluginSystemd < TestIntegration + parallelize_me! if ::Puma.mri? + + THREAD_LOG = TRUFFLE ? "{ 0/16 threads, 16 available, 0 backlog }" : + "{ 0/5 threads, 5 available, 0 backlog }" + + def setup + skip_unless :linux + skip_unless :unix + skip_unless_signal_exist? :TERM + skip_if :jruby + + super + + ::Dir::Tmpname.create("puma_socket") do |sockaddr| + @sockaddr = sockaddr + @socket = Socket.new(:UNIX, :DGRAM, 0) + socket_ai = Addrinfo.unix(sockaddr) + @socket.bind(socket_ai) + @env = {"NOTIFY_SOCKET" => sockaddr } + end + end + + def teardown + return if skipped? 
+ @socket&.close + File.unlink(@sockaddr) if @sockaddr + @socket = nil + @sockaddr = nil + end + + def test_systemd_notify_usr1_phased_restart_cluster + skip_unless :fork + assert_restarts_with_systemd :USR1 + end + + def test_systemd_notify_usr2_hot_restart_cluster + skip_unless :fork + assert_restarts_with_systemd :USR2 + end + + def test_systemd_notify_usr2_hot_restart_single + assert_restarts_with_systemd :USR2, workers: 0 + end + + def test_systemd_watchdog + wd_env = @env.merge({"WATCHDOG_USEC" => "1_000_000"}) + cli_server "test/rackup/hello.ru", env: wd_env + assert_message "READY=1" + + assert_message "WATCHDOG=1" + + stop_server + assert_message "STOPPING=1" + end + + def test_systemd_notify + cli_server "test/rackup/hello.ru", env: @env + assert_message "READY=1" + + assert_message "STATUS=Puma #{Puma::Const::VERSION}: worker: #{THREAD_LOG}" + + stop_server + assert_message "STOPPING=1" + end + + def test_systemd_cluster_notify + skip_unless :fork + cli_server "-w2 test/rackup/hello.ru", env: @env + assert_message "READY=1" + + assert_message( + "STATUS=Puma #{Puma::Const::VERSION}: cluster: 2/2, worker_status: [#{THREAD_LOG},#{THREAD_LOG}]") + + stop_server + assert_message "STOPPING=1" + end + + private + + def assert_restarts_with_systemd(signal, workers: 2) + skip_unless(:fork) unless workers.zero? + cli_server "-w#{workers} test/rackup/hello.ru", env: @env + get_worker_pids(0, workers) if workers == 2 + assert_message 'READY=1' + + phase_ary = signal == :USR1 ? 
[1,2] : [0,0] + + Process.kill signal, @pid + get_worker_pids(phase_ary[0], workers) if workers == 2 + assert_message 'RELOADING=1' + assert_message 'READY=1' + + Process.kill signal, @pid + get_worker_pids(phase_ary[1], workers) if workers == 2 + assert_message 'RELOADING=1' + assert_message 'READY=1' + + stop_server + assert_message 'STOPPING=1' + end + + def assert_message(msg) + @socket.wait_readable 1 + read = @socket.sysread(msg.bytesize) + # below is kind of hacky, but seems to work correctly when slow CI systems + # write partial status messages + if read.start_with?('STATUS=') && !msg.start_with?('STATUS=') + read << @socket.sysread(512) while @socket.wait_readable(1) && !read.end_with?(msg) + assert_end_with read, msg + else + assert_equal msg, read + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_plugin_systemd_jruby.rb b/vendor/cache/puma-fba741b91780/test/test_plugin_systemd_jruby.rb new file mode 100644 index 000000000..af5a0a7e5 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_plugin_systemd_jruby.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require_relative "helper" +require_relative "helpers/integration" + +require "puma/plugin" + +class TestPluginSystemdJruby < TestIntegration + + def setup + skip_unless :linux + skip_unless :unix + skip_unless_signal_exist? :TERM + skip_unless :jruby + + super + end + + def teardown + super unless skipped? 
+ end + + def test_systemd_plugin_not_loaded + cli_server "test/rackup/hello.ru", + env: {'NOTIFY_SOCKET' => '/tmp/doesntmatter' }, config: <<~CONFIG + app do |_| + [200, {}, [Puma::Plugins.instance_variable_get(:@plugins)['systemd'].to_s]] + end + CONFIG + + assert_empty read_body(connect) + + stop_server + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_preserve_bundler_env.rb b/vendor/cache/puma-fba741b91780/test/test_preserve_bundler_env.rb new file mode 100644 index 000000000..d27b064df --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_preserve_bundler_env.rb @@ -0,0 +1,104 @@ +require_relative "helper" +require_relative "helpers/integration" + +class TestPreserveBundlerEnv < TestIntegration + def setup + skip_unless :fork + super + end + + def teardown + return if skipped? + FileUtils.rm current_release_symlink, force: true + super + end + + # It does not wipe out BUNDLE_GEMFILE et al + def test_usr2_restart_preserves_bundler_environment + skip_unless_signal_exist? :USR2 + + env = { + # Intentionally set this to something we wish to keep intact on restarts + "BUNDLE_GEMFILE" => "Gemfile.bundle_env_preservation_test", + # Don't allow our (rake test's) original env to interfere with the child process + "BUNDLER_ORIG_BUNDLE_GEMFILE" => nil + } + # Must use `bundle exec puma` here, because otherwise Bundler may not be defined, which is required to trigger the bug + cmd = "-w 2 --prune-bundler" + replies = ['', ''] + + Dir.chdir(File.expand_path("bundle_preservation_test", __dir__)) do + replies = restart_server_and_listen cmd, env: env + end + match = "Gemfile.bundle_env_preservation_test" + + assert_match(match, replies[0]) + assert_match(match, replies[1]) + end + + def test_worker_forking_preserves_bundler_config_path + skip_unless_signal_exist? 
:TERM + + @tcp_port = UniquePort.call + env = { + # Disable the .bundle/config file in the bundle_app_config_test directory + "BUNDLE_APP_CONFIG" => "/dev/null", + # Don't allow our (rake test's) original env to interfere with the child process + "BUNDLE_GEMFILE" => nil, + "BUNDLER_ORIG_BUNDLE_GEMFILE" => nil + } + cmd = "-q -w 1 --prune-bundler" + Dir.chdir File.expand_path("bundle_app_config_test", __dir__) do + cli_server cmd, env: env + end + + reply = read_body(connect) + assert_equal("Hello World", reply) + end + + def test_phased_restart_preserves_unspecified_bundle_gemfile + skip_unless_signal_exist? :USR1 + + @tcp_port = UniquePort.call + env = { + "BUNDLE_GEMFILE" => nil, + "BUNDLER_ORIG_BUNDLE_GEMFILE" => nil + } + set_release_symlink File.expand_path("bundle_preservation_test/version1", __dir__) + cmd = "-q -w 1 --prune-bundler" + Dir.chdir(current_release_symlink) do + cli_server cmd, env: env + end + connection = connect + + # Bundler itself sets ENV['BUNDLE_GEMFILE'] to the Gemfile it finds if ENV['BUNDLE_GEMFILE'] was unspecified + initial_reply = read_body(connection) + expected_gemfile = File.expand_path("bundle_preservation_test/version1/Gemfile", __dir__).inspect + assert_equal(expected_gemfile, initial_reply) + + set_release_symlink File.expand_path("bundle_preservation_test/version2", __dir__) + start_phased_restart + + connection = connect + new_reply = read_body(connection) + expected_gemfile = File.expand_path("bundle_preservation_test/version2/Gemfile", __dir__).inspect + assert_equal(expected_gemfile, new_reply) + end + + private + + def current_release_symlink + File.expand_path "bundle_preservation_test/current", __dir__ + end + + def set_release_symlink(target_dir) + FileUtils.rm current_release_symlink, force: true + FileUtils.symlink target_dir, current_release_symlink, force: true + end + + def start_phased_restart + Process.kill :USR1, @pid + + true while @server.gets !~ /booted in [.0-9]+s, phase: 1/ + end +end diff --git 
a/vendor/cache/puma-fba741b91780/test/test_puma_localhost_authority.rb b/vendor/cache/puma-fba741b91780/test/test_puma_localhost_authority.rb new file mode 100644 index 000000000..bac39a1b0 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_puma_localhost_authority.rb @@ -0,0 +1,76 @@ +# Nothing in this file runs if Puma isn't compiled with ssl support +# +# helper is required first since it loads Puma, which needs to be +# loaded so HAS_SSL is defined +require_relative "helper" +require "localhost/authority" + +if ::Puma::HAS_SSL && !Puma::IS_JRUBY + require "puma/minissl" + require_relative "helpers/test_puma/puma_socket" + require "openssl" unless Object.const_defined? :OpenSSL +end + +class TestPumaLocalhostAuthority < Minitest::Test + include TestPuma + include TestPuma::PumaSocket + + def setup + @server = nil + end + + def teardown + @server&.stop true + end + + # yields ctx to block, use for ctx setup & configuration + def start_server + app = lambda { |env| [200, {}, [env['rack.url_scheme']]] } + + @log_writer = SSLLogWriterHelper.new STDOUT, STDERR + @server = Puma::Server.new app, nil, {log_writer: @log_writer} + @server.add_ssl_listener LOCALHOST, 0, nil + @bind_port = @server.connected_ports[0] + @server.run + end + + def test_localhost_authority_file_generated + # Initiate server to create localhost authority + unless File.exist?(File.join(Localhost::Authority.path,"localhost.key")) + start_server + end + assert_equal(File.exist?(File.join(Localhost::Authority.path,"localhost.key")), true) + assert_equal(File.exist?(File.join(Localhost::Authority.path,"localhost.crt")), true) + end + +end if ::Puma::HAS_SSL && !Puma::IS_JRUBY + +class TestPumaSSLLocalhostAuthority < Minitest::Test + include TestPuma + include TestPuma::PumaSocket + + def test_self_signed_by_localhost_authority + app = lambda { |env| [200, {}, [env['rack.url_scheme']]] } + + @log_writer = SSLLogWriterHelper.new STDOUT, STDERR + + @server = Puma::Server.new app, nil, 
{log_writer: @log_writer} + @server.app = app + + @server.add_ssl_listener LOCALHOST, 0, nil + @bind_port = @server.connected_ports[0] + + local_authority_crt = OpenSSL::X509::Certificate.new File.read(File.join(Localhost::Authority.path,"localhost.crt")) + + @server.run + cert = nil + begin + cert = send_http(host: LOCALHOST, ctx: new_ctx).peer_cert + rescue OpenSSL::SSL::SSLError, EOFError, Errno::ECONNRESET + # Errno::ECONNRESET TruffleRuby + end + sleep 0.1 + + assert_equal(cert.to_pem, local_authority_crt.to_pem) + end +end if ::Puma::HAS_SSL && !Puma::IS_JRUBY diff --git a/vendor/cache/puma-fba741b91780/test/test_puma_server.rb b/vendor/cache/puma-fba741b91780/test/test_puma_server.rb new file mode 100644 index 000000000..2277f8485 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_puma_server.rb @@ -0,0 +1,2101 @@ +require_relative "helper" +require_relative "helpers/test_puma/puma_socket" +require "puma/events" +require "puma/server" +require "nio" +require "ipaddr" + +class WithoutBacktraceError < StandardError + def backtrace; nil; end + def message; "no backtrace error"; end +end + +class TestPumaServer < Minitest::Test + parallelize_me! 
+ + include TestPuma + include TestPuma::PumaSocket + + STATUS_CODES = ::Puma::HTTP_STATUS_CODES + + HOST = HOST4 + + def setup + @host = HOST + @app = ->(env) { [200, {}, [env['rack.url_scheme']]] } + + @log_writer = Puma::LogWriter.strings + @events = Puma::Events.new + @server = Puma::Server.new @app, @events, {log_writer: @log_writer} + end + + def teardown + @server.stop(true) + # Errno::EBADF raised on macOS + end + + def server_run(**options, &block) + options[:log_writer] ||= @log_writer + options[:min_threads] ||= 1 + @server = Puma::Server.new block || @app, @events, options + @bind_port = (@server.add_tcp_listener @host, 0).addr[1] + @server.run + end + + def test_http10_req_to_http10_resp + server_run do |env| + [200, {}, [env["SERVER_PROTOCOL"]]] + end + response = send_http_read_response GET_10 + assert_equal "HTTP/1.0 200 OK", response.status + assert_equal "HTTP/1.0" , response.body + end + def test_http11_req_to_http11_resp + server_run do |env| + [200, {}, [env["SERVER_PROTOCOL"]]] + end + response = send_http_read_response GET_11 + assert_equal "HTTP/1.1 200 OK", response.status + assert_equal "HTTP/1.1" , response.body + end + + def test_normalize_host_header_missing + server_run do |env| + [200, {}, [env["SERVER_NAME"], "\n", env["SERVER_PORT"]]] + end + + body = send_http_read_resp_body GET_10 + assert_equal "localhost\n80", body + end + + def test_normalize_host_header_hostname + server_run do |env| + [200, {}, [env["SERVER_NAME"], "\n", env["SERVER_PORT"]]] + end + + body = send_http_read_resp_body "GET / HTTP/1.0\r\nHost: example.com:456\r\n\r\n" + assert_equal "example.com\n456", body + + body = send_http_read_resp_body "GET / HTTP/1.0\r\nHost: example.com\r\n\r\n" + assert_equal "example.com\n80", body + end + + def test_normalize_host_header_ipv4 + server_run do |env| + [200, {}, [env["SERVER_NAME"], "\n", env["SERVER_PORT"]]] + end + + body = send_http_read_resp_body "GET / HTTP/1.0\r\nHost: 123.123.123.123:456\r\n\r\n" + assert_equal 
"123.123.123.123\n456", body + + body = send_http_read_resp_body "GET / HTTP/1.0\r\nHost: 123.123.123.123\r\n\r\n" + assert_equal "123.123.123.123\n80", body + end + + def test_normalize_host_header_ipv6 + server_run do |env| + [200, {}, [env["SERVER_NAME"], "\n", env["SERVER_PORT"]]] + end + + body = send_http_read_resp_body "GET / HTTP/1.0\r\nHost: [::ffff:127.0.0.1]:9292\r\n\r\n" + assert_equal "[::ffff:127.0.0.1]\n9292", body + + body = send_http_read_resp_body "GET / HTTP/1.0\r\nHost: [::1]:9292\r\n\r\n" + assert_equal "[::1]\n9292", body + + body = send_http_read_resp_body "GET / HTTP/1.0\r\nHost: [::1]\r\n\r\n" + assert_equal "[::1]\n80", body + end + + def test_streaming_body + server_run do |env| + body = lambda do |stream| + stream.write("Hello World") + stream.close + end + + [200, {}, body] + end + + body = send_http_read_resp_body "GET / HTTP/1.0\r\nConnection: close\r\n\r\n" + + assert_equal "Hello World", body + end + + def test_file_body + random_bytes = SecureRandom.random_bytes(4096 * 32) + + tf = tempfile_create("test_file_body", random_bytes) + + server_run { |env| [200, {}, tf] } + + body = send_http_read_resp_body "GET / HTTP/1.1\r\nHost: [::ffff:127.0.0.1]:#{@bind_port}\r\n\r\n" + + assert_equal random_bytes.bytesize, body.bytesize + assert_equal random_bytes, body + ensure + tf&.close + end + + def test_file_to_path + random_bytes = SecureRandom.random_bytes(4096 * 32) + + tf = tempfile_create("test_file_to_path", random_bytes) + path = tf.path + + obj = Object.new + obj.singleton_class.send(:define_method, :to_path) { path } + obj.singleton_class.send(:define_method, :each) { path } # dummy, method needs to exist + + server_run { |env| [200, {}, obj] } + + body = send_http_read_resp_body + + assert_equal random_bytes.bytesize, body.bytesize + assert_equal random_bytes, body + ensure + tf&.close + end + + def test_proper_stringio_body + data = nil + + server_run do |env| + data = env['rack.input'].read + [200, {}, ["ok"]] + end + + fifteen = 
"1" * 15 + + socket = send_http "PUT / HTTP/1.0\r\nContent-Length: 30\r\n\r\n#{fifteen}" + + sleep 0.1 # important so that the previous data is sent as a packet + socket << fifteen + + socket.read_response + + assert_equal "#{fifteen}#{fifteen}", data + end + + def test_puma_socket + body = "HTTP/1.1 750 Upgraded to Awesome\r\nDone: Yep!\r\n" + server_run do |env| + io = env['puma.socket'] + io.write body + io.close + [-1, {}, []] + end + + data = send_http_read_response "PUT / HTTP/1.0\r\n\r\nHello" + + assert_equal body, data + end + + def test_very_large_return + giant = "x" * 2056610 + + server_run do + [200, {}, [giant]] + end + + body = send_http_read_resp_body GET_10 + + assert_equal giant.bytesize, body.bytesize + end + + def test_respect_x_forwarded_proto + env = {} + env['HOST'] = "example.com" + env['HTTP_X_FORWARDED_PROTO'] = "https,http" + + assert_equal "443", @server.default_server_port(env) + end + + def test_respect_x_forwarded_ssl_on + env = {} + env['HOST'] = 'example.com' + env['HTTP_X_FORWARDED_SSL'] = 'on' + + assert_equal "443", @server.default_server_port(env) + end + + def test_respect_x_forwarded_scheme + env = {} + env['HOST'] = 'example.com' + env['HTTP_X_FORWARDED_SCHEME'] = 'https' + + assert_equal '443', @server.default_server_port(env) + end + + def test_default_server_port + server_run do |env| + [200, {}, [env['SERVER_PORT']]] + end + + req = "GET / HTTP/1.0\r\nHost: example.com\r\n\r\n" + + body = send_http_read_resp_body req + + assert_equal "80", body + end + + def test_default_server_port_respects_x_forwarded_proto + server_run do |env| + [200, {}, [env['SERVER_PORT']]] + end + + req = "GET / HTTP/1.0\r\nHost: example.com\r\nx-forwarded-proto: https,http\r\n\r\n" + + body = send_http_read_resp_body req + + assert_equal "443", body + end + + def test_HEAD_has_no_body + server_run { [200, {"Foo" => "Bar"}, ["hello"]] } + + response = send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + assert_equal "HTTP/1.0 200 OK\r\nFoo: 
Bar\r\nContent-Length: 5\r\n\r\n", response + end + + def test_GET_with_empty_body_has_sane_chunking + server_run { [200, {}, [""]] } + + response = send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + assert_equal "HTTP/1.0 200 OK\r\nContent-Length: 0\r\n\r\n", response + end + + def test_back_to_back_no_content + bodies = [] + server_run { |e| + bodies << e['rack.input'].read + [200, {}, ["ok #{bodies.size}"]] + } + + data = send_http_read_all( + "GET / HTTP/1.1\r\nHost: a\r\nContent-Length: 0\r\n\r\n" \ + "GET / HTTP/1.1\r\nConnection: close\r\n\r\n" + ) + + assert_equal( + "HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nok 1" \ + "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 4\r\n\r\nok 2", data + ) + assert_equal ["", ""], bodies + end + + def test_back_to_back_content + bodies = [] + server_run { |e| + bodies << e['rack.input'].read + [200, {}, ["ok #{bodies.size}"]] + } + + data = send_http_read_all( + "GET / HTTP/1.1\r\nHost: a\r\nContent-Length: 1\r\n\r\na" \ + "GET / HTTP/1.1\r\nContent-Length: 0\r\n\r\n" + ) + + assert_equal( + "HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nok 1" \ + "HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nok 2", data + ) + assert_equal ["a", ""], bodies + end + + def test_back_to_back_chunked + server_run { |env| + [200, {'Content-Length' => env['CONTENT_LENGTH']}, [env['rack.input'].read]] + } + + socket = send_http( + "GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\nh\r\n4\r\nello\r\n0\r\n\r\n" \ + "GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n4\r\ngood\r\n3\r\nbye\r\n0\r\n\r\n" + ) + + sleep 0.05 # let both requests be processed? 
+ + data = socket.sysread 1_024 + + assert_equal( + "HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello" \ + "HTTP/1.1 200 OK\r\nContent-Length: 7\r\n\r\ngoodbye", data + ) + + socket << "GET / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n1\r\nH\r\n4\r\nello\r\n0\r\n\r\n" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nHello", response + end + + def test_early_hints_works + server_run(early_hints: true) do |env| + env['rack.early_hints'].call("Link" => "; rel=preload; as=style\n; rel=preload") + [200, { "X-Hello" => "World" }, ["Hello world!"]] + end + + response = send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + expected_resp = <<~EOF.gsub("\n", "\r\n") + "\r\n" + HTTP/1.1 103 Early Hints + Link: ; rel=preload; as=style + Link: ; rel=preload + + HTTP/1.0 200 OK + X-Hello: World + Content-Length: 12 + EOF + + assert_equal true, @server.early_hints + assert_equal expected_resp, response + end + + def test_early_hints_are_ignored_if_connection_lost + + server_run(early_hints: true) do |env| + env['rack.early_hints'].call("Link" => "; rel=preload") + [200, { "X-Hello" => "World" }, ["Hello world!"]] + end + + def @server.fast_write(*args) + raise Puma::ConnectionError + end + + # This request will cause the server to try and send early hints + _ = send_http "HEAD / HTTP/1.0\r\n\r\n" + + # Give the server some time to try to write (and fail) + sleep 0.1 + + # Expect no errors in stderr + assert @log_writer.stderr.pos.zero?, "Server didn't swallow the connection error" + end + + def test_early_hints_is_off_by_default + server_run do |env| + assert_nil env['rack.early_hints'] + [200, { "X-Hello" => "World" }, ["Hello world!"]] + end + + response = send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + expected_resp = <<~EOF.gsub("\n", "\r\n") + "\r\n" + HTTP/1.0 200 OK + X-Hello: World + Content-Length: 12 + EOF + + assert_nil @server.early_hints + assert_equal expected_resp, response + end + + def 
test_request_payload_too_large + server_run(http_content_length_limit: 10) + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 19\r\n\r\n" + socket << "hello world foo bar" + + response = socket.read_response + + # Content Too Large + assert_equal "HTTP/1.1 413 #{STATUS_CODES[413]}", response.status + end + + def test_http_11_keep_alive_with_large_payload + server_run(http_content_length_limit: 10) { [204, {}, []] } + + socket = send_http "GET / HTTP/1.1\r\nConnection: Keep-Alive\r\nContent-Length: 17\r\n\r\n" + socket << "hello world foo bar" + + response = socket.read_response + + # Content Too Large + assert_equal "HTTP/1.1 413 #{STATUS_CODES[413]}", response.status + assert_equal ["Content-Length: 17"], response.headers + end + + def test_GET_with_no_body_has_sane_chunking + server_run { [200, {}, []] } + + response = send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + assert_equal "HTTP/1.0 200 OK\r\nContent-Length: 0\r\n\r\n", response + end + + def test_doesnt_print_backtrace_in_production + server_run(environment: :production) { raise "don't leak me bro" } + + response = send_http_read_response GET_10 + + refute_match(/don't leak me bro/, response) + assert_equal 'HTTP/1.0 500 Internal Server Error', response.status + end + + def test_eof_on_connection_close_is_not_logged_as_an_error + server_run + + new_socket.close # Make a connection and close without writing + + @server.stop(true) + stderr = @log_writer.stderr.string + assert stderr.empty?, "Expected stderr from server to be empty but it was #{stderr.inspect}" + end + + def test_force_shutdown_custom_error_message + handler = lambda {|err, env, status| [500, {"Content-Type" => "application/json"}, ["{}\n"]]} + server_run(lowlevel_error_handler: handler, force_shutdown_after: 2) do + @server.stop + sleep 5 + end + + response = send_http_read_response GET_10 + + assert_equal 'HTTP/1.0 500 Internal Server Error', response.status + 
assert_match(/Content-Type: application\/json/, response) + assert_match(/{}\n$/, response) + end + + class ArrayClose < Array + attr_reader :is_closed + def closed? + @is_closed + end + + def close + @is_closed = true + end + end + + # returns status as an array, which throws lowlevel error + def test_lowlevel_error_body_close + app_body = ArrayClose.new(['lowlevel_error']) + + server_run(log_writer: @log_writer, :force_shutdown_after => 2) do + [[0,1], {}, app_body] + end + + response = send_http_read_response "GET / HTTP/1.0\r\n\r\n" + + assert_start_with response, 'HTTP/1.0 500 Internal Server Error' + assert_match(/Puma caught this error: undefined method [`']to_i' for/, response) + assert_includes response, "Array" + refute_includes response, 'lowlevel_error' + sleep 0.1 unless ::Puma::IS_MRI + assert app_body.closed? + end + + def test_lowlevel_error_message + server_run(log_writer: @log_writer, :force_shutdown_after => 2) do + raise NoMethodError, "Oh no an error" + end + + response = send_http_read_response GET_10 + + # Internal Server Error + assert_equal "HTTP/1.0 500 #{STATUS_CODES[500]}", response.status + assert_match(/Puma caught this error: Oh no an error.*\(NoMethodError\).*test\/test_puma_server.rb/m, response) + end + + def test_lowlevel_error_message_without_backtrace + server_run(log_writer: @log_writer, :force_shutdown_after => 2) do + raise WithoutBacktraceError.new + end + + response = send_http_read_response GET_11 + # Internal Server Error + assert_equal "HTTP/1.1 500 #{STATUS_CODES[500]}", response.status + assert_includes response, 'Puma caught this error: no backtrace error (WithoutBacktraceError)' + assert_includes response, '' + end + + def test_force_shutdown_error_default + server_run(force_shutdown_after: 2) do + @server.stop + sleep 5 + end + + response = send_http_read_response GET_10 + + assert_equal 'HTTP/1.0 503 Service Unavailable', response.status + assert_match(/Puma caught this error.+Puma::ThreadPool::ForceShutdown/, 
response) + end + + def test_prints_custom_error + re = lambda { |err| [302, {'Content-Type' => 'text', 'Location' => 'foo.html'}, ['302 found']] } + server_run(lowlevel_error_handler: re) { raise "don't leak me bro" } + + response = send_http_read_response GET_10 + + assert_equal 'HTTP/1.0 302 Found', response.status + end + + def test_leh_gets_env_as_well + re = lambda { |err,env| + env['REQUEST_PATH'] || raise('where is env?') + [302, {'Content-Type' => 'text', 'Location' => 'foo.html'}, ['302 found']] + } + + server_run(lowlevel_error_handler: re) { raise "don't leak me bro" } + + response = send_http_read_response GET_10 + + assert_equal 'HTTP/1.0 302 Found', response.status + end + + def test_leh_has_status + re = lambda { |err, env, status| + raise "Cannot find status" unless status + [302, {'Content-Type' => 'text', 'Location' => 'foo.html'}, ['302 found']] + } + + server_run(lowlevel_error_handler: re) { raise "don't leak me bro" } + + response = send_http_read_response GET_10 + + assert_equal 'HTTP/1.0 302 Found', response.status + end + + def test_custom_http_codes_10 + server_run { [449, {}, [""]] } + + response = send_http_read_response GET_10 + + assert_equal "HTTP/1.0 449 CUSTOM\r\nContent-Length: 0\r\n\r\n", response + end + + def test_custom_http_codes_11 + server_run { [449, {}, [""]] } + + response = send_http_read_response "GET / HTTP/1.1\r\nConnection: close\r\n\r\n" + + assert_equal "HTTP/1.1 449 CUSTOM\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + end + + def test_HEAD_returns_content_headers + server_run { [200, {"Content-Type" => "application/pdf", + "Content-Length" => "4242"}, []] } + + response = send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + assert_equal "HTTP/1.0 200 OK\r\nContent-Type: application/pdf\r\nContent-Length: 4242\r\n\r\n", response + end + + def test_status_hook_fires_when_server_changes_states + + states = [] + + @events.register(:state) { |s| states << s } + + server_run { [200, {}, [""]] } + + _ 
= send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + assert_equal [:booting, :running], states + + @server.stop(true) + + assert_equal [:booting, :running, :stop, :done], states + end + + def test_timeout_in_data_phase(**options) + server_run(first_data_timeout: 1, **options) + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 5\r\n\r\n" + + socket << "Hello" unless socket.wait_readable(1.15) + + response = socket.read_response + + # Request Timeout + assert_equal "HTTP/1.1 408 #{STATUS_CODES[408]}", response.status + end + + def test_timeout_data_no_queue + test_timeout_in_data_phase(queue_requests: false) + end + + # https://github.com/puma/puma/issues/2574 + def test_no_timeout_after_data_received + @server.instance_variable_set(:@first_data_timeout, 1) + server_run + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 11\r\n\r\n" + sleep 0.5 + + socket << "hello" + sleep 0.5 + socket << "world" + sleep 0.5 + socket << "!" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK", response.status + end + + def test_no_timeout_after_data_received_no_queue + @server = Puma::Server.new @app, @events, {log_writer: @log_writer, queue_requests: false} + test_no_timeout_after_data_received + end + + def test_idle_timeout_before_first_request + server_run(idle_timeout: 1) + + sleep 1.15 + + assert @server.shutting_down? + + assert_raises Errno::ECONNREFUSED do + send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + end + end + + def test_idle_timeout_before_first_request_data + server_run(idle_timeout: 1) + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + + sleep 1.15 + + socket << "hello world!" 
+ + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK", response.status + end + + def test_idle_timeout_between_first_request_data + server_run(idle_timeout: 1) + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + + socket << "hello" + + sleep 1.15 + + socket << " world!" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK", response.status + end + + def test_idle_timeout_after_first_request + server_run(idle_timeout: 1) + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + + socket << "hello world!" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK", response.status + + sleep 1.15 + + assert @server.shutting_down? + + assert socket.wait_readable(1), 'Unexpected timeout' + assert_raises Errno::ECONNREFUSED do + send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + end + end + + def test_idle_timeout_between_request_data + server_run(idle_timeout: 1) + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + + socket << "hello world!" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK", response.status + + sleep 0.5 + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + + socket << "hello" + + sleep 1.15 + + socket << " world!" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK", response.status + + sleep 1.15 + + assert @server.shutting_down? 
+ + assert socket.wait_readable(1), 'Unexpected timeout' + assert_raises Errno::ECONNREFUSED do + send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + end + end + + def test_idle_timeout_between_requests + server_run(idle_timeout: 1) + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + + socket << "hello world!" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK", response.status + + sleep 0.5 + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + + socket << "hello world!" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK", response.status + + sleep 1.15 + + assert @server.shutting_down? + + assert socket.wait_readable(1), 'Unexpected timeout' + assert_raises Errno::ECONNREFUSED do + send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 12\r\n\r\n" + end + end + + def test_http_11_keep_alive_with_body + server_run { [200, {"Content-Type" => "plain/text"}, ["hello\n"]] } + + req = "GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n" + response = send_http_read_response req + + assert_equal ["Content-Type: plain/text", "Content-Length: 6"], response.headers + assert_equal "hello\n", response.body + end + + def test_http_11_close_with_body + server_run { [200, {"Content-Type" => "plain/text"}, ["hello"]] } + + response = send_http_read_response "GET / HTTP/1.1\r\nConnection: close\r\n\r\n" + + assert_equal "HTTP/1.1 200 OK\r\nContent-Type: plain/text\r\nConnection: close\r\nContent-Length: 5\r\n\r\nhello", response + end + + def test_http_11_keep_alive_without_body + server_run { [204, {}, []] } + + response = send_http_read_response "GET / HTTP/1.1\r\nConnection: Keep-Alive\r\n\r\n" + + # No Content + assert_equal "HTTP/1.1 204 #{STATUS_CODES[204]}", response.status + end + + def test_http_11_close_without_body + 
server_run { [204, {}, []] } + + req = "GET / HTTP/1.1\r\nConnection: close\r\n\r\n" + response = send_http_read_response req + + # No Content + assert_equal "HTTP/1.1 204 #{STATUS_CODES[204]}", response.status + assert_equal ["Connection: close"], response.headers + end + + def test_http_11_enable_keep_alives_by_default + server_run(enable_keep_alives: true) { [200, {"Content-Type" => "plain/text"}, ["hello\n"]] } + + req = "GET / HTTP/1.1\r\n\r\n" + response = send_http_read_response req + + # No "Connection: close" header. + assert_equal ["Content-Type: plain/text", "Content-Length: 6"], response.headers + assert_equal "hello\n", response.body + end + def test_http_11_enable_keep_alives_true + server_run(enable_keep_alives: true) { [200, {"Content-Type" => "plain/text"}, ["hello\n"]] } + + req = "GET / HTTP/1.1\r\n\r\n" + response = send_http_read_response req + + # No "Connection: close" header. + assert_equal ["Content-Type: plain/text", "Content-Length: 6"], response.headers + assert_equal "hello\n", response.body + end + + def test_http_11_enable_keep_alives_false + server_run(enable_keep_alives: false) { [200, {"Content-Type" => "plain/text"}, ["hello\n"]] } + + req = "GET / HTTP/1.1\r\n\r\n" + response = send_http_read_response req + + # Assert the "Connection: close" header is present with keep-alives disabled. 
+ assert_equal ["Content-Type: plain/text", "Connection: close", "Content-Length: 6"], response.headers + assert_equal "hello\n", response.body + end + + def test_http_10_keep_alive_with_body + server_run { [200, {"Content-Type" => "plain/text"}, ["hello\n"]] } + + req = "GET / HTTP/1.0\r\nConnection: Keep-Alive\r\n\r\n" + + response = send_http_read_response req + + assert_equal "HTTP/1.0 200 OK", response.status + assert_equal ["Content-Type: plain/text", "Connection: Keep-Alive", "Content-Length: 6"], + response.headers + assert_equal "hello\n", response.body + end + + def test_http_10_close_with_body + server_run { [200, {"Content-Type" => "plain/text"}, ["hello"]] } + + response = send_http_read_response "GET / HTTP/1.0\r\nConnection: close\r\n\r\n" + + assert_equal "HTTP/1.0 200 OK\r\nContent-Type: plain/text\r\nContent-Length: 5\r\n\r\nhello", response + end + + def test_http_10_keep_alive_without_body + server_run { [204, {}, []] } + + response = send_http_read_response "GET / HTTP/1.0\r\nConnection: Keep-Alive\r\n\r\n" + + assert_equal "HTTP/1.0 204 No Content\r\nConnection: Keep-Alive\r\n\r\n", response + end + + def test_http_10_close_without_body + server_run { [204, {}, []] } + + response = send_http_read_response "GET / HTTP/1.0\r\nConnection: close\r\n\r\n" + + assert_equal "HTTP/1.0 204 No Content\r\n\r\n", response + end + + def test_Expect_100 + server_run { [200, {}, [""]] } + + response = send_http_read_response "GET / HTTP/1.1\r\nConnection: close\r\nExpect: 100-continue\r\n\r\n" + + assert_equal "HTTP/1.1 100 Continue\r\n\r\nHTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + end + + def test_chunked_request + body = nil + content_length = nil + transfer_encoding = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + transfer_encoding = env['HTTP_TRANSFER_ENCODING'] + [200, {}, [""]] + } + + response = send_http_read_response "GET / HTTP/1.1\r\nConnection: 
close\r\nTransfer-Encoding: gzip,chunked\r\n\r\n1\r\nh\r\n4\r\nello\r\n0\r\n\r\n" + + assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + assert_nil transfer_encoding + end + + # See also test_chunked_keep_alive_two_back_to_back + def test_two_back_to_back_chunked_have_different_tempfile + body = nil + content_length = nil + transfer_encoding = nil + req_body_path = nil + server_run { |env| + io = env['rack.input'] + req_body_path = io.path + body = io.read + content_length = env['CONTENT_LENGTH'] + transfer_encoding = env['HTTP_TRANSFER_ENCODING'] + [200, {}, [""]] + } + + chunked_req = "GET / HTTP/1.1\r\nTransfer-Encoding: gzip,chunked\r\n\r\n1\r\nh\r\n4\r\nello\r\n0\r\n\r\n" + + skt = send_http chunked_req + + response = skt.read_response + path1 = req_body_path + + assert_equal "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + assert_nil transfer_encoding + + skt << chunked_req + response = skt.read_response + path2 = req_body_path + + # same as above + assert_equal "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + assert_nil transfer_encoding + + refute_equal path1, path2 + end + + def test_large_chunked_request + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + header = "GET / HTTP/1.1\r\nConnection: close\r\nContent-Length: 200\r\nTransfer-Encoding: chunked\r\n\r\n" + + chunk_header_size = 6 # 4fb8\r\n + # Current implementation reads one chunk of CHUNK_SIZE, then more chunks of size 4096. + # We want a chunk to split exactly after "#{request_body}\r", before the "\n". 
+ edge_case_size = Puma::Const::CHUNK_SIZE + 4096 - header.size - chunk_header_size - 1 + + margin = 0 # 0 for only testing this specific case, increase to test more surrounding sizes + (-margin..margin).each do |i| + size = edge_case_size + i + request_body = '.' * size + request = "#{header}#{size.to_s(16)}\r\n#{request_body}\r\n0\r\n\r\n" + + response = send_http_read_response request + + assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal size, Integer(content_length) + assert_equal request_body, body + end + end + + def test_chunked_request_invalid_extension_header_length + body = nil + server_run(environment: :production) { |env| + body = env['rack.input'].read + [200, {}, [body]] + } + + max_chunk_header_size = Puma::Client::MAX_CHUNK_HEADER_SIZE + + # send valid request except for extension_header larger than limit + header = "GET / HTTP/1.1\r\nConnection: close\r\nContent-Length: 200\r\nTransfer-Encoding: chunked\r\n\r\n" + response = send_http_read_response "#{header}1;t=#{'x' * (max_chunk_header_size + 2)}\r\n1\r\nh\r\n4\r\nello\r\n0\r\n\r\n" + + assert_equal "HTTP/1.1 400 Bad Request\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + end + + def test_chunked_request_invalid_extension_header_length_split + body = nil + completed_loops = 0 + server_run { |env| + body = env['rack.input'].read + [200, {}, [""]] + } + + # includes 1st chunk length + socket = send_http "GET / HTTP/1.1\r\nConnection: Keep-Alive\r\nTransfer-Encoding: chunked\r\n\r\n1;" + + junk = "*" * 1_024 + + # Ubuntu allows us to close the client socket after an error write, and still + # read with the client. macOS and Windows won't allow a read. 
+ + begin + 10.times do |i| + socket << junk + completed_loops = i + sleep 0.1 + end + socket << "\r\nh\r\n4\r\nello\r\n0\r\n\r\n" + + response = socket.read_response + refute_equal 'hello', body + assert_equal "HTTP/1.1 400 Bad Request\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + + # errors raised vary by OS + rescue Errno::EPIPE, Errno::ECONNABORTED, Errno::ECONNRESET + end + assert_equal 4, completed_loops + end + + def test_chunked_request_pause_before_value + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + socket = send_http "GET / HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n1\r\n" + sleep 1 + + socket << "h\r\n4\r\nello\r\n0\r\n\r\n" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + end + + def test_chunked_request_pause_between_chunks + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + socket = send_http "GET / HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n1\r\nh\r\n" + sleep 1 + + socket << "4\r\nello\r\n0\r\n\r\n" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + end + + def test_chunked_request_pause_mid_count + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + socket = send_http "GET / HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n1\r" + sleep 1 + + socket << "\nh\r\n4\r\nello\r\n0\r\n\r\n" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nConnection: 
close\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + end + + def test_chunked_request_pause_before_count_newline + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + socket = send_http "GET / HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n1" + sleep 1 + + socket << "\r\nh\r\n4\r\nello\r\n0\r\n\r\n" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + end + + def test_chunked_request_pause_mid_value + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + socket = send_http "GET / HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n1\r\nh\r\n4\r\ne" + sleep 1 + + socket << "llo\r\n0\r\n\r\n" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + end + + def test_chunked_request_pause_between_cr_lf_after_size_of_second_chunk + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + part1 = 'a' * 4200 + + chunked_body = "#{part1.size.to_s(16)}\r\n#{part1}\r\n1\r\nb\r\n0\r\n\r\n" + + socket = send_http "PUT /path HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n" + + sleep 0.1 + + socket << chunked_body[0..-10] + + sleep 0.1 + + socket << chunked_body[-9..-1] + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal (part1 + 'b'), body + assert_equal "4201", content_length + end + + def 
test_chunked_request_pause_between_closing_cr_lf + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + socket = send_http "PUT /path HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n5\r\nhello\r" + + sleep 1 + + socket << "\n0\r\n\r\n" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal 'hello', body + assert_equal "5", content_length + end + + def test_chunked_request_pause_before_closing_cr_lf + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + socket = send_http "PUT /path HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n5\r\nhello" + + sleep 1 + + socket << "\r\n0\r\n\r\n" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal 'hello', body + assert_equal "5", content_length + end + + # See https://github.com/puma/puma/issues/3337 & https://github.com/puma/puma/pull/3338 + # + def test_chunked_body_pause_within_chunk_size_hex + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + req_body = +'' + 9.times do |i| + req_body << "400\r\n" + req_body << "#{i}#{'x' * 1_023}" + req_body << "\r\n" + end + req_body << "0\r\n\r\n" + + header = "GET / HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n" + + data1 = req_body[0..7218] # Number here is arbitrary, so that the first chunk of data ends with `40` + data2 = req_body[7219..-1] # remaining data + + socket = send_http "#{header}#{data1}" + + sleep 0.1 # This makes it easier to reproduce the issue, might need to be adjusted + socket << data2 + + response = socket.read_response + 
assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal 9*1_024, body.bytesize + assert_equal 9*1_024, content_length.to_i + assert_equal '012345678', body.delete('x') + end + + def test_chunked_request_header_case + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + response = send_http_read_response "GET / HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: Chunked\r\n\r\n1\r\nh\r\n4\r\nello\r\n0\r\n\r\n" + + assert_equal "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + end + + def test_chunked_keep_alive + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + response = send_http_read_response "GET / HTTP/1.1\r\nConnection: Keep-Alive\r\nTransfer-Encoding: chunked\r\n\r\n1\r\nh\r\n4\r\nello\r\n0\r\n\r\n" + + assert_equal "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + end + + def test_chunked_keep_alive_two_back_to_back + body = nil + content_length = nil + server_run { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + [200, {}, [""]] + } + + socket = send_http "GET / HTTP/1.1\r\nConnection: Keep-Alive\r\nTransfer-Encoding: chunked\r\n\r\n1\r\nh\r\n4\r\nello\r\n0\r\n" + + last_crlf_written = false + last_crlf_writer = Thread.new do + sleep 0.1 + socket << "\r" + sleep 0.1 + socket << "\n" + last_crlf_written = true + end + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + sleep 0.05 if TRUFFLE + assert_equal true, last_crlf_written + + last_crlf_writer.join + + socket << "GET / 
HTTP/1.1\r\nConnection: Keep-Alive\r\nTransfer-Encoding: chunked\r\n\r\n4\r\ngood\r\n3\r\nbye\r\n0\r\n\r\n" + sleep 0.1 + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n", response + assert_equal "goodbye", body + assert_equal "7", content_length + end + + def test_chunked_keep_alive_two_back_to_back_with_set_remote_address + body = nil + content_length = nil + remote_addr =nil + server_run(remote_address: :header, remote_address_header: 'HTTP_X_FORWARDED_FOR') { |env| + body = env['rack.input'].read + content_length = env['CONTENT_LENGTH'] + remote_addr = env['REMOTE_ADDR'] + [200, {}, [""]] + } + + socket = send_http "GET / HTTP/1.1\r\nX-Forwarded-For: 127.0.0.1\r\nConnection: Keep-Alive\r\nTransfer-Encoding: chunked\r\n\r\n1\r\nh\r\n4\r\nello\r\n0\r\n\r\n" + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n", response + assert_equal "hello", body + assert_equal "5", content_length + assert_equal "127.0.0.1", remote_addr + + socket << "GET / HTTP/1.1\r\nX-Forwarded-For: 127.0.0.2\r\nConnection: Keep-Alive\r\nTransfer-Encoding: chunked\r\n\r\n4\r\ngood\r\n3\r\nbye\r\n0\r\n\r\n" + sleep 0.1 + + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n", response + assert_equal "goodbye", body + assert_equal "7", content_length + assert_equal "127.0.0.2", remote_addr + end + + def test_chunked_encoding + enc = Encoding::UTF_16LE + str = "──иї_テスト──\n".encode enc + suffix = "\nHello World\n".encode(enc) + + server_run { + hdrs = {} + hdrs['Content-Type'] = "text; charset=#{enc.to_s.downcase}" + + body = Enumerator.new do |yielder| + 100.times do |entry| + yielder << str + end + yielder << suffix + end + + [200, hdrs, body] + } + + # PumaSocket doesn't process Content-Type charset + body = send_http_read_response.decode_body + body = body.force_encoding enc + + assert_start_with body, str + assert_end_with body, suffix + assert_equal 
enc, body.encoding + end + + def test_empty_header_values + server_run { [200, {"X-Empty-Header" => ""}, []] } + + response = send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + assert_equal "HTTP/1.0 200 OK\r\nX-Empty-Header: \r\nContent-Length: 0\r\n\r\n", response + end + + def test_request_body_wait + request_body_wait = nil + server_run { |env| + request_body_wait = env['puma.request_body_wait'] + [204, {}, []] + } + + socket = send_http "POST / HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 5\r\n\r\nh" + sleep 1 + socket << "ello" + + socket.read_response + + assert request_body_wait.is_a?(Float) + # Could be 1000 but the tests get flaky. We don't care if it's extremely precise so much as that + # it is set to a reasonable number. + assert_operator request_body_wait, :>=, 900 + end + + def test_request_body_wait_chunked + request_body_wait = nil + server_run { |env| + request_body_wait = env['puma.request_body_wait'] + [204, {}, []] + } + + socket = send_http "GET / HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n1\r\nh\r\n" + sleep 3 + socket << "4\r\nello\r\n0\r\n\r\n" + + socket.read_response + + # Could be 1000 but the tests get flaky. We don't care if it's extremely precise so much as that + # it is set to a reasonable number. + assert_operator request_body_wait, :>=, 900 + end + + def test_open_connection_wait(**options) + server_run(**options) { [200, {}, ["Hello"]] } + socket = send_http nil + sleep 0.1 + socket << GET_10 + assert_equal 'Hello', socket.read_body + end + + def test_open_connection_wait_no_queue + test_open_connection_wait(queue_requests: false) + end + + # Rack may pass a newline in a header expecting us to split it. 
+ def test_newline_splits + server_run { [200, {'X-header' => "first line\nsecond line"}, ["Hello"]] } + + response = send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + assert_match "X-header: first line\r\nX-header: second line\r\n", response + end + + def test_newline_splits_in_early_hint + server_run(early_hints: true) do |env| + env['rack.early_hints'].call({'X-header' => "first line\nsecond line"}) + [200, {}, ["Hello world!"]] + end + + response = send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + assert_match "X-header: first line\r\nX-header: second line\r\n", response + end + + def send_proxy_v1_http(req, remote_ip, multisend = false) + addr = IPAddr.new(remote_ip) + family = addr.ipv4? ? "TCP4" : "TCP6" + target = addr.ipv4? ? "127.0.0.1" : "::1" + skt = new_socket + if multisend + skt << "PROXY #{family} #{remote_ip} #{target} 10000 80\r\n" + sleep 0.15 + skt << req + else + skt << ("PROXY #{family} #{remote_ip} #{target} 10000 80\r\n" + req) + end + end + + def test_proxy_protocol + server_run(remote_address: :proxy_protocol, remote_address_proxy_protocol: :v1) do |env| + [200, {}, [env["REMOTE_ADDR"]]] + end + + remote_addr = send_proxy_v1_http("GET / HTTP/1.0\r\n\r\n", "1.2.3.4").read_body + assert_equal '1.2.3.4', remote_addr + + remote_addr = send_proxy_v1_http("GET / HTTP/1.0\r\n\r\n", "fd00::1").read_body + assert_equal 'fd00::1', remote_addr + + remote_addr = send_proxy_v1_http("GET / HTTP/1.0\r\n\r\n", "fd00::1", true).read_body + assert_equal 'fd00::1', remote_addr + end + + # To comply with the Rack spec, we have to split header field values + # containing newlines into multiple headers. 
+ def assert_does_not_allow_http_injection(app, opts = {}) + server_run(early_hints: opts[:early_hints], &app) + + response = send_http_read_response "HEAD / HTTP/1.0\r\n\r\n" + + refute_match(/[\r\n]Cookie: hack[\r\n]/, response) + end + + # HTTP Injection Tests + # + # Puma should prevent injection of CR and LF characters into headers, either as + # CRLF or CR or LF, because browsers may interpret it at as a line end and + # allow untrusted input in the header to split the header or start the + # response body. While it's not documented anywhere and they shouldn't be doing + # it, Chrome and curl recognize a lone CR as a line end. According to RFC, + # clients SHOULD interpret LF as a line end for robustness, and CRLF is the + # specced line end. + # + # There are three different tests because there are three ways to set header + # content in Puma. Regular (rack env), early hints, and a special case for + # overriding content-length. + {"cr" => "\r", "lf" => "\n", "crlf" => "\r\n"}.each do |suffix, line_ending| + # The cr-only case for the following test was CVE-2020-5247 + define_method(:"test_prevent_response_splitting_headers_#{suffix}") do + app = ->(_) { [200, {'X-header' => "untrusted input#{line_ending}Cookie: hack"}, ["Hello"]] } + assert_does_not_allow_http_injection(app) + end + + define_method(:"test_prevent_response_splitting_headers_early_hint_#{suffix}") do + app = ->(env) do + env['rack.early_hints'].call("X-header" => "untrusted input#{line_ending}Cookie: hack") + [200, {}, ["Hello"]] + end + assert_does_not_allow_http_injection(app, early_hints: true) + end + + define_method(:"test_prevent_content_length_injection_#{suffix}") do + app = ->(_) { [200, {'content-length' => "untrusted input#{line_ending}Cookie: hack"}, ["Hello"]] } + assert_does_not_allow_http_injection(app) + end + end + + # Perform a server shutdown while requests are pending (one in app-server response, one still sending client request). 
+ def shutdown_requests(s1_complete: true, s1_response: nil, post: false, s2_response: nil, **options) + mutex = Mutex.new + app_finished = ConditionVariable.new + server_run(**options) { |env| + path = env['REQUEST_PATH'] + mutex.synchronize do + app_finished.signal + app_finished.wait(mutex) if path == '/s1' + end + [204, {}, []] + } + + pool = @server.instance_variable_get(:@thread_pool) + + # Trigger potential race condition by pausing Reactor#add until shutdown begins. + if options.fetch(:queue_requests, true) + reactor = @server.instance_variable_get(:@reactor) + reactor.instance_variable_set(:@pool, pool) + reactor.extend(Module.new do + def add(client) + if client.env['REQUEST_PATH'] == '/s2' + Thread.pass until @pool.instance_variable_get(:@shutdown) + end + super + end + end) + end + + s1 = nil + s2 = send_http post ? + "POST /s2 HTTP/1.1\r\nHost: test.com\r\nContent-Type: text/plain\r\nContent-Length: 5\r\n\r\nhi!" : + "GET /s2 HTTP/1.1\r\n" + mutex.synchronize do + s1 = send_http "GET /s1 HTTP/1.1\r\n\r\n" + app_finished.wait(mutex) + app_finished.signal if s1_complete + end + @server.stop + Thread.pass until pool.instance_variable_get(:@shutdown) + + assert_match(s1_response, s1.read_response.status) if s1_response + + # Send s2 after shutdown begins + s2 << "\r\n" unless s2.wait_readable(0.2) + + assert s2.wait_readable(10), 'timeout waiting for response' + s2_result = begin + s2.read_response.status + rescue Errno::ECONNABORTED, Errno::ECONNRESET, EOFError + # Some platforms raise errors instead of returning a response/EOF when a TCP connection is aborted. + post ? '408' : nil + end + + if s2_response + assert_match s2_response, s2_result + else + assert_nil s2_result + end + end + + # Shutdown should allow pending requests and app-responses to complete. 
+ def test_shutdown_requests + opts = {s1_response: /204/, s2_response: /204/} + shutdown_requests(**opts) + shutdown_requests(**opts, queue_requests: false) + end + + # Requests still pending after `force_shutdown_after` should have connection closed (408 w/pending POST body). + # App-responses still pending should return 503 (uncaught Puma::ThreadPool::ForceShutdown exception). + def test_force_shutdown + opts = {s1_complete: false, s1_response: /503/, s2_response: nil, force_shutdown_after: 0} + shutdown_requests(**opts) + shutdown_requests(**opts, queue_requests: false) + shutdown_requests(**opts, post: true, s2_response: /408/) + end + + def test_http11_connection_header_queue + server_run { [200, {}, [""]] } + + socket = send_http GET_11 + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK", response.status + assert_equal ["Content-Length: 0"], response.headers + + socket << "GET / HTTP/1.1\r\nConnection: close\r\n\r\n" + response = socket.read_response + + assert_equal "HTTP/1.1 200 OK", response.status + assert_equal ["Connection: close", "Content-Length: 0"], response.headers + + socket = send_http "GET / HTTP/1.1\r\n\r\n" + response = socket.read_response + assert_equal "HTTP/1.1 200 OK", response.status + assert_equal ["Content-Length: 0"], response.headers + + socket << "GET / HTTP/1.1\r\nConnection: close\r\n\r\n" + response = socket.read_response + assert_equal "HTTP/1.1 200 OK", response.status + assert_equal ["Connection: close", "Content-Length: 0"], response.headers + end + + def test_http10_connection_header_queue + server_run { [200, {}, [""]] } + + socket = send_http "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n" + response = socket.read_response + + assert_equal "HTTP/1.0 200 OK", response.status + assert_equal ["Connection: Keep-Alive", "Content-Length: 0"], response.headers + + socket << "GET / HTTP/1.0\r\n\r\n" + response = socket.read_response + assert_equal "HTTP/1.0 200 OK", response.status + assert_equal 
["Content-Length: 0"], response.headers + end + + def test_http11_connection_header_no_queue + server_run(queue_requests: false) { [200, {}, [""]] } + response = send_http_read_response GET_11 + assert_equal "HTTP/1.1 200 OK", response.status + assert_equal ["Connection: close", "Content-Length: 0"], response.headers + end + + def test_http10_connection_header_no_queue + server_run(queue_requests: false) { [200, {}, [""]] } + response = send_http_read_response GET_10 + assert_equal "HTTP/1.0 200 OK", response.status + assert_equal ["Content-Length: 0"], response.headers + end + + def stub_accept_nonblock(error) + @bind_port = (@server.add_tcp_listener HOST, 0).addr[1] + io = @server.binder.ios.last + + accept_old = io.method(:accept_nonblock) + io.singleton_class.send :define_method, :accept_nonblock do + accept_old.call.close + raise error + end + + @server.run + new_socket + sleep 0.01 + end + + # System-resource errors such as EMFILE should not be silently swallowed by accept loop. + def test_accept_emfile + stub_accept_nonblock Errno::EMFILE.new('accept(2)') + refute_empty @log_writer.stderr.string, "Expected EMFILE error not logged" + end + + # Retryable errors such as ECONNABORTED should be silently swallowed by accept loop. + def test_accept_econnaborted + # Match Ruby #accept_nonblock implementation, ECONNABORTED error is extended by IO::WaitReadable. 
+ error = Errno::ECONNABORTED.new('accept(2) would block').tap {|e| e.extend IO::WaitReadable} + stub_accept_nonblock(error) + assert_empty @log_writer.stderr.string + end + + # see https://github.com/puma/puma/issues/2390 + # fixed by https://github.com/puma/puma/pull/2279 + # + def test_client_quick_close_no_lowlevel_error_handler_call + handler = ->(err, env, status) { + @log_writer.stdout.write "LLEH #{err.message}" + [500, {"Content-Type" => "application/json"}, ["{}\n"]] + } + + server_run(lowlevel_error_handler: handler) { [200, {}, ['Hello World']] } + + # valid req & read, close + socket = send_http GET_11 + sleep 0.05 # macOS TruffleRuby may not get the body without + body = socket.read_body + assert_match 'Hello World', body + sleep 0.5 + assert_empty @log_writer.stdout.string + + # valid req, close + socket = send_http GET_10 + socket.close + sleep 0.5 + assert_empty @log_writer.stdout.string + + # invalid req, close + socket = send_http "GET / HTTP" + socket.close + sleep 0.5 + assert_empty @log_writer.stdout.string + end + + def test_idle_connections_closed_immediately_on_shutdown + server_run + socket = new_socket + sleep 0.5 # give enough time for new connection to enter reactor + @server.stop false + + assert socket.wait_readable(1), 'Unexpected timeout' + assert_raises EOFError do + socket.read_nonblock(256) + end + end + + def test_run_stop_thread_safety + 100.times do + thread = @server.run + @server.stop + assert thread.join(1) + end + end + + def test_command_ignored_before_run + @server.stop # ignored + @server.run + @server.halt + done = Queue.new + @server.events.register(:state) do |state| + done << @server.instance_variable_get(:@status) if state == :done + end + assert_equal :halt, done.pop + end + + def test_custom_io_selector + backend = NIO::Selector.backends.first + + @server = Puma::Server.new @app, @events, {log_writer: @log_writer, :io_selector_backend => backend} + @server.run + + selector = 
@server.instance_variable_get(:@reactor).instance_variable_get(:@selector) + + assert_equal selector.backend, backend + end + + def test_drain_on_shutdown(drain=true) + num_connections = 10 + + wait = Queue.new + server_run(drain_on_shutdown: drain, max_threads: 1) do + wait.pop + [200, {}, ["DONE"]] + end + connections = Array.new(num_connections) { send_http GET_10 } + @server.stop + wait.close + bad = 0 + connections.each do |s| + begin + if s.wait_readable(1) and drain # JRuby may hang on read with drain is false + assert_match 'DONE', s.read_body + else + bad += 1 + end + rescue Errno::ECONNRESET + bad += 1 + end + end + if drain + assert_equal 0, bad + else + refute_equal 0, bad + end + end + + def test_not_drain_on_shutdown + test_drain_on_shutdown false + end + + def test_remote_address_header + server_run(remote_address: :header, remote_address_header: 'HTTP_X_REMOTE_IP') do |env| + [200, {}, [env['REMOTE_ADDR']]] + end + + body = send_http_read_resp_body "GET / HTTP/1.1\r\nX-Remote-IP: 1.2.3.4\r\n\r\n" + assert_equal '1.2.3.4', body + + # TODO: it would be great to test a connection from a non-localhost IP, but we can't really do that. For + # now, at least test that it doesn't return garbage. 
+ body = send_http_read_resp_body "GET / HTTP/1.1\r\n\r\n" + assert_equal @host, body + end + + # see https://github.com/sinatra/sinatra/blob/master/examples/stream.ru + def test_streaming_enum_body_1 + str = "Hello Puma World" + body_len = str.bytesize * 3 + + server_run do |env| + hdrs = {} + hdrs['Content-Type'] = "text; charset=utf-8" + + body = Enumerator.new do |yielder| + yielder << str + sleep 0.5 + yielder << str + sleep 1.5 + yielder << str + end + [200, hdrs, body] + end + + response = send_http_read_response + response_body = response.decode_body + times = response.times + + assert_equal body_len, response_body.bytesize + assert_equal str * 3, response_body + assert times[1] - times[0] > 0.4 + assert times[1] - times[0] < 1 + assert times[2] - times[1] > 1 + end + + # similar to a longer running app passing its output thru an enum body + # example - https://github.com/dentarg/testssl.web + def test_streaming_enum_body_2 + str = "Hello Puma World" + loops = 10 + body_len = str.bytesize * loops + + server_run do |env| + hdrs = {} + hdrs['Content-Type'] = "text; charset=utf-8" + + body = Enumerator.new do |yielder| + loops.times do |i| + sleep 0.15 unless i.zero? 
+ yielder << str + end + end + [200, hdrs, body] + end + + response = send_http_read_response + response_body = response.decode_body + times = response.times + + assert_equal body_len, response_body.bytesize + assert_equal str * loops, response_body + assert_operator times.last - times.first, :>, 1.0 + end + + def test_empty_body_array_content_length_0 + server_run { |env| [404, {'Content-Length' => '0'}, []] } + + response = send_http_read_response GET_11 + # Not Found + assert_equal "HTTP/1.1 404 #{STATUS_CODES[404]}\r\nContent-Length: 0\r\n\r\n", response + end + + def test_empty_body_array_no_content_length + server_run { |env| [404, {}, []] } + + response = send_http_read_response GET_11 + # Not Found + assert_equal "HTTP/1.1 404 #{STATUS_CODES[404]}\r\nContent-Length: 0\r\n\r\n", response + end + + def test_empty_body_enum + server_run { |env| [404, {}, [].to_enum] } + + response = send_http_read_response GET_11 + # Not Found + assert_equal "HTTP/1.1 404 #{STATUS_CODES[404]}\r\nTransfer-Encoding: chunked\r\n\r\n0\r\n\r\n", response + end + + def test_form_data_encoding_windows_bom + req_body = nil + + str = "──── Hello,World,From,Puma ────\r\n" + + file_contents = str * 5_500 # req body is > 256 kB + + file_bytesize = file_contents.bytesize + 3 # 3 = BOM byte size + + fio = Tempfile.create 'win_bom_utf8_' + + temp_file_path = fio.path + fio.close + + File.open temp_file_path, "wb:UTF-8" do |f| + f.write "\xEF\xBB\xBF#{file_contents}" + end + + server_run do |env| + req_body = env['rack.input'].read + [200, {}, [req_body]] + end + + cmd = "curl -H 'transfer-encoding: chunked' --form data=@#{temp_file_path} http://127.0.0.1:#{@bind_port}/" + + out_r, _, _ = spawn_cmd cmd + + out_r.wait_readable 3 + + form_file_data = req_body.split("\r\n\r\n", 2)[1].sub(/\r\n----\S+\r\n\z/, '') + + assert_equal file_bytesize, form_file_data.bytesize + assert_equal out_r.read.bytesize, req_body.bytesize + ensure + File.unlink(temp_file_path) if File.exist? 
temp_file_path + end + + def test_form_data_encoding_windows + req_body = nil + + str = "──── Hello,World,From,Puma ────\r\n" + + file_contents = str * 5_500 # req body is > 256 kB + + file_bytesize = file_contents.bytesize + + fio = tempfile_create 'win_utf8_', file_contents + + temp_file_path = fio.path + fio.close + + server_run do |env| + req_body = env['rack.input'].read + [200, {}, [req_body]] + end + + cmd = "curl -H 'transfer-encoding: chunked' --form data=@#{temp_file_path} http://127.0.0.1:#{@bind_port}/" + + out_r, _, _ = spawn_cmd cmd + + out_r.wait_readable 3 + + form_file_data = req_body.split("\r\n\r\n", 2)[1].sub(/\r\n----\S+\r\n\z/, '') + + assert_equal file_bytesize, form_file_data.bytesize + assert_equal out_r.read.bytesize, req_body.bytesize + end + + def test_supported_http_methods_match + server_run(supported_http_methods: ['PROPFIND', 'PROPPATCH']) do |env| + body = [env['REQUEST_METHOD']] + [200, {}, body] + end + body = send_http_read_resp_body "PROPFIND / HTTP/1.0\r\n\r\n" + assert_equal 'PROPFIND', body + end + + def test_supported_http_methods_no_match + server_run(supported_http_methods: ['PROPFIND', 'PROPPATCH']) do |env| + body = [env['REQUEST_METHOD']] + [200, {}, body] + end + response = send_http_read_response GET_10 + assert_match 'Not Implemented', response.status + end + + def test_supported_http_methods_accept_all + server_run(supported_http_methods: :any) do |env| + body = [env['REQUEST_METHOD']] + [200, {}, body] + end + body = send_http_read_resp_body "YOUR_SPECIAL_METHOD / HTTP/1.0\r\n\r\n" + assert_match 'YOUR_SPECIAL_METHOD', body + end + + def test_supported_http_methods_empty + server_run(supported_http_methods: []) do |env| + body = [env['REQUEST_METHOD']] + [200, {}, body] + end + response = send_http_read_response "GET / HTTP/1.0\r\n\r\n" + assert_match(/\AHTTP\/1\.0 501 Not Implemented/, response) + end + + + def spawn_cmd(env = {}, cmd) + opts = {} + + out_r, out_w = IO.pipe + opts[:out] = out_w + + err_r, err_w = 
IO.pipe + opts[:err] = err_w + + out_r.binmode + err_r.binmode + + pid = spawn(env, cmd, opts) + [out_w, err_w].each(&:close) + [out_r, err_r, pid] + end + + def test_lowlevel_error_handler_response + options = { + lowlevel_error_handler: ->(_error) do + [500, {}, ["something wrong happened"]] + end + } + broken_app = ->(_env) { [200, nil, []] } + + server_run(**options, &broken_app) + + body = send_http_read_resp_body "GET / HTTP/1.1\r\n\r\n" + + assert_equal "something wrong happened", body + end + + def test_cl_empty_string + server_run do |env| + [200, {}, [""]] + end + + empty_cl_request = <<~REQ.gsub("\n", "\r\n") + GET / HTTP/1.1 + Host: localhost + Content-Length: + + GET / HTTP/1.1 + Host: localhost + + REQ + + all = send_http_read_all empty_cl_request + assert_start_with all, 'HTTP/1.1 400 Bad Request' + end + + def test_crlf_trailer_smuggle + server_run do |env| + [200, {}, [""]] + end + + smuggled_payload = <<~REQ.gsub("\n", "\r\n") + GET / HTTP/1.1 + Transfer-Encoding: chunked + Host: whatever + + 0 + X:POST / HTTP/1.1 + Host: whatever + + GET / HTTP/1.1 + Host: whatever + + REQ + + response = send_http_read_all smuggled_payload + assert_equal 2, response.scan("HTTP/1.1 200 OK").size + end + + # test to check if content-length is ignored when 'transfer-encoding: chunked' + # is used. 
See also test_large_chunked_request + def test_cl_and_te_smuggle + body = nil + server_run { |env| + body = env['rack.input'].read + [200, {}, [""]] + } + + req = <<~REQ.gsub("\n", "\r\n") + POST /search HTTP/1.1 + Host: vulnerable-website.com + Content-Type: application/x-www-form-urlencoded + Content-Length: 4 + Transfer-Encoding: chunked + + 7b + GET /404 HTTP/1.1 + Host: vulnerable-website.com + Content-Type: application/x-www-form-urlencoded + Content-Length: 144 + + x= + 0 + + REQ + + response = send_http_read_response req + + assert_includes body, "GET /404 HTTP/1.1\r\n" + assert_includes body, "Content-Length: 144\r\n" + assert_equal 1, response.scan("HTTP/1.1 200 OK").size + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_puma_server_current.rb b/vendor/cache/puma-fba741b91780/test/test_puma_server_current.rb new file mode 100644 index 000000000..45c469c3f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_puma_server_current.rb @@ -0,0 +1,45 @@ +# frozen_string_literal: true + +# Released under the MIT License. +# Copyright, 2023, by Samuel Williams. + +require_relative "helper" + +require "puma/server" + +class PumaServerCurrentApplication + def call(env) + [200, {"Content-Type" => "text/plain"}, [Puma::Server.current.to_s]] + end +end + +class PumaServerCurrentTest < Minitest::Test + parallelize_me! + + def setup + @tester = PumaServerCurrentApplication.new + @server = Puma::Server.new @tester, nil, {log_writer: Puma::LogWriter.strings, clean_thread_locals: true} + @port = (@server.add_tcp_listener "127.0.0.1", 0).addr[1] + @tcp = "http://127.0.0.1:#{@port}" + @url = URI.parse(@tcp) + @server.run + end + + def teardown + @server.stop(true) + end + + def test_clean_thread_locals + server_string = @server.to_s + responses = [] + + # This must be a persistent connection to hit the `clean_thread_locals` code path. 
+ Net::HTTP.new(@url.host, @url.port).start do |connection| + 3.times do + responses << connection.get("/").body + end + end + + assert_equal [server_string]*3, responses + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_puma_server_hijack.rb b/vendor/cache/puma-fba741b91780/test/test_puma_server_hijack.rb new file mode 100644 index 000000000..30f48b435 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_puma_server_hijack.rb @@ -0,0 +1,223 @@ +require_relative "helper" +require "puma/events" +require "puma/server" +require "net/http" +require "nio" + +require "rack" +require "rack/body_proxy" + +# Tests check both the proper passing of the socket to the app, and also calling +# of `body.close` on the response body. Rack spec is unclear as to whether +# calling close is expected. +# +# The sleep statements may not be needed for local CI, but are needed +# for use with GitHub Actions... + +class TestPumaServerHijack < Minitest::Test + parallelize_me! + + def setup + @host = "127.0.0.1" + + @ios = [] + + @app = ->(env) { [200, {}, [env['rack.url_scheme']]] } + + @log_writer = Puma::LogWriter.strings + @events = Puma::Events.new + end + + def teardown + return if skipped? + @server.stop(true) + assert_empty @log_writer.stdout.string + assert_empty @log_writer.stderr.string + + # Errno::EBADF raised on macOS + @ios.each do |io| + begin + io.close if io.respond_to?(:close) && !io.closed? + File.unlink io.path if io.is_a? File + rescue Errno::EBADF + ensure + io = nil + end + end + end + + def server_run(**options, &block) + options[:log_writer] ||= @log_writer + options[:min_threads] ||= 1 + @server = Puma::Server.new block || @app, @events, options + @port = (@server.add_tcp_listener @host, 0).addr[1] + @server.run + end + + # only for shorter bodies! 
+ def send_http_and_sysread(req) + send_http(req).sysread 2_048 + end + + def send_http_and_read(req) + send_http(req).read + end + + def send_http(req) + t = new_connection + t.syswrite req + t + end + + def new_connection + TCPSocket.new(@host, @port).tap {|sock| @ios << sock} + end + + # Full hijack does not return headers + def test_full_hijack_body_close + @body_closed = false + server_run do |env| + io = env['rack.hijack'].call + io.syswrite 'Server listening' + io.wait_readable 2 + io.syswrite io.sysread(256) + body = ::Rack::BodyProxy.new([]) { @body_closed = true } + [200, {}, body] + end + + sock = send_http "GET / HTTP/1.1\r\n\r\n" + + sock.wait_readable 2 + assert_equal "Server listening", sock.sysread(256) + + sock.syswrite "this should echo" + assert_equal "this should echo", sock.sysread(256) + Thread.pass + sleep 0.001 # intermittent failure, may need to increase in CI + assert @body_closed, "Reponse body must be closed" + end + + def test_101_body + headers = { + 'Upgrade' => 'websocket', + 'Connection' => 'Upgrade', + 'Sec-WebSocket-Accept' => 's3pPLMBiTxaQ9kYGzzhZRbK+xOo=', + 'Sec-WebSocket-Protocol' => 'chat' + } + + body = -> (io) { + # below for TruffleRuby error with io.sysread + # Read Errno::EAGAIN: Resource temporarily unavailable + io.wait_readable 0.1 + io.syswrite io.sysread(256) + io.close + } + + server_run do |env| + [101, headers, body] + end + + sock = send_http "GET / HTTP/1.1\r\n\r\n" + resp = sock.sysread 1_024 + echo_msg = "This should echo..." 
+ sock.syswrite echo_msg + + assert_includes resp, 'Connection: Upgrade' + assert_equal echo_msg, sock.sysread(256) + end + + def test_101_header + headers = { + 'Upgrade' => 'websocket', + 'Connection' => 'Upgrade', + 'Sec-WebSocket-Accept' => 's3pPLMBiTxaQ9kYGzzhZRbK+xOo=', + 'Sec-WebSocket-Protocol' => 'chat', + 'rack.hijack' => -> (io) { + # below for TruffleRuby error with io.sysread + # Read Errno::EAGAIN: Resource temporarily unavailable + io.wait_readable 0.1 + io.syswrite io.sysread(256) + io.close + } + } + + server_run do |env| + [101, headers, []] + end + + sock = send_http "GET / HTTP/1.1\r\n\r\n" + resp = sock.sysread 1_024 + echo_msg = "This should echo..." + sock.syswrite echo_msg + + assert_includes resp, 'Connection: Upgrade' + assert_equal echo_msg, sock.sysread(256) + end + + def test_http_10_header_with_content_length + body_parts = ['abc', 'de'] + + server_run do + hijack_lambda = proc do | io | + io.write(body_parts[0]) + io.write(body_parts[1]) + io.close + end + [200, {"Content-Length" => "5", 'rack.hijack' => hijack_lambda}, nil] + end + + # using sysread may only receive part of the response + data = send_http_and_read "GET / HTTP/1.0\r\nConnection: close\r\n\r\n" + + assert_equal "HTTP/1.0 200 OK\r\nContent-Length: 5\r\n\r\nabcde", data + end + + def test_partial_hijack_body_closes_body + skip 'Not supported with Rack 1.x' if Rack.release.start_with? '1.' + @available = true + hdrs = { 'Content-Type' => 'text/plain' } + body = ::Rack::BodyProxy.new(HIJACK_LAMBDA) { @available = true } + partial_hijack_closes_body(hdrs, body) + end + + def test_partial_hijack_header_closes_body_correct_precedence + skip 'Not supported with Rack 1.x' if Rack.release.start_with? '1.' 
+ @available = true + incorrect_lambda = ->(io) { + io.syswrite 'incorrect body.call' + io.close + } + hdrs = { 'Content-Type' => 'text/plain', 'rack.hijack' => HIJACK_LAMBDA} + body = ::Rack::BodyProxy.new(incorrect_lambda) { @available = true } + partial_hijack_closes_body(hdrs, body) + end + + HIJACK_LAMBDA = ->(io) { + io.syswrite 'hijacked' + io.close + } + + def partial_hijack_closes_body(hdrs, body) + server_run do + if @available + @available = false + [200, hdrs, body] + else + [500, { 'Content-Type' => 'text/plain' }, ['incorrect']] + end + end + + sock1 = send_http "GET / HTTP/1.1\r\n\r\n" + sleep (Puma::IS_WINDOWS || !Puma::IS_MRI ? 0.3 : 0.1) + resp1 = sock1.sysread 1_024 + + sleep 0.01 # time for close block to be called ? + + sock2 = send_http "GET / HTTP/1.1\r\n\r\n" + sleep (Puma::IS_WINDOWS || !Puma::IS_MRI ? 0.3 : 0.1) + resp2 = sock2.sysread 1_024 + + assert_operator resp1, :end_with?, 'hijacked' + assert_operator resp2, :end_with?, 'hijacked' + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_puma_server_ssl.rb b/vendor/cache/puma-fba741b91780/test/test_puma_server_ssl.rb new file mode 100644 index 000000000..299bc0358 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_puma_server_ssl.rb @@ -0,0 +1,591 @@ +# Nothing in this file runs if Puma isn't compiled with ssl support +# +# helper is required first since it loads Puma, which needs to be +# loaded so HAS_SSL is defined +require_relative "helper" + +if ::Puma::HAS_SSL + require "puma/minissl" + require_relative "helpers/test_puma/puma_socket" + + if ENV['PUMA_TEST_DEBUG'] + require "openssl" unless Object.const_defined? 
:OpenSSL + if Puma::IS_JRUBY + puts "", RUBY_DESCRIPTION, "RUBYOPT: #{ENV['RUBYOPT']}", + " OpenSSL", + "OPENSSL_LIBRARY_VERSION: #{OpenSSL::OPENSSL_LIBRARY_VERSION}", + " OPENSSL_VERSION: #{OpenSSL::OPENSSL_VERSION}", "" + else + puts "", RUBY_DESCRIPTION, "RUBYOPT: #{ENV['RUBYOPT']}", + " Puma::MiniSSL OpenSSL", + "OPENSSL_LIBRARY_VERSION: #{Puma::MiniSSL::OPENSSL_LIBRARY_VERSION.ljust 32}#{OpenSSL::OPENSSL_LIBRARY_VERSION}", + " OPENSSL_VERSION: #{Puma::MiniSSL::OPENSSL_VERSION.ljust 32}#{OpenSSL::OPENSSL_VERSION}", "" + end + end +end + +class TestPumaServerSSL < Minitest::Test + parallelize_me! + + include TestPuma + include TestPuma::PumaSocket + + PROTOCOL_USE_MIN_MAX = + OpenSSL::SSL::SSLContext.private_instance_methods(false).include?(:set_minmax_proto_version) + + OPENSSL_3 = OpenSSL::OPENSSL_LIBRARY_VERSION.match?(/OpenSSL 3\.\d\.\d/) + + def setup + @server = nil + end + + def teardown + @server&.stop true + end + + # yields ctx to block, use for ctx setup & configuration + def start_server(&server_ctx) + app = lambda { |env| [200, {}, [env['rack.url_scheme']]] } + + ctx = Puma::MiniSSL::Context.new + + if Puma.jruby? 
+ ctx.keystore = File.expand_path "../examples/puma/keystore.jks", __dir__ + ctx.keystore_pass = 'jruby_puma' + else + ctx.key = File.expand_path "../examples/puma/puma_keypair.pem", __dir__ + ctx.cert = File.expand_path "../examples/puma/cert_puma.pem", __dir__ + end + + ctx.verify_mode = Puma::MiniSSL::VERIFY_NONE + + yield ctx if server_ctx + + @log_stdout = StringIO.new + @log_stderr = StringIO.new + @log_writer = SSLLogWriterHelper.new @log_stdout, @log_stderr + @server = Puma::Server.new app, nil, {log_writer: @log_writer} + @port = (@server.add_ssl_listener HOST, 0, ctx).addr[1] + @bind_port = @port + @server.run + end + + def test_url_scheme_for_https + start_server + assert_equal "https", send_http_read_resp_body(ctx: new_ctx) + end + + def test_request_wont_block_thread + start_server + # Open a connection and give enough data to trigger a read, then wait + ctx = OpenSSL::SSL::SSLContext.new + ctx.verify_mode = OpenSSL::SSL::VERIFY_NONE + @bind_port = @server.connected_ports[0] + + socket = send_http "HEAD", ctx: new_ctx + sleep 0.1 + + # Capture the amount of threads being used after connecting and being idle + thread_pool = @server.instance_variable_get(:@thread_pool) + busy_threads = thread_pool.spawned - thread_pool.waiting + + socket.close + + # The thread pool should be empty since the request would block on read + # and our request should have been moved to the reactor. 
+ assert busy_threads.zero?, "Our connection is monopolizing a thread" + end + + def test_very_large_return + start_server + giant = "x" * 2056610 + + @server.app = proc { [200, {}, [giant]] } + + body = send_http_read_resp_body(ctx: new_ctx) + + assert_equal giant.bytesize, body.bytesize + end + + def test_form_submit + start_server + @server.app = proc { |env| [200, {}, [env['rack.url_scheme'], "\n", env['rack.input'].read]] } + + req = "POST / HTTP/1.1\r\nContent-Type: text/plain\r\nContent-Length: 7\r\n\r\na=1&b=2" + + body = send_http_read_resp_body req, ctx: new_ctx + + assert_equal "https\na=1&b=2", body + end + + def rejection(server_ctx, min_max, ssl_version) + if server_ctx + start_server(&server_ctx) + else + start_server + end + + msg = nil + + assert_raises(OpenSSL::SSL::SSLError) do + begin + send_http_read_response ctx: new_ctx { |ctx| + if PROTOCOL_USE_MIN_MAX && min_max + ctx.max_version = min_max + else + ctx.ssl_version = ssl_version + end + } + rescue => e + msg = e.message + raise e + end + end + + expected = Puma::IS_JRUBY ? 
+ /No appropriate protocol \(protocol is disabled or cipher suites are inappropriate\)/ : + /SSL_connect SYSCALL returned=5|wrong version number|(unknown|unsupported) protocol|no protocols available|version too low|unknown SSL method/ + assert_match expected, msg + + # make sure a good request succeeds + assert_equal "https", send_http_read_resp_body(ctx: new_ctx) + end + + def test_ssl_v3_rejection + skip-("SSLv3 protocol is unavailable") if Puma::MiniSSL::OPENSSL_NO_SSL3 + + rejection nil, nil, :SSLv3 + end + + def test_tls_v1_rejection + rejection ->(ctx) { ctx.no_tlsv1 = true }, :TLS1, :TLSv1 + end + + def test_tls_v1_1_rejection + rejection ->(ctx) { ctx.no_tlsv1_1 = true }, :TLS1_1, :TLSv1_1 + end + + def test_tls_v1_3 + skip("TLSv1.3 protocol can not be set") unless OpenSSL::SSL::SSLContext.instance_methods(false).include?(:min_version=) + + start_server + + body = send_http_read_resp_body ctx: new_ctx { |c| + if PROTOCOL_USE_MIN_MAX + c.min_version = :TLS1_3 + else + c.ssl_version = :TLSv1_3 + end + } + + assert_equal "https", body + end + + def test_http_rejection + body_http = nil + body_https = nil + + start_server + + tcp = Thread.new do + assert_raises(Errno::ECONNREFUSED, EOFError, IOError, Timeout::Error) do + body_http = send_http_read_resp_body timeout: 4 + end + end + + ssl = Thread.new do + body_https = send_http_read_resp_body ctx: new_ctx + end + + tcp.join + ssl.join + sleep 1.0 + + assert_nil body_http + assert_equal "https", body_https + + thread_pool = @server.instance_variable_get(:@thread_pool) + busy_threads = thread_pool.spawned - thread_pool.waiting + + assert busy_threads.zero?, "Our connection wasn't dropped" + end + + unless Puma.jruby? 
+ def test_invalid_cert + assert_raises(Puma::MiniSSL::SSLError) do + start_server { |ctx| ctx.cert = __FILE__ } + end + end + + def test_invalid_key + assert_raises(Puma::MiniSSL::SSLError) do + start_server { |ctx| ctx.key = __FILE__ } + end + end + + def test_invalid_cert_pem + assert_raises(Puma::MiniSSL::SSLError) do + start_server { |ctx| + ctx.instance_variable_set(:@cert, nil) + ctx.cert_pem = 'Not a valid pem' + } + end + end + + def test_invalid_key_pem + assert_raises(Puma::MiniSSL::SSLError) do + start_server { |ctx| + ctx.instance_variable_set(:@key, nil) + ctx.key_pem = 'Not a valid pem' + } + end + end + + def test_invalid_ca + assert_raises(Puma::MiniSSL::SSLError) do + start_server { |ctx| + ctx.ca = __FILE__ + } + end + end + + # this may require updates if TLSv1.3 default ciphers change + def test_ssl_ciphersuites + skip('Requires TLSv1.3') unless Puma::MiniSSL::HAS_TLS1_3 + + start_server + default_cipher = send_http(ctx: new_ctx).cipher[0] + @server&.stop true + + cipher_suite = 'TLS_CHACHA20_POLY1305_SHA256' + start_server { |ctx| ctx.ssl_ciphersuites = cipher_suite} + + cipher = send_http(ctx: new_ctx).cipher + + refute_equal default_cipher, cipher[0] + assert_equal cipher_suite , cipher[0] + assert_equal cipher[1], 'TLSv1.3' + end + end +end if ::Puma::HAS_SSL + +# client-side TLS authentication tests +class TestPumaServerSSLClient < Minitest::Test + parallelize_me! unless ::Puma.jruby? + + include TestPuma + include TestPuma::PumaSocket + + CERT_PATH = File.expand_path "../examples/puma/client_certs", __dir__ + + # Context can be shared, may help with JRuby + CTX = Puma::MiniSSL::Context.new.tap { |ctx| + if Puma.jruby? 
+ ctx.keystore = "#{CERT_PATH}/keystore.jks" + ctx.keystore_pass = 'jruby_puma' + else + ctx.key = "#{CERT_PATH}/server.key" + ctx.cert = "#{CERT_PATH}/server.crt" + ctx.ca = "#{CERT_PATH}/ca.crt" + end + ctx.verify_mode = Puma::MiniSSL::VERIFY_PEER | Puma::MiniSSL::VERIFY_FAIL_IF_NO_PEER_CERT + } + + def assert_ssl_client_error_match(error, subject: nil, context: CTX, &blk) + port = 0 + + app = lambda { |env| [200, {}, [env['rack.url_scheme']]] } + + log_writer = SSLLogWriterHelper.new STDOUT, STDERR + server = Puma::Server.new app, nil, {log_writer: log_writer} + server.add_ssl_listener LOCALHOST, port, context + host_addrs = server.binder.ios.map { |io| io.to_io.addr[2] } + @bind_port = server.connected_ports[0] + server.run + + ctx = OpenSSL::SSL::SSLContext.new + yield ctx + + expected_errors = [ + EOFError, + IOError, + OpenSSL::SSL::SSLError, + Errno::ECONNABORTED, + Errno::ECONNRESET + ] + + client_error = false + begin + send_http_read_resp_body host: LOCALHOST, ctx: ctx + rescue *expected_errors => e + client_error = e + end + + sleep 0.1 + assert_equal !!error, !!client_error, client_error + if error && !error.eql?(true) + assert_match error, log_writer.error.message + assert_includes host_addrs, log_writer.addr + end + assert_equal subject, log_writer.cert.subject.to_s if subject + ensure + server&.stop true + end + + def test_verify_fail_if_no_client_cert + error = Puma.jruby? ? /Empty client certificate chain/ : 'peer did not return a certificate' + assert_ssl_client_error_match(error) do |client_ctx| + # nothing + end + end + + def test_verify_fail_if_client_unknown_ca + error = Puma.jruby? ? /No trusted certificate found/ : /self[- ]signed certificate in certificate chain/ + cert_subject = Puma.jruby? ? 
'/DC=net/DC=puma/CN=localhost' : '/DC=net/DC=puma/CN=CAU' + assert_ssl_client_error_match(error, subject: cert_subject) do |client_ctx| + key = "#{CERT_PATH}/client_unknown.key" + crt = "#{CERT_PATH}/client_unknown.crt" + client_ctx.key = OpenSSL::PKey::RSA.new File.read(key) + client_ctx.cert = OpenSSL::X509::Certificate.new File.read(crt) + client_ctx.ca_file = "#{CERT_PATH}/unknown_ca.crt" + end + end + + def test_verify_fail_if_client_expired_cert + error = Puma.jruby? ? /NotAfter:/ : 'certificate has expired' + assert_ssl_client_error_match(error, subject: '/DC=net/DC=puma/CN=localhost') do |client_ctx| + key = "#{CERT_PATH}/client_expired.key" + crt = "#{CERT_PATH}/client_expired.crt" + client_ctx.key = OpenSSL::PKey::RSA.new File.read(key) + client_ctx.cert = OpenSSL::X509::Certificate.new File.read(crt) + client_ctx.ca_file = "#{CERT_PATH}/ca.crt" + end + end + + def test_verify_client_cert + assert_ssl_client_error_match(false) do |client_ctx| + key = "#{CERT_PATH}/client.key" + crt = "#{CERT_PATH}/client.crt" + client_ctx.key = OpenSSL::PKey::RSA.new File.read(key) + client_ctx.cert = OpenSSL::X509::Certificate.new File.read(crt) + client_ctx.ca_file = "#{CERT_PATH}/ca.crt" + client_ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER + end + end + + def test_verify_client_cert_with_truststore + ctx = Puma::MiniSSL::Context.new + ctx.keystore = "#{CERT_PATH}/server.p12" + ctx.keystore_type = 'pkcs12' + ctx.keystore_pass = 'jruby_puma' + ctx.truststore = "#{CERT_PATH}/ca_store.p12" + ctx.truststore_type = 'pkcs12' + ctx.truststore_pass = 'jruby_puma' + ctx.verify_mode = Puma::MiniSSL::VERIFY_PEER + + assert_ssl_client_error_match(false, context: ctx) do |client_ctx| + key = "#{CERT_PATH}/client.key" + crt = "#{CERT_PATH}/client.crt" + client_ctx.key = OpenSSL::PKey::RSA.new File.read(key) + client_ctx.cert = OpenSSL::X509::Certificate.new File.read(crt) + client_ctx.ca_file = "#{CERT_PATH}/ca.crt" + client_ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER + end + end if 
Puma.jruby? + + def test_verify_client_cert_without_truststore + ctx = Puma::MiniSSL::Context.new + ctx.keystore = "#{CERT_PATH}/server.p12" + ctx.keystore_type = 'pkcs12' + ctx.keystore_pass = 'jruby_puma' + ctx.truststore = "#{CERT_PATH}/unknown_ca_store.p12" + ctx.truststore_type = 'pkcs12' + ctx.truststore_pass = 'jruby_puma' + ctx.verify_mode = Puma::MiniSSL::VERIFY_PEER + + assert_ssl_client_error_match(true, context: ctx) do |client_ctx| + key = "#{CERT_PATH}/client.key" + crt = "#{CERT_PATH}/client.crt" + client_ctx.key = OpenSSL::PKey::RSA.new File.read(key) + client_ctx.cert = OpenSSL::X509::Certificate.new File.read(crt) + client_ctx.ca_file = "#{CERT_PATH}/ca.crt" + client_ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER + end + end if Puma.jruby? + + def test_allows_using_default_truststore + ctx = Puma::MiniSSL::Context.new + ctx.keystore = "#{CERT_PATH}/server.p12" + ctx.keystore_type = 'pkcs12' + ctx.keystore_pass = 'jruby_puma' + ctx.truststore = :default + # NOTE: a little hard to test - we're at least asserting that setting :default does not raise errors + ctx.verify_mode = Puma::MiniSSL::VERIFY_NONE + + assert_ssl_client_error_match(false, context: ctx) do |client_ctx| + key = "#{CERT_PATH}/client.key" + crt = "#{CERT_PATH}/client.crt" + client_ctx.key = OpenSSL::PKey::RSA.new File.read(key) + client_ctx.cert = OpenSSL::X509::Certificate.new File.read(crt) + client_ctx.ca_file = "#{CERT_PATH}/ca.crt" + client_ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER + end + end if Puma.jruby? 
+ + def test_allows_to_specify_cipher_suites_and_protocols + ctx = CTX.dup + ctx.cipher_suites = [ 'TLS_RSA_WITH_AES_128_GCM_SHA256' ] + ctx.protocols = 'TLSv1.2' + + assert_ssl_client_error_match(false, context: ctx) do |client_ctx| + key = "#{CERT_PATH}/client.key" + crt = "#{CERT_PATH}/client.crt" + client_ctx.key = OpenSSL::PKey::RSA.new File.read(key) + client_ctx.cert = OpenSSL::X509::Certificate.new File.read(crt) + client_ctx.ca_file = "#{CERT_PATH}/ca.crt" + client_ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER + + client_ctx.ssl_version = :TLSv1_2 + client_ctx.ciphers = [ 'TLS_RSA_WITH_AES_128_GCM_SHA256' ] + end + end if Puma.jruby? + + def test_fails_when_no_cipher_suites_in_common + ctx = CTX.dup + ctx.cipher_suites = [ 'TLS_RSA_WITH_AES_128_GCM_SHA256' ] + ctx.protocols = 'TLSv1.2' + + assert_ssl_client_error_match(/no cipher suites in common/, context: ctx) do |client_ctx| + key = "#{CERT_PATH}/client.key" + crt = "#{CERT_PATH}/client.crt" + client_ctx.key = OpenSSL::PKey::RSA.new File.read(key) + client_ctx.cert = OpenSSL::X509::Certificate.new File.read(crt) + client_ctx.ca_file = "#{CERT_PATH}/ca.crt" + client_ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER + + client_ctx.ssl_version = :TLSv1_2 + client_ctx.ciphers = [ 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384' ] + end + end if Puma.jruby? 
+ + def test_verify_client_cert_with_truststore_without_pass + ctx = Puma::MiniSSL::Context.new + ctx.keystore = "#{CERT_PATH}/server.p12" + ctx.keystore_type = 'pkcs12' + ctx.keystore_pass = 'jruby_puma' + ctx.truststore = "#{CERT_PATH}/ca_store.jks" # cert entry can be read without password + ctx.truststore_type = 'jks' + ctx.verify_mode = Puma::MiniSSL::VERIFY_PEER + + assert_ssl_client_error_match(false, context: ctx) do |client_ctx| + key = "#{CERT_PATH}/client.key" + crt = "#{CERT_PATH}/client.crt" + client_ctx.key = OpenSSL::PKey::RSA.new File.read(key) + client_ctx.cert = OpenSSL::X509::Certificate.new File.read(crt) + client_ctx.ca_file = "#{CERT_PATH}/ca.crt" + client_ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER + end + end if Puma.jruby? + +end if ::Puma::HAS_SSL + +class TestPumaServerSSLWithCertPemAndKeyPem < Minitest::Test + include TestPuma + include TestPuma::PumaSocket + + CERT_PATH = File.expand_path "../examples/puma/client_certs", __dir__ + + def test_server_ssl_with_cert_pem_and_key_pem + ctx = Puma::MiniSSL::Context.new + ctx.cert_pem = File.read "#{CERT_PATH}/server.crt" + ctx.key_pem = File.read "#{CERT_PATH}/server.key" + + app = lambda { |env| [200, {}, [env['rack.url_scheme']]] } + log_writer = SSLLogWriterHelper.new STDOUT, STDERR + server = Puma::Server.new app, nil, {log_writer: log_writer} + server.add_ssl_listener LOCALHOST, 0, ctx + @bind_port = server.connected_ports[0] + server.run + + client_error = nil + begin + send_http_read_resp_body host: LOCALHOST, ctx: new_ctx { |c| + c.ca_file = "#{CERT_PATH}/ca.crt" + c.verify_mode = OpenSSL::SSL::VERIFY_PEER + } + rescue OpenSSL::SSL::SSLError, EOFError, Errno::ECONNRESET => e + # Errno::ECONNRESET TruffleRuby + client_error = e + end + + assert_nil client_error + ensure + server&.stop true + end +end if ::Puma::HAS_SSL && !Puma::IS_JRUBY + +# +# Test certificate chain support, The certs and the whole certificate chain for +# this tests are located in ../examples/puma/chain_cert and were 
generated with +# the following commands: +# +# bundle exec ruby ../examples/puma/chain_cert/generate_chain_test.rb +# +class TestPumaSSLCertChain < Minitest::Test + include TestPuma + include TestPuma::PumaSocket + + CHAIN_DIR = File.expand_path '../examples/puma/chain_cert', __dir__ + + # OpenSSL::X509::Name#to_utf8 only available in Ruby 2.5 and later + USE_TO_UTFT8 = OpenSSL::X509::Name.instance_methods(false).include? :to_utf8 + + def cert_chain(&blk) + app = lambda { |env| [200, {}, [env['rack.url_scheme']]] } + + @log_stdout = StringIO.new + @log_stderr = StringIO.new + @log_writer = SSLLogWriterHelper.new @log_stdout, @log_stderr + @server = Puma::Server.new app, nil, {log_writer: @log_writer} + + mini_ctx = Puma::MiniSSL::Context.new + mini_ctx.key = "#{CHAIN_DIR}/cert.key" + yield mini_ctx + + @bind_port = (@server.add_ssl_listener HOST, 0, mini_ctx).addr[1] + @server.run + + socket = new_socket ctx: new_ctx + + subj_chain = socket.peer_cert_chain.map(&:subject) + subj_map = USE_TO_UTFT8 ? 
+ subj_chain.map { |subj| subj.to_utf8[/CN=(.+ - )?([^,]+)/,2] } : + subj_chain.map { |subj| subj.to_s(OpenSSL::X509::Name::RFC2253)[/CN=(.+ - )?([^,]+)/,2] } + + @server&.stop true + + assert_equal ['test.puma.localhost', 'intermediate.puma.localhost', 'ca.puma.localhost'], subj_map + end + + def test_single_cert_file_with_ca + cert_chain { |mini_ctx| + mini_ctx.cert = "#{CHAIN_DIR}/cert.crt" + mini_ctx.ca = "#{CHAIN_DIR}/ca_chain.pem" + } + end + + def test_chain_cert_file_without_ca + cert_chain { |mini_ctx| mini_ctx.cert = "#{CHAIN_DIR}/cert_chain.pem" } + end + + def test_single_cert_string_with_ca + cert_chain { |mini_ctx| + mini_ctx.cert_pem = File.read "#{CHAIN_DIR}/cert.crt" + mini_ctx.ca = "#{CHAIN_DIR}/ca_chain.pem" + } + end + + def test_chain_cert_string_without_ca + cert_chain { |mini_ctx| mini_ctx.cert_pem = File.read "#{CHAIN_DIR}/cert_chain.pem" } + end +end if ::Puma::HAS_SSL && !::Puma::IS_JRUBY diff --git a/vendor/cache/puma-fba741b91780/test/test_pumactl.rb b/vendor/cache/puma-fba741b91780/test/test_pumactl.rb new file mode 100644 index 000000000..54ef71d48 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_pumactl.rb @@ -0,0 +1,347 @@ +require_relative "helper" +require_relative "helpers/ssl" + +require 'pathname' +require 'puma/control_cli' + +class TestPumaControlCli < Minitest::Test + include SSLHelper + + def setup + # use a pipe to get info across thread boundary + @wait, @ready = IO.pipe + end + + def wait_booted(log: nil) + @server_log = +'' + begin + sleep 0.01 until @wait.wait_readable(0.01) + line = @wait.gets + STDOUT.syswrite "#{line}\n" if log + @server_log << line + end until line&.include?('Use Ctrl-C to stop') + end + + def teardown + @wait.close + @ready.close unless @ready.closed? 
+ end + + def with_config_file(path_to_config, port) + path = Pathname.new(path_to_config) + Dir.mktmpdir do |tmp_dir| + Dir.chdir(tmp_dir) do + FileUtils.mkdir_p(path.dirname) + File.open(path, "w") { |f| f << "port #{port}" } + yield + end + end + end + + def test_blank_command + assert_system_exit_with_cli_output [], "Available commands: #{Puma::ControlCLI::CMD_PATH_SIG_MAP.keys.join(", ")}" + end + + def test_invalid_command + assert_system_exit_with_cli_output ['an-invalid-command'], 'Invalid command: an-invalid-command' + end + + def test_config_file + control_cli = Puma::ControlCLI.new ["--config-file", "test/config/state_file_testing_config.rb", "halt"] + assert_equal "t3-pid", control_cli.instance_variable_get(:@pidfile) + ensure + File.unlink "t3-pid" if File.file? "t3-pid" + end + + def test_app_env_without_environment + env = { 'APP_ENV' => 'test' } + control_cli = Puma::ControlCLI.new ['halt'], env: env + assert_equal 'test', control_cli.instance_variable_get(:@environment) + end + + def test_rack_env_without_environment + env = { "RACK_ENV" => "test" } + control_cli = Puma::ControlCLI.new ["halt"], env: env + assert_equal "test", control_cli.instance_variable_get(:@environment) + end + + def test_app_env_precedence + env = { 'APP_ENV' => nil, 'RACK_ENV' => nil, 'RAILS_ENV' => 'production' } + control_cli = Puma::ControlCLI.new ['halt'], env: env + assert_equal 'production', control_cli.instance_variable_get(:@environment) + + env = { 'APP_ENV' => nil, 'RACK_ENV' => 'test', 'RAILS_ENV' => 'production' } + control_cli = Puma::ControlCLI.new ['halt'], env: env + assert_equal 'test', control_cli.instance_variable_get(:@environment) + + env = { 'APP_ENV' => 'development', 'RACK_ENV' => 'test', 'RAILS_ENV' => 'production' } + control_cli = Puma::ControlCLI.new ['halt'], env: env + assert_equal 'development', control_cli.instance_variable_get(:@environment) + + control_cli = Puma::ControlCLI.new ['-e', 'test', 'halt'], env: env + assert_equal 'test', 
control_cli.instance_variable_get(:@environment) + end + + def test_environment_without_app_env + env = { 'APP_ENV' => nil, 'RACK_ENV' => nil, 'RAILS_ENV' => nil } + + control_cli = Puma::ControlCLI.new ['halt'], env: env + assert_nil control_cli.instance_variable_get(:@environment) + + control_cli = Puma::ControlCLI.new ['-e', 'test', 'halt'], env: env + assert_equal 'test', control_cli.instance_variable_get(:@environment) + end + + def test_environment_without_rack_env + env = { "RACK_ENV" => nil, 'RAILS_ENV' => nil } + + control_cli = Puma::ControlCLI.new ["halt"], env: env + assert_nil control_cli.instance_variable_get(:@environment) + + control_cli = Puma::ControlCLI.new ["-e", "test", "halt"] + assert_equal "test", control_cli.instance_variable_get(:@environment) + end + + def test_environment_with_rack_env + env = { "RACK_ENV" => "production" } + + control_cli = Puma::ControlCLI.new ["halt"], env: env + assert_equal "production", control_cli.instance_variable_get(:@environment) + + control_cli = Puma::ControlCLI.new ["-e", "test", "halt"], env: env + assert_equal "test", control_cli.instance_variable_get(:@environment) + end + + def test_environment_specific_config_file_exist + port = UniquePort.call + puma_config_file = "config/puma.rb" + production_config_file = "config/puma/production.rb" + + env = { "RACK_ENV" => nil } + + with_config_file(puma_config_file, port) do + control_cli = Puma::ControlCLI.new ["-e", "production", "halt"], env: env + assert_equal puma_config_file, control_cli.instance_variable_get(:@config_file) + end + + with_config_file(production_config_file, port) do + control_cli = Puma::ControlCLI.new ["-e", "production", "halt"], env: env + assert_equal production_config_file, control_cli.instance_variable_get(:@config_file) + end + end + + def test_default_config_file_exist + port = UniquePort.call + puma_config_file = "config/puma.rb" + development_config_file = "config/puma/development.rb" + + env = { "RACK_ENV" => nil, 'RAILS_ENV' => 
nil } + + with_config_file(puma_config_file, port) do + control_cli = Puma::ControlCLI.new ["halt"], env: env + assert_equal puma_config_file, control_cli.instance_variable_get(:@config_file) + end + + with_config_file(development_config_file, port) do + control_cli = Puma::ControlCLI.new ["halt"], env: env + assert_equal development_config_file, control_cli.instance_variable_get(:@config_file) + end + end + + def test_control_no_token + opts = [ + "--config-file", "test/config/control_no_token.rb", + "start" + ] + + control_cli = Puma::ControlCLI.new opts, @ready, @ready + assert_equal 'none', control_cli.instance_variable_get(:@control_auth_token) + end + + def test_control_url_and_status + host = "127.0.0.1" + port = UniquePort.call + url = "tcp://#{host}:#{port}/" + + opts = [ + "--control-url", url, + "--control-token", "ctrl", + "--config-file", "test/config/app.rb" + ] + + control_cli = Puma::ControlCLI.new (opts + ["start"]), @ready, @ready + t = Thread.new do + control_cli.run + end + + wait_booted # read server log + + bind_port = @server_log[/Listening on http:.+:(\d+)$/, 1].to_i + s = TCPSocket.new host, bind_port + s << "GET / HTTP/1.0\r\n\r\n" + body = s.read + assert_includes body, "200 OK" + assert_includes body, "embedded app" + + assert_command_cli_output opts + ["status"], "Puma is started" + assert_command_cli_output opts + ["stop"], "Command stop sent success" + + assert_kind_of Thread, t.join, "server didn't stop" + end + + # This checks that a 'signal only' command is sent + # they are defined by the `Puma::ControlCLI::NO_REQ_COMMANDS` array + # test is skipped unless NO_REQ_COMMANDS is defined + def test_control_url_with_signal_only_cmd + skip_if :windows + skip unless defined? 
Puma::ControlCLI::NO_REQ_COMMANDS + host = "127.0.0.1" + port = UniquePort.call + url = "tcp://#{host}:#{port}/" + + opts = [ + "--control-url", url, + "--control-token", "ctrl", + "--config-file", "test/config/app.rb", + "--pid", "1234" + ] + cmd = Puma::ControlCLI::NO_REQ_COMMANDS.first + log = +'' + control_cli = Puma::ControlCLI.new (opts + [cmd]), @ready, @ready + + def control_cli.send_signal + message "send_signal #{@command}\n" + end + def control_cli.send_request + message "send_request #{@command}\n" + end + + control_cli.run + @ready.close + + log = @wait.read + + assert_includes log, "send_signal #{cmd}" + refute_includes log, 'send_request' + end + + def control_ssl(host) + skip_unless :ssl + ip = host&.start_with?('[') ? host[1..-2] : host + port = UniquePort.call(ip) + url = "ssl://#{host}:#{port}?#{ssl_query}" + + opts = [ + "--control-url", url, + "--control-token", "ctrl", + "--config-file", "test/config/app.rb" + ] + + control_cli = Puma::ControlCLI.new (opts + ["start"]), @ready, @ready + t = Thread.new do + control_cli.run + end + + wait_booted + + assert_command_cli_output opts + ["status"], "Puma is started" + assert_command_cli_output opts + ["stop"], "Command stop sent success" + + assert_kind_of Thread, t.join, "server didn't stop" + end + + + def test_control_ssl_ipv4 + skip_unless :ssl + control_ssl '127.0.0.1' + end + + def test_control_ssl_ipv6 + skip_unless :ssl + control_ssl '[::1]' + end + + def test_control_aunix + skip_unless :aunix + + url = "unix://@test_control_aunix.unix" + + opts = [ + "--control-url", url, + "--control-token", "ctrl", + "--config-file", "test/config/app.rb" + ] + + control_cli = Puma::ControlCLI.new (opts + ["start"]), @ready, @ready + t = Thread.new do + control_cli.run + end + + wait_booted + + assert_command_cli_output opts + ["status"], "Puma is started" + assert_command_cli_output opts + ["stop"], "Command stop sent success" + + assert_kind_of Thread, t.join, "server didn't stop" + end + + def 
test_control_ipv6 + port = UniquePort.call '::1' + url = "tcp://[::1]:#{port}" + + opts = [ + "--control-url", url, + "--control-token", "ctrl", + "--config-file", "test/config/app.rb" + ] + + control_cli = Puma::ControlCLI.new (opts + ["start"]), @ready, @ready + t = Thread.new do + control_cli.run + end + + wait_booted + + assert_command_cli_output opts + ["status"], "Puma is started" + assert_command_cli_output opts + ["stop"], "Command stop sent success" + + assert_kind_of Thread, t.join, "server didn't stop" + end + + private + + def assert_command_cli_output(options, expected_out) + @rd, @wr = IO.pipe + cmd = Puma::ControlCLI.new(options, @wr, @wr) + begin + cmd.run + rescue SystemExit + end + @wr.close + if String === expected_out + assert_includes @rd.read, expected_out + else + assert_match expected_out, @rd.read + end + ensure + @rd.close + end + + def assert_system_exit_with_cli_output(options, expected_out) + @rd, @wr = IO.pipe + + response = assert_raises(SystemExit) do + Puma::ControlCLI.new(options, @wr, @wr).run + end + @wr.close + + assert_equal(response.status, 1) + if String === expected_out + assert_includes @rd.read, expected_out + else + assert_match expected_out, @rd.read + end + ensure + @rd.close + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_rack_handler.rb b/vendor/cache/puma-fba741b91780/test/test_rack_handler.rb new file mode 100644 index 000000000..6d532538c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_rack_handler.rb @@ -0,0 +1,389 @@ +require_relative "helper" + +# Most tests check that ::Rack::Handler::Puma works by itself +# RackUp#test_bin runs Puma using the rackup bin file +module TestRackUp + require "rack/handler/puma" + require "puma/events" + + begin + require 'rackup/version' + rescue LoadError + end + + class TestOnBootedHandler < Minitest::Test + def app + Proc.new {|env| @input = env; [200, {}, ["hello world"]]} + end + + # `Verbose: true` is included for `NameError`, + # see 
https://github.com/puma/puma/pull/3118 + def test_on_booted + on_booted = false + events = Puma::Events.new + events.on_booted do + on_booted = true + end + + launcher = nil + thread = Thread.new do + Rack::Handler::Puma.run(app, events: events, Verbose: true, Silent: true, Port: 0) do |l| + launcher = l + end + end + + # Wait for launcher to boot + Timeout.timeout(10) do + sleep 0.5 until launcher + end + sleep 1.5 unless Puma::IS_MRI + + launcher.stop + thread.join + + assert_equal on_booted, true + end + end + + class TestPathHandler < Minitest::Test + def app + Proc.new {|env| @input = env; [200, {}, ["hello world"]]} + end + + def setup + @input = nil + end + + def in_handler(app, options = {}) + options[:Port] ||= 0 + options[:Silent] = true + + @launcher = nil + thread = Thread.new do + ::Rack::Handler::Puma.run(app, **options) do |s, p| + @launcher = s + end + end + + # Wait for launcher to boot + Timeout.timeout(10) do + sleep 0.5 until @launcher + end + sleep 1.5 unless Puma::IS_MRI + + yield @launcher + ensure + @launcher&.stop + thread&.join + end + + def test_handler_boots + host = '127.0.0.1' + port = UniquePort.call + opts = { Host: host, Port: port } + in_handler(app, opts) do |launcher| + hit(["http://#{host}:#{port}/test"]) + assert_equal("/test", @input["PATH_INFO"]) + end + end + end + + class TestUserSuppliedOptionsPortIsSet < Minitest::Test + def setup + @options = {} + @options[:user_supplied_options] = [:Port] + end + + def test_port_wins_over_config + user_port = 5001 + file_port = 6001 + + Dir.mktmpdir do |d| + Dir.chdir(d) do + FileUtils.mkdir("config") + File.open("config/puma.rb", "w") { |f| f << "port #{file_port}" } + + @options[:Port] = user_port + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://0.0.0.0:#{user_port}"], conf.options[:binds] + end + end + end + end + + class TestUserSuppliedOptionsHostIsSet < Minitest::Test + def setup + @options = {} + @options[:user_supplied_options] = [:Host] 
+ end + + def test_host_uses_supplied_port_default + user_port = rand(1000..9999) + user_host = "123.456.789" + + @options[:Host] = user_host + @options[:Port] = user_port + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://#{user_host}:#{user_port}"], conf.options[:binds] + end + + def test_ipv6_host_supplied_port_default + @options[:Host] = "::1" + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://[::1]:9292"], conf.options[:binds] + end + + def test_ssl_host_supplied_port_default + @options[:Host] = "ssl://127.0.0.1" + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["ssl://127.0.0.1:9292"], conf.options[:binds] + end + + def test_relative_unix_host + @options[:Host] = "./relative.sock" + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["unix://./relative.sock"], conf.options[:binds] + end + + def test_absolute_unix_host + @options[:Host] = "/absolute.sock" + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["unix:///absolute.sock"], conf.options[:binds] + end + + def test_abstract_unix_host + @options[:Host] = "@abstract.sock" + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["unix://@abstract.sock"], conf.options[:binds] + end + end + + class TestUserSuppliedOptionsIsEmpty < Minitest::Test + def setup + @options = {} + @options[:user_supplied_options] = [] + end + + def test_config_file_wins_over_port + user_port = 5001 + file_port = 6001 + + Dir.mktmpdir do |d| + Dir.chdir(d) do + FileUtils.mkdir("config") + File.open("config/puma.rb", "w") { |f| f << "port #{file_port}" } + + @options[:Port] = user_port + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://0.0.0.0:#{file_port}"], conf.options[:binds] + end + end + end + + def test_default_host_when_using_config_file + user_port = 5001 + file_port = 6001 + + 
Dir.mktmpdir do |d| + Dir.chdir(d) do + FileUtils.mkdir("config") + File.open("config/puma.rb", "w") { |f| f << "port #{file_port}" } + + @options[:Host] = "localhost" + @options[:Port] = user_port + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://localhost:#{file_port}"], conf.options[:binds] + end + end + end + + def test_default_host_when_using_config_file_with_explicit_host + user_port = 5001 + file_port = 6001 + + Dir.mktmpdir do |d| + Dir.chdir(d) do + FileUtils.mkdir("config") + File.open("config/puma.rb", "w") { |f| f << "port #{file_port}, '1.2.3.4'" } + + @options[:Host] = "localhost" + @options[:Port] = user_port + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://1.2.3.4:#{file_port}"], conf.options[:binds] + end + end + end + end + + class TestUserSuppliedOptionsIsNotPresent < Minitest::Test + def setup + @options = {} + end + + def test_default_port_when_no_config_file + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://0.0.0.0:9292"], conf.options[:binds] + end + + def test_config_wins_over_default + file_port = 6001 + + Dir.mktmpdir do |d| + Dir.chdir(d) do + FileUtils.mkdir("config") + File.open("config/puma.rb", "w") { |f| f << "port #{file_port}" } + + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://0.0.0.0:#{file_port}"], conf.options[:binds] + end + end + end + + def test_user_port_wins_over_default_when_user_supplied_is_blank + user_port = 5001 + @options[:user_supplied_options] = [] + @options[:Port] = user_port + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://0.0.0.0:#{user_port}"], conf.options[:binds] + end + + def test_user_port_wins_over_default + user_port = 5001 + @options[:Port] = user_port + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://0.0.0.0:#{user_port}"], conf.options[:binds] + end + + 
def test_user_port_wins_over_config + user_port = 5001 + file_port = 6001 + + Dir.mktmpdir do |d| + Dir.chdir(d) do + FileUtils.mkdir("config") + File.open("config/puma.rb", "w") { |f| f << "port #{file_port}" } + + @options[:Port] = user_port + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal ["tcp://0.0.0.0:#{user_port}"], conf.options[:binds] + end + end + end + + def test_default_log_request_when_no_config_file + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal false, conf.options[:log_requests] + end + + def test_file_log_requests_wins_over_default_config + file_log_requests_config = true + + @options[:config_files] = [ + 'test/config/t1_conf.rb' + ] + + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal file_log_requests_config, conf.options[:log_requests] + end + + def test_user_log_requests_wins_over_file_config + user_log_requests_config = false + + @options[:log_requests] = user_log_requests_config + @options[:config_files] = [ + 'test/config/t1_conf.rb' + ] + + conf = ::Rack::Handler::Puma.config(->{}, @options) + conf.load + + assert_equal user_log_requests_config, conf.options[:log_requests] + end + end + + # Run using IO.popen so we don't load Rack and/or Rackup in the main process + class RackUp < Minitest::Test + def setup + FileUtils.copy_file 'test/rackup/hello.ru', 'config.ru' + end + + def teardown + FileUtils.rm 'config.ru' + end + + def test_bin + pid = nil + # JRuby & TruffleRuby take a long time using IO.popen + skip_unless :mri + io = IO.popen "rackup -p 0" + io.wait_readable 2 + sleep 0.7 + log = io.sysread 2_048 + pid = log[/PID: (\d+)/, 1] || io.pid + assert_includes log, 'Puma version' + assert_includes log, 'Use Ctrl-C to stop' + ensure + if pid + if Puma::IS_WINDOWS + `taskkill /F /PID #{pid}` + else + `kill -s KILL #{pid}` + end + end + end + + def test_rackup1 + pid = nil + # JRuby & TruffleRuby take a long time using IO.popen + skip_unless 
:mri + env = {'RUBYOPT' => '-rbundler/setup -rrack/version -rrack/handler -rrackup -rrack/handler/puma'} + io = IO.popen env, "ruby -e 'puts Rackup::VERSION'" + io.wait_readable 2 + pid = io.pid + log = io.sysread 2_048 + assert_start_with log, '1.0' + ensure + if pid + if Puma::IS_WINDOWS + `taskkill /F /PID #{pid}` + else + `kill -s KILL #{pid}` + end + end + end if Object.const_defined?(:Rackup) && ::Rackup::VERSION.start_with?('1.') + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_rack_server.rb b/vendor/cache/puma-fba741b91780/test/test_rack_server.rb new file mode 100644 index 000000000..b4540e122 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_rack_server.rb @@ -0,0 +1,273 @@ +# frozen_string_literal: true +require_relative "helper" +require "net/http" + +# don't load Rack, as it autoloads everything +begin + require "rack/body_proxy" + require "rack/lint" + require "rack/version" + require "rack/common_logger" +rescue LoadError # Rack 1.6 + require "rack" +end + +# Rack::Chunked is loaded by Rack v2, needs to be required by Rack 3.0, +# and is removed in Rack 3.1 +require "rack/chunked" if Rack.release.start_with? '3.0' + +require "nio" + +class TestRackServer < Minitest::Test + parallelize_me! 
+ + HOST = '127.0.0.1' + + STR_1KB = "──#{SecureRandom.hex 507}─\n".freeze + + class ErrorChecker + def initialize(app) + @app = app + @exception = nil + end + + attr_reader :exception, :env + + def call(env) + begin + @app.call(env) + rescue Exception => e + @exception = e + [ 500, {}, ["Error detected"] ] + end + end + end + + class ServerLint < Rack::Lint + def call(env) + if Rack.release < '3' + check_env env + else + Wrapper.new(@app, env).check_environment env + end + + @app.call(env) + end + end + + def setup + @simple = lambda { |env| [200, { "x-header" => "Works" }, ["Hello"]] } + @server = Puma::Server.new @simple + @port = (@server.add_tcp_listener HOST, 0).addr[1] + @tcp = "http://#{HOST}:#{@port}" + @stopped = false + end + + def stop + @server.stop(true) + @stopped = true + end + + def teardown + @server.stop(true) unless @stopped + end + + def header_hash(socket) + t = socket.readline("\r\n\r\n").split("\r\n") + t.shift; t.map! { |line| line.split(/:\s?/) } + t.to_h + end + + def test_lint + @checker = ErrorChecker.new ServerLint.new(@simple) + @server.app = @checker + + @server.run + + hit(["#{@tcp}/test"]) + + stop + + refute @checker.exception, "Checker raised exception" + end + + def test_large_post_body + @checker = ErrorChecker.new ServerLint.new(@simple) + @server.app = @checker + + @server.run + + big = "x" * (1024 * 16) + + Net::HTTP.post_form URI.parse("#{@tcp}/test"), + { "big" => big } + + stop + + refute @checker.exception, "Checker raised exception" + end + + def test_path_info + input = nil + @server.app = lambda { |env| input = env; @simple.call(env) } + @server.run + + hit(["#{@tcp}/test/a/b/c"]) + + stop + + assert_equal "/test/a/b/c", input['PATH_INFO'] + end + + def test_after_reply + closed = false + + @server.app = lambda do |env| + env['rack.after_reply'] << lambda { closed = true } + @simple.call(env) + end + + @server.run + + hit(["#{@tcp}/test"]) + + stop + + assert_equal true, closed + end + + def test_after_reply_exception 
+ @server.app = lambda do |env| + env['rack.after_reply'] << lambda { raise ArgumentError, "oops" } + @simple.call(env) + end + + @server.run + + socket = TCPSocket.open HOST, @port + socket.puts "GET /test HTTP/1.1\r\n" + socket.puts "Connection: Keep-Alive\r\n" + socket.puts "\r\n" + + headers = header_hash socket + + content_length = headers["Content-Length"].to_i + real_response_body = socket.read(content_length) + + assert_equal "Hello", real_response_body + + # When after_reply breaks the connection it will write the expected HTTP + # response followed by a second HTTP response: HTTP/1.1 500 + # + # This sleeps to give the server time to write the invalid/extra HTTP + # response. + # + # * If we can read from the socket, we know that extra content has been + # written to the connection and assert that it's our erroneous 500 + # response. + # * If we would block trying to read from the socket, we can assume that + # the erroneous 500 response wasn't/won't be written. + sleep 0.1 + assert_raises IO::WaitReadable do + content = socket.read_nonblock(12) + refute_includes content, "500" + end + + socket.close + + stop + end + + def test_rack_body_proxy + closed = false + body = Rack::BodyProxy.new(["Hello"]) { closed = true } + + @server.app = lambda { |env| [200, { "X-Header" => "Works" }, body] } + + @server.run + + hit(["#{@tcp}/test"]) + + stop + + assert_equal true, closed + end + + def test_rack_body_proxy_content_length + str_ary = %w[0123456789 0123456789 0123456789 0123456789] + str_ary_bytes = str_ary.to_ary.inject(0) { |sum, el| sum + el.bytesize } + + body = Rack::BodyProxy.new(str_ary) { } + + @server.app = lambda { |env| [200, { "X-Header" => "Works" }, body] } + + @server.run + + socket = TCPSocket.open HOST, @port + socket.puts "GET /test HTTP/1.1\r\n" + socket.puts "Connection: Keep-Alive\r\n" + socket.puts "\r\n" + + headers = header_hash socket + + socket.close + + stop + + if Rack.release.start_with? '1.' 
+ assert_equal "chunked", headers["Transfer-Encoding"] + else + assert_equal str_ary_bytes, headers["Content-Length"].to_i + end + end + + def test_common_logger + log = StringIO.new + + logger = Rack::CommonLogger.new(@simple, log) + + @server.app = logger + + @server.run + + hit(["#{@tcp}/test"]) + + stop + + assert_match %r!GET /test HTTP/1\.1!, log.string + end + + def test_rack_chunked_array1 + body = [STR_1KB] + app = lambda { |env| [200, { 'content-type' => 'text/plain; charset=utf-8' }, body] } + rack_app = Rack::Chunked.new app + @server.app = rack_app + @server.run + + resp = Net::HTTP.get_response URI(@tcp) + assert_equal 'chunked', resp['transfer-encoding'] + assert_equal STR_1KB, resp.body.force_encoding(Encoding::UTF_8) + end if Rack.release < '3.1' + + def test_rack_chunked_array10 + body = Array.new 10, STR_1KB + app = lambda { |env| [200, { 'content-type' => 'text/plain; charset=utf-8' }, body] } + rack_app = Rack::Chunked.new app + @server.app = rack_app + @server.run + + resp = Net::HTTP.get_response URI(@tcp) + assert_equal 'chunked', resp['transfer-encoding'] + assert_equal STR_1KB * 10, resp.body.force_encoding(Encoding::UTF_8) + end if Rack.release < '3.1' + + def test_puma_enum + body = Array.new(10, STR_1KB).to_enum + @server.app = lambda { |env| [200, { 'content-type' => 'text/plain; charset=utf-8' }, body] } + @server.run + + resp = Net::HTTP.get_response URI(@tcp) + assert_equal 'chunked', resp['transfer-encoding'] + assert_equal STR_1KB * 10, resp.body.force_encoding(Encoding::UTF_8) + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_redirect_io.rb b/vendor/cache/puma-fba741b91780/test/test_redirect_io.rb new file mode 100644 index 000000000..4a8d9df97 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_redirect_io.rb @@ -0,0 +1,102 @@ +# frozen_string_literal: true + +require_relative "helper" +require_relative "helpers/integration" + +class TestRedirectIO < TestIntegration + parallelize_me! 
+ + FILE_STR = 'puma startup' + + def setup + skip_unless_signal_exist? :HUP + super + + # Keep the Tempfile instances alive to avoid being GC'd + @out_file = Tempfile.new('puma-out') + @err_file = Tempfile.new('puma-err') + @out_file_path = @out_file.path + @err_file_path = @err_file.path + + @cli_args = ['--redirect-stdout', @out_file_path, + '--redirect-stderr', @err_file_path, + 'test/rackup/hello.ru' + ] + + end + + def teardown + return if skipped? + super + + paths = (skipped? ? [@out_file_path, @err_file_path] : + [@out_file_path, @err_file_path, @old_out_file_path, @old_err_file_path]).compact + + File.unlink(*paths) + @out_file = nil + @err_file = nil + end + + def test_sighup_redirects_io_single + skip_if :jruby # Server isn't coming up in CI, TODO Fix + + cli_server @cli_args.join ' ' + + rotate_check_logs + end + + def test_sighup_redirects_io_cluster + skip_unless :fork + + cli_server (['-w', '1'] + @cli_args).join ' ' + + rotate_check_logs + end + + private + + def log_rotate_output_files + # rename both files to .old + @old_out_file_path = "#{@out_file_path}.old" + @old_err_file_path = "#{@err_file_path}.old" + + File.rename @out_file_path, @old_out_file_path + File.rename @err_file_path, @old_err_file_path + + File.new(@out_file_path, File::CREAT).close + File.new(@err_file_path, File::CREAT).close + end + + def rotate_check_logs + assert_file_contents @out_file_path + assert_file_contents @err_file_path + + log_rotate_output_files + + Process.kill :HUP, @pid + + assert_file_contents @out_file_path + assert_file_contents @err_file_path + end + + def assert_file_contents(path, include = FILE_STR) + retries = 0 + retries_max = 50 # 5 seconds + File.open(path) do |file| + begin + file.read_nonblock 1 + file.seek 0 + assert_includes file.read, include, + "File #{File.basename(path)} does not include #{include}" + rescue EOFError + sleep 0.1 + retries += 1 + if retries < retries_max + retry + else + flunk 'File read took too long' + end + end + end + 
end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_request_invalid.rb b/vendor/cache/puma-fba741b91780/test/test_request_invalid.rb new file mode 100644 index 000000000..57eb2b39b --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_request_invalid.rb @@ -0,0 +1,246 @@ +require_relative "helper" + +# These tests check for invalid request headers and metadata. +# Content-Length, Transfer-Encoding, and chunked body size +# values are checked for validity +# +# See https://datatracker.ietf.org/doc/html/rfc7230 +# +# https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.2 Content-Length +# https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.1 Transfer-Encoding +# https://datatracker.ietf.org/doc/html/rfc7230#section-4.1 chunked body size +# +class TestRequestInvalid < Minitest::Test + # running parallel seems to take longer... + # parallelize_me! unless JRUBY_HEAD + + GET_PREFIX = "GET / HTTP/1.1\r\nConnection: close\r\n" + CHUNKED = "1\r\nH\r\n4\r\nello\r\n5\r\nWorld\r\n0\r\n\r\n" + + def setup + @host = '127.0.0.1' + + @ios = [] + + # this app should never be called, used for debugging + app = ->(env) { + body = +'' + env.each do |k,v| + body << "#{k} = #{v}\n" + if k == 'rack.input' + body << "#{v.read}\n" + end + end + [200, {}, [body]] + } + + @log_writer = Puma::LogWriter.strings + @server = Puma::Server.new app, nil, {log_writer: @log_writer} + @port = (@server.add_tcp_listener @host, 0).addr[1] + @server.run + sleep 0.15 if Puma.jruby? + end + + def teardown + @server.stop(true) + @ios.each { |io| io.close if io && !io.closed? 
} + end + + def send_http_and_read(req) + send_http(req).read + end + + def send_http(req) + new_connection << req + end + + def new_connection + TCPSocket.new(@host, @port).tap {|sock| @ios << sock} + end + + def assert_status(str, status = 400) + assert str.start_with?("HTTP/1.1 #{status}"), "'#{str[/[^\r]+/]}' should be #{status}" + end + + # ──────────────────────────────────── below are invalid Content-Length + + def test_content_length_multiple + te = [ + 'Content-Length: 5', + 'Content-Length: 5' + ].join "\r\n" + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\nHello\r\n\r\n" + + assert_status data + end + + def test_content_length_bad_characters_1 + te = 'Content-Length: 5.01' + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\nHello\r\n\r\n" + + assert_status data + end + + def test_content_length_bad_characters_2 + te = 'Content-Length: +5' + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\nHello\r\n\r\n" + + assert_status data + end + + def test_content_length_bad_characters_3 + te = 'Content-Length: 5 test' + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\nHello\r\n\r\n" + + assert_status data + end + + # ──────────────────────────────────── below are invalid Transfer-Encoding + + def test_transfer_encoding_chunked_not_last + te = [ + 'Transfer-Encoding: chunked', + 'Transfer-Encoding: gzip' + ].join "\r\n" + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n#{CHUNKED}" + + assert_status data + end + + def test_transfer_encoding_chunked_multiple + te = [ + 'Transfer-Encoding: chunked', + 'Transfer-Encoding: gzip', + 'Transfer-Encoding: chunked' + ].join "\r\n" + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n#{CHUNKED}" + + assert_status data + end + + def test_transfer_encoding_invalid_single + te = 'Transfer-Encoding: xchunked' + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n#{CHUNKED}" + + assert_status data, 501 + end + + def test_transfer_encoding_invalid_multiple + te = [ + 'Transfer-Encoding: 
x_gzip', + 'Transfer-Encoding: gzip', + 'Transfer-Encoding: chunked' + ].join "\r\n" + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n#{CHUNKED}" + + assert_status data, 501 + end + + def test_transfer_encoding_single_not_chunked + te = 'Transfer-Encoding: gzip' + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n#{CHUNKED}" + + assert_status data + end + + # ──────────────────────────────────── below are invalid chunked size + + def test_chunked_size_bad_characters_1 + te = 'Transfer-Encoding: chunked' + chunked ='5.01' + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n1\r\nh\r\n#{chunked}\r\nHello\r\n0\r\n\r\n" + + assert_status data + end + + def test_chunked_size_bad_characters_2 + te = 'Transfer-Encoding: chunked' + chunked ='+5' + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n1\r\nh\r\n#{chunked}\r\nHello\r\n0\r\n\r\n" + + assert_status data + end + + def test_chunked_size_bad_characters_3 + te = 'Transfer-Encoding: chunked' + chunked ='5 bad' + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n1\r\nh\r\n#{chunked}\r\nHello\r\n0\r\n\r\n" + + assert_status data + end + + def test_chunked_size_bad_characters_4 + te = 'Transfer-Encoding: chunked' + chunked ='0xA' + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n1\r\nh\r\n#{chunked}\r\nHelloHello\r\n0\r\n\r\n" + + assert_status data + end + + # size is less than bytesize + def test_chunked_size_mismatch_1 + te = 'Transfer-Encoding: chunked' + chunked = + "5\r\nHello\r\n" \ + "4\r\nWorld\r\n" \ + "0" + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n#{chunked}\r\n\r\n" + + assert_status data + end + + # size is greater than bytesize + def test_chunked_size_mismatch_2 + te = 'Transfer-Encoding: chunked' + chunked = + "5\r\nHello\r\n" \ + "6\r\nWorld\r\n" \ + "0" + + data = send_http_and_read "#{GET_PREFIX}#{te}\r\n\r\n#{chunked}\r\n\r\n" + + assert_status data + end + + def test_underscore_header_1 + hdrs = [ + "X-FORWARDED-FOR: 1.1.1.1", # proper + "X-FORWARDED-FOR: 
2.2.2.2", # proper + "X_FORWARDED-FOR: 3.3.3.3", # invalid, contains underscore + "Content-Length: 5", + ].join "\r\n" + + response = send_http_and_read "#{GET_PREFIX}#{hdrs}\r\n\r\nHello\r\n\r\n" + + assert_includes response, "HTTP_X_FORWARDED_FOR = 1.1.1.1, 2.2.2.2" + refute_includes response, "3.3.3.3" + end + + def test_underscore_header_2 + hdrs = [ + "X_FORWARDED-FOR: 3.3.3.3", # invalid, contains underscore + "X-FORWARDED-FOR: 2.2.2.2", # proper + "X-FORWARDED-FOR: 1.1.1.1", # proper + "Content-Length: 5", + ].join "\r\n" + + response = send_http_and_read "#{GET_PREFIX}#{hdrs}\r\n\r\nHello\r\n\r\n" + + assert_includes response, "HTTP_X_FORWARDED_FOR = 2.2.2.2, 1.1.1.1" + refute_includes response, "3.3.3.3" + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_response_header.rb b/vendor/cache/puma-fba741b91780/test/test_response_header.rb new file mode 100644 index 000000000..2f33c846c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_response_header.rb @@ -0,0 +1,160 @@ +require_relative "helper" +require "puma/events" +require "net/http" +require "nio" + +class TestResponseHeader < Minitest::Test + parallelize_me! + + # this file has limited response length, so 10kB works. + CLIENT_SYSREAD_LENGTH = 10_240 + + def setup + @host = "127.0.0.1" + + @ios = [] + + @app = ->(env) { [200, {}, [env['rack.url_scheme']]] } + + @log_writer = Puma::LogWriter.strings + @server = Puma::Server.new @app, ::Puma::Events.new, {log_writer: @log_writer, min_threads: 1} + end + + def teardown + @server.stop(true) + @ios.each { |io| io.close if io && !io.closed? 
} + end + + def server_run(app: @app, early_hints: false) + @server.app = app + @port = (@server.add_tcp_listener @host, 0).addr[1] + @server.instance_variable_set(:@early_hints, true) if early_hints + @server.run + end + + def send_http_and_read(req) + send_http(req).sysread CLIENT_SYSREAD_LENGTH + end + + def send_http(req) + new_connection << req + end + + def new_connection + TCPSocket.new(@host, @port).tap {|sock| @ios << sock} + end + + # The header keys must be Strings + def test_integer_key + server_run app: ->(env) { [200, { 1 => 'Boo'}, []] } + data = send_http_and_read "GET / HTTP/1.0\r\n\r\n" + + assert_match(/Puma caught this error/, data) + end + + # The header must respond to each + def test_nil_header + server_run app: ->(env) { [200, nil, []] } + data = send_http_and_read "GET / HTTP/1.0\r\n\r\n" + + assert_match(/Puma caught this error/, data) + end + + # The values of the header must be Strings + def test_integer_value + server_run app: ->(env) { [200, {'Content-Length' => 500}, []] } + data = send_http_and_read "GET / HTTP/1.0\r\n\r\n" + + assert_match(/HTTP\/1.0 200 OK\r\nContent-Length: 500\r\n\r\n/, data) + end + + def assert_ignore_header(name, value, opts={}) + header = { name => value } + + if opts[:early_hints] + app = ->(env) do + env['rack.early_hints'].call(header) + [200, {}, ['Hello']] + end + else + app = -> (env) { [200, header, ['hello']]} + end + + server_run(app: app, early_hints: opts[:early_hints]) + data = send_http_and_read "GET / HTTP/1.0\r\n\r\n" + + if opts[:early_hints] + refute_includes data, "HTTP/1.1 103 Early Hints" + end + + refute_includes data, "#{name}: #{value}" + end + + # The header must not contain a Status key. + def test_status_key + assert_ignore_header("Status", "500") + end + + # The header key can contain the word status. 
+ def test_key_containing_status + server_run app: ->(env) { [200, {'Teapot-Status' => 'Boiling'}, []] } + data = send_http_and_read "GET / HTTP/1.0\r\n\r\n" + + assert_match(/HTTP\/1.0 200 OK\r\nTeapot-Status: Boiling\r\nContent-Length: 0\r\n\r\n/, data) + end + + # Special headers starting “rack.” are for communicating with the server, and must not be sent back to the client. + def test_rack_key + assert_ignore_header("rack.command_to_server_only", "work") + end + + # The header key can still start with the word rack + def test_racket_key + server_run app: ->(env) { [200, {'Racket' => 'Bouncy'}, []] } + data = send_http_and_read "GET / HTTP/1.0\r\n\r\n" + + assert_match(/HTTP\/1.0 200 OK\r\nRacket: Bouncy\r\nContent-Length: 0\r\n\r\n/, data) + end + + # testing header key must conform rfc token specification + # i.e. cannot contain non-printable ASCII, DQUOTE or “(),/:;<=>?@[]{}”. + # Header keys will be set through two ways: Regular and early hints. + + def test_illegal_character_in_key + assert_ignore_header("\"F\u0000o\u0025(@o}", "Boo") + end + + def test_illegal_character_in_key_when_early_hints + assert_ignore_header("\"F\u0000o\u0025(@o}", "Boo", early_hints: true) + end + + # testing header value can be separated by \n into line, and each line must not contain characters below 037 + # Header values can be set through three ways: Regular, early hints and a special case for overriding content-length + + def test_illegal_character_in_value + assert_ignore_header("X-header", "First \000Lin\037e") + end + + def test_illegal_character_in_value_when_early_hints + assert_ignore_header("X-header", "First \000Lin\037e", early_hints: true) + end + + def test_illegal_character_in_value_when_override_content_length + assert_ignore_header("Content-Length", "\037") + end + + def test_illegal_character_in_value_when_newline + server_run app: ->(env) { [200, {'X-header' => "First\000 line\nSecond Lin\037e"}, ["Hello"]] } + data = send_http_and_read "GET / 
HTTP/1.0\r\n\r\n" + + refute_match("X-header: First\000 line\r\nX-header: Second Lin\037e\r\n", data) + end + + def test_header_value_array + server_run app: ->(env) { [200, {'set-cookie' => ['z=1', 'a=2']}, ['Hello']] } + data = send_http_and_read "GET / HTTP/1.1\r\n\r\n" + + resp = "HTTP/1.1 200 OK\r\nset-cookie: z=1\r\nset-cookie: a=2\r\nContent-Length: 5\r\n\r\n" + assert_includes data, resp + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_skip_systemd.rb b/vendor/cache/puma-fba741b91780/test/test_skip_systemd.rb new file mode 100644 index 000000000..5fc8d71fe --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_skip_systemd.rb @@ -0,0 +1,35 @@ +# frozen_string_literal: true + +require_relative "helper" +require_relative "helpers/integration" + +require "puma/plugin" + +class TestSkipSystemd < TestIntegration + + def setup + skip_unless :linux + skip_unless :unix + skip_unless_signal_exist? :TERM + skip_if :jruby + + super + end + + def teardown + super unless skipped? 
+ end + + def test_systemd_plugin_not_loaded + cli_server "test/rackup/hello.ru", + env: { 'PUMA_SKIP_SYSTEMD' => 'true', 'NOTIFY_SOCKET' => '/tmp/doesntmatter' }, config: <<~CONFIG + app do |_| + [200, {}, [Puma::Plugins.instance_variable_get(:@plugins)['systemd'].to_s]] + end + CONFIG + + assert_empty read_body(connect) + + stop_server + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_state_file.rb b/vendor/cache/puma-fba741b91780/test/test_state_file.rb new file mode 100644 index 000000000..fb16b809a --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_state_file.rb @@ -0,0 +1,27 @@ +require_relative "helper" +require_relative "helpers/tmp_path" + +require 'puma/state_file' + +class TestStateFile < Minitest::Test + include TmpPath + + def test_load_empty_value_as_nil + state_path = tmp_path('.state') + File.write state_path, <<-STATE +--- +pid: 123456 +control_url: +control_auth_token: +running_from: "/path/to/app" + STATE + + sf = Puma::StateFile.new + sf.load(state_path) + assert_equal 123456, sf.pid + assert_equal '/path/to/app', sf.running_from + assert_nil sf.control_url + assert_nil sf.control_auth_token + + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_thread_pool.rb b/vendor/cache/puma-fba741b91780/test/test_thread_pool.rb new file mode 100644 index 000000000..a70383fd8 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_thread_pool.rb @@ -0,0 +1,363 @@ +require_relative "helper" + +require "puma/thread_pool" + +class TestThreadPool < Minitest::Test + + def teardown + @pool.shutdown(1) if defined?(@pool) + end + + def new_pool(min, max, &block) + block = proc { } unless block + options = { + min_threads: min, + max_threads: max + } + @pool = Puma::ThreadPool.new('tst', options, &block) + end + + def mutex_pool(min, max, &block) + block = proc { } unless block + options = { + min_threads: min, + max_threads: max + } + @pool = MutexPool.new('tst', options, &block) + end + + # Wraps ThreadPool work in mutex 
for better concurrency control. + class MutexPool < Puma::ThreadPool + # Wait until the added work is completed before returning. + # Array argument is treated as a batch of work items to be added. + # Block will run after work is added but before it is executed on a worker thread. + def <<(work, &block) + work = [work] unless work.is_a?(Array) + with_mutex do + work.each {|arg| super arg} + yield if block + @not_full.wait(@mutex) + end + end + + def signal + @not_full.signal + end + + # If +wait+ is true, wait until the trim request is completed before returning. + def trim(force=false, wait: true) + super(force) + Thread.pass until @trim_requested == 0 if wait + end + end + + def test_append_spawns + saw = [] + pool = mutex_pool(0, 1) do |work| + saw << work + end + + pool << 1 + assert_equal 1, pool.spawned + assert_equal [1], saw + end + + def test_thread_name + skip 'Thread.name not supported' unless Thread.current.respond_to?(:name) + thread_name = nil + pool = mutex_pool(0, 1) {thread_name = Thread.current.name} + pool << 1 + assert_equal('puma tst tp 001', thread_name) + end + + def test_thread_name_linux + skip 'Thread.name not supported' unless Thread.current.respond_to?(:name) + + task_dir = File.join('', 'proc', Process.pid.to_s, 'task') + skip 'This test only works under Linux and MRI Ruby with appropriate permissions' if !(File.directory?(task_dir) && File.readable?(task_dir) && Puma::IS_MRI) + + expected_thread_name = 'puma tst tp 001' + found_thread = false + pool = mutex_pool(0, 1) do + # Read every /proc//task//comm file to find the thread name + Dir.entries(task_dir).select {|tid| File.directory?(File.join(task_dir, tid))}.each do |tid| + comm_file = File.join(task_dir, tid, 'comm') + next unless File.file?(comm_file) && File.readable?(comm_file) + + if File.read(comm_file).strip == expected_thread_name + found_thread = true + break + end + end + end + pool << 1 + + assert(found_thread, "Did not find thread with name '#{expected_thread_name}'") + 
end + + def test_converts_pool_sizes + pool = new_pool('0', '1') + + assert_equal 0, pool.spawned + + pool << 1 + + assert_equal 1, pool.spawned + end + + def test_append_queues_on_max + pool = new_pool(0, 0) do + "Hello World!" + end + + pool << 1 + pool << 2 + pool << 3 + + assert_equal 3, pool.backlog + end + + def test_thread_start_hook + started = Queue.new + options = { + min_threads: 0, + max_threads: 1, + before_thread_start: [ + proc do + started << 1 + end + ] + } + block = proc { } + pool = MutexPool.new('tst', options, &block) + + pool << 1 + + assert_equal 1, pool.spawned + assert_equal 1, started.length + end + + def test_trim + pool = mutex_pool(0, 1) + + pool << 1 + + assert_equal 1, pool.spawned + + pool.trim + assert_equal 0, pool.spawned + end + + def test_trim_leaves_min + pool = mutex_pool(1, 2) + + pool << [1, 2] + + assert_equal 2, pool.spawned + + pool.trim + assert_equal 1, pool.spawned + + pool.trim + assert_equal 1, pool.spawned + end + + def test_force_trim_doesnt_overtrim + pool = mutex_pool(1, 2) + + pool.<< [1, 2] do + assert_equal 2, pool.spawned + pool.trim true, wait: false + pool.trim true, wait: false + end + + assert_equal 1, pool.spawned + end + + def test_trim_is_ignored_if_no_waiting_threads + pool = mutex_pool(1, 2) + + pool.<< [1, 2] do + assert_equal 2, pool.spawned + pool.trim + pool.trim + end + + assert_equal 2, pool.spawned + assert_equal 0, pool.trim_requested + end + + def test_trim_thread_exit_hook + exited = Queue.new + options = { + min_threads: 0, + max_threads: 1, + before_thread_exit: [ -> { exited << 1 } ] + } + block = proc { } + pool = MutexPool.new('tst', options, &block) + + pool << 1 + + assert_equal 1, pool.spawned + + # Thread.pass helps with intermittent tests, JRuby + pool.trim + Thread.pass + sleep 0.1 unless Puma::IS_MRI # intermittent without + assert_equal 0, pool.spawned + Thread.pass + sleep 0.1 unless Puma::IS_MRI # intermittent without + assert_equal 1, exited.length + end + + def 
test_autotrim + pool = mutex_pool(1, 2) + + timeout = 0 + pool.auto_trim! timeout + + pool.<< [1, 2] do + assert_equal 2, pool.spawned + end + + start = Time.now + Thread.pass until pool.spawned == 1 || Time.now - start > 1 + + assert_equal 1, pool.spawned + end + + def test_cleanliness + values = [] + n = 100 + + pool = mutex_pool(1,1) { + values.push Thread.current[:foo] + Thread.current[:foo] = :hai + } + + pool.instance_variable_set :@clean_thread_locals, true + + pool << [1] * n + + assert_equal n, values.length + + assert_equal [], values.compact + end + + def test_reap_only_dead_threads + pool = mutex_pool(2,2) do + th = Thread.current + Thread.new {th.join; pool.signal} + th.kill + end + + assert_equal 2, pool.spawned + + pool << 1 + + assert_equal 2, pool.spawned + + pool.reap + + assert_equal 1, pool.spawned + + pool << 2 + + assert_equal 1, pool.spawned + + pool.reap + + assert_equal 0, pool.spawned + end + + def test_auto_reap_dead_threads + pool = mutex_pool(2,2) do + th = Thread.current + Thread.new {th.join; pool.signal} + th.kill + end + + timeout = 0 + pool.auto_reap! 
timeout + + assert_equal 2, pool.spawned + + pool << 1 + pool << 2 + + start = Time.now + Thread.pass until pool.spawned == 0 || Time.now - start > 1 + + assert_equal 0, pool.spawned + end + + def test_force_shutdown_immediately + rescued = false + + pool = mutex_pool(0, 1) do + begin + pool.with_force_shutdown do + pool.signal + sleep + end + rescue Puma::ThreadPool::ForceShutdown + rescued = true + end + end + + pool << 1 + pool.shutdown(0) + + assert_equal 0, pool.spawned + assert rescued + end + + def test_waiting_on_startup + pool = new_pool(1, 2) + assert_equal 1, pool.waiting + end + + def test_shutdown_with_grace + timeout = 0.01 + grace = 0.01 + + rescued = [] + pool = mutex_pool(2, 2) do + begin + pool.with_force_shutdown do + pool.signal + sleep + end + rescue Puma::ThreadPool::ForceShutdown + rescued << Thread.current + sleep + end + end + + pool << 1 + pool << 2 + + Puma::ThreadPool.stub_const(:SHUTDOWN_GRACE_TIME, grace) do + pool.shutdown(timeout) + end + assert_equal 0, pool.spawned + assert_equal 2, rescued.length + refute rescued.compact.any?(&:alive?) + end + + def test_correct_waiting_count_for_killed_threads + pool = new_pool(1, 1) { |_| } + sleep 1 + + # simulate our waiting worker thread getting killed for whatever reason + pool.instance_eval { @workers[0].kill } + sleep 1 + pool.reap + sleep 1 + + pool << 0 + sleep 1 + assert_equal 0, pool.backlog + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_unix_socket.rb b/vendor/cache/puma-fba741b91780/test/test_unix_socket.rb new file mode 100644 index 000000000..854d44c4c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_unix_socket.rb @@ -0,0 +1,46 @@ +# frozen_string_literal: true + +require_relative "helper" +require_relative "helpers/tmp_path" + +class TestPumaUnixSocket < Minitest::Test + include TmpPath + + App = lambda { |env| [200, {}, ["Works"]] } + + def teardown + return if skipped? 
+ @server.stop(true) + end + + def server_unix(type) + @tmp_socket_path = type == :unix ? tmp_path('.sock') : "@TestPumaUnixSocket" + @server = Puma::Server.new App + @server.add_unix_listener @tmp_socket_path + @server.run + end + + def test_server_unix + skip_unless :unix + server_unix :unix + sock = UNIXSocket.new @tmp_socket_path + + sock << "GET / HTTP/1.0\r\nHost: blah.com\r\n\r\n" + + expected = "HTTP/1.0 200 OK\r\nContent-Length: 5\r\n\r\nWorks" + + assert_equal expected, sock.read(expected.size) + end + + def test_server_aunix + skip_unless :aunix + server_unix :aunix + sock = UNIXSocket.new @tmp_socket_path.sub(/\A@/, "\0") + + sock << "GET / HTTP/1.0\r\nHost: blah.com\r\n\r\n" + + expected = "HTTP/1.0 200 OK\r\nContent-Length: 5\r\n\r\nWorks" + + assert_equal expected, sock.read(expected.size) + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_url_map.rb b/vendor/cache/puma-fba741b91780/test/test_url_map.rb new file mode 100644 index 000000000..2411d3501 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_url_map.rb @@ -0,0 +1,24 @@ +require_relative "helper" +require_relative "helpers/integration" + +class TestURLMap < TestIntegration + + def teardown + return if skipped? 
+ super + end + + # make sure the mapping defined in url_map_test/config.ru works + def test_basic_url_mapping + skip_if :jruby + env = { "BUNDLE_GEMFILE" => "#{__dir__}/url_map_test/Gemfile" } + Dir.chdir("#{__dir__}/url_map_test") do + cli_server set_pumactl_args, env: env + end + connection = connect("/ok") + # Puma 6.2.2 and below will time out here with Ruby v3.3 + # see https://github.com/puma/puma/pull/3165 + body = read_body(connection, 1) + assert_equal("OK", body) + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_web_concurrency_auto.rb b/vendor/cache/puma-fba741b91780/test/test_web_concurrency_auto.rb new file mode 100644 index 000000000..61c0f6303 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_web_concurrency_auto.rb @@ -0,0 +1,66 @@ +require_relative "helper" +require_relative "helpers/integration" +require_relative "helpers/test_puma/puma_socket" + +require "puma/configuration" +require 'puma/log_writer' + +class TestWebConcurrencyAuto < TestIntegration + + include TestPuma::PumaSocket + + ENV_WC_TEST = { + # -W0 removes logging of bundled gem warnings + "RUBYOPT" => "#{ENV["RUBYOPT"]} -W0", + "WEB_CONCURRENCY" => "auto" + } + + def teardown + return if skipped? 
+ super + end + + # we use `cli_server` so no concurrent_ruby files are loaded in the test process + def test_web_concurrency_with_concurrent_ruby_available + skip_unless :fork + + app = "app { |_| [200, {}, [Concurrent.available_processor_count.to_i.to_s]] }\n" + + cli_server set_pumactl_args, env: ENV_WC_TEST, config: app + + # this is the value of `@options[:workers]` in Puma::Cluster + actual = @server_log[/\* +Workers: +(\d+)$/, 1] + + get_worker_pids 0, 2 # make sure some workers have booted + + expected = send_http_read_resp_body GET_11 + + assert_equal expected, actual + end + + # Rename the processor_counter file, then restore + def test_web_concurrency_with_concurrent_ruby_unavailable + file_path = nil + skip_unless :fork + + ccr_gem = 'concurrent-ruby' + file_require = 'concurrent/utility/processor_counter' + file_path = Dir["#{ENV['GEM_HOME']}/gems/#{ccr_gem}-*/lib/#{ccr_gem}/#{file_require}.rb"].first + + if file_path && File.exist?(file_path) + File.rename file_path, "#{file_path}_orig" + else + # cannot find concurrent-ruby file? + end + + out, err = capture_io do + assert_raises(LoadError) { Puma::Configuration.new({}, {}, ENV_WC_TEST) } + end + assert_includes err, 'Please add "concurrent-ruby" to your Gemfile' + + ensure + if file_path && File.exist?("#{file_path}_orig") + File.rename "#{file_path}_orig", file_path + end + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_web_server.rb b/vendor/cache/puma-fba741b91780/test/test_web_server.rb new file mode 100644 index 000000000..73b9a591f --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_web_server.rb @@ -0,0 +1,138 @@ +# frozen_string_literal: true +# Copyright (c) 2011 Evan Phoenix +# Copyright (c) 2005 Zed A. Shaw + +require_relative "helper" + +require "puma/server" + +class TestHandler + attr_reader :ran_test + + def call(env) + @ran_test = true + + [200, {"Content-Type" => "text/plain"}, ["hello!"]] + end +end + +class WebServerTest < Minitest::Test + parallelize_me! 
+ + VALID_REQUEST = "GET / HTTP/1.1\r\nHost: www.zedshaw.com\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n" + + def setup + @tester = TestHandler.new + @server = Puma::Server.new @tester, nil, {log_writer: Puma::LogWriter.strings} + @port = (@server.add_tcp_listener "127.0.0.1", 0).addr[1] + @tcp = "http://127.0.0.1:#{@port}" + @server.run + end + + def teardown + @server.stop(true) + end + + def test_simple_server + hit(["#{@tcp}/test"]) + assert @tester.ran_test, "Handler didn't really run" + end + + def test_requests_count + assert_equal @server.requests_count, 0 + 3.times do + hit(["#{@tcp}/test"]) + end + assert_equal @server.requests_count, 3 + end + + def test_trickle_attack + socket = do_test(VALID_REQUEST, 3) + assert_match "hello", socket.read + socket.close + end + + def test_close_client + assert_raises IOError do + do_test_raise(VALID_REQUEST, 10, 20) + end + end + + def test_bad_client + socket = do_test("GET /test HTTP/BAD", 3) + assert_match "Bad Request", socket.read + socket.close + end + + def test_bad_path + socket = do_test("GET : HTTP/1.1\r\n\r\n", 3) + data = socket.read + assert_start_with data, "HTTP/1.1 400 Bad Request\r\nContent-Length: " + # match is for last backtrace line, may be brittle + assert_match(/\.rb:\d+:in [`'][^']+'\z/, data) + socket.close + end + + def test_header_is_too_long + long = "GET /test HTTP/1.1\r\n" + ("X-Big: stuff\r\n" * 15000) + "\r\n" + assert_raises Errno::ECONNRESET, Errno::EPIPE, Errno::ECONNABORTED, Errno::EINVAL, IOError do + do_test_raise(long, long.length/2, 10) + end + end + + def test_file_streamed_request + body = "a" * (Puma::Const::MAX_BODY * 2) + long = "GET /test HTTP/1.1\r\nContent-length: #{body.length}\r\nConnection: close\r\n\r\n" + body + socket = do_test(long, (Puma::Const::CHUNK_SIZE * 2) - 400) + assert_match "hello", socket.read + socket.close + end + + def test_supported_http_method + socket = do_test("PATCH www.zedshaw.com:443 HTTP/1.1\r\nConnection: close\r\n\r\n", 100) + 
response = socket.read + assert_match "hello", response + socket.close + end + + def test_nonexistent_http_method + socket = do_test("FOOBARBAZ www.zedshaw.com:443 HTTP/1.1\r\nConnection: close\r\n\r\n", 100) + response = socket.read + assert_match "Not Implemented", response + socket.close + end + + private + + def do_test(string, chunk) + # Do not use instance variables here, because it needs to be thread safe + socket = TCPSocket.new("127.0.0.1", @port); + request = StringIO.new(string) + chunks_out = 0 + + while data = request.read(chunk) + chunks_out += socket.write(data) + socket.flush + end + socket + end + + def do_test_raise(string, chunk, close_after = nil) + # Do not use instance variables here, because it needs to be thread safe + socket = TCPSocket.new("127.0.0.1", @port); + request = StringIO.new(string) + chunks_out = 0 + + while data = request.read(chunk) + chunks_out += socket.write(data) + socket.flush + socket.close if close_after && chunks_out > close_after + end + + socket.write(" ") # Some platforms only raise the exception on attempted write + socket.flush + socket + ensure + socket.close unless socket.closed? + end +end diff --git a/vendor/cache/puma-fba741b91780/test/test_worker_gem_independence.rb b/vendor/cache/puma-fba741b91780/test/test_worker_gem_independence.rb new file mode 100644 index 000000000..48030206c --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/test_worker_gem_independence.rb @@ -0,0 +1,141 @@ +require_relative "helper" +require_relative "helpers/integration" + +class TestWorkerGemIndependence < TestIntegration + + ENV_RUBYOPT = { + 'RUBYOPT' => ENV['RUBYOPT'] + } + + def setup + skip_unless :fork + super + end + + def teardown + return if skipped? 
+ FileUtils.rm current_release_symlink, force: true + super + end + + def test_changing_nio4r_version_during_phased_restart + change_gem_version_during_phased_restart old_app_dir: 'worker_gem_independence_test/old_nio4r', + old_version: '2.7.1', + new_app_dir: 'worker_gem_independence_test/new_nio4r', + new_version: '2.7.2' + end + + def test_changing_json_version_during_phased_restart + change_gem_version_during_phased_restart old_app_dir: 'worker_gem_independence_test/old_json', + old_version: '2.7.1', + new_app_dir: 'worker_gem_independence_test/new_json', + new_version: '2.7.0' + end + + def test_changing_json_version_during_phased_restart_after_querying_stats_from_status_server + @control_tcp_port = UniquePort.call + server_opts = "--control-url tcp://#{HOST}:#{@control_tcp_port} --control-token #{TOKEN}" + before_restart = ->() do + cli_pumactl "stats" + end + + change_gem_version_during_phased_restart server_opts: server_opts, + before_restart: before_restart, + old_app_dir: 'worker_gem_independence_test/old_json', + old_version: '2.7.1', + new_app_dir: 'worker_gem_independence_test/new_json', + new_version: '2.7.0' + end + + def test_changing_json_version_during_phased_restart_after_querying_gc_stats_from_status_server + @control_tcp_port = UniquePort.call + server_opts = "--control-url tcp://#{HOST}:#{@control_tcp_port} --control-token #{TOKEN}" + before_restart = ->() do + cli_pumactl "gc-stats" + end + + change_gem_version_during_phased_restart server_opts: server_opts, + before_restart: before_restart, + old_app_dir: 'worker_gem_independence_test/old_json', + old_version: '2.7.1', + new_app_dir: 'worker_gem_independence_test/new_json', + new_version: '2.7.0' + end + + def test_changing_json_version_during_phased_restart_after_querying_thread_backtraces_from_status_server + @control_tcp_port = UniquePort.call + server_opts = "--control-url tcp://#{HOST}:#{@control_tcp_port} --control-token #{TOKEN}" + before_restart = ->() do + cli_pumactl 
"thread-backtraces" + end + + change_gem_version_during_phased_restart server_opts: server_opts, + before_restart: before_restart, + old_app_dir: 'worker_gem_independence_test/old_json', + old_version: '2.7.1', + new_app_dir: 'worker_gem_independence_test/new_json', + new_version: '2.7.0' + end + + def test_changing_json_version_during_phased_restart_after_accessing_puma_stats_directly + change_gem_version_during_phased_restart old_app_dir: 'worker_gem_independence_test/old_json_with_puma_stats_after_fork', + old_version: '2.7.1', + new_app_dir: 'worker_gem_independence_test/new_json_with_puma_stats_after_fork', + new_version: '2.7.0' + end + + private + + def change_gem_version_during_phased_restart(old_app_dir:, + new_app_dir:, + old_version:, + new_version:, + server_opts: '', + before_restart: nil) + skip_unless_signal_exist? :USR1 + + set_release_symlink File.expand_path(old_app_dir, __dir__) + + Dir.chdir(current_release_symlink) do + with_unbundled_env do + silent_and_checked_system_command("bundle config --local path vendor/bundle") + silent_and_checked_system_command("bundle install") + cli_server "--prune-bundler -w 1 #{server_opts}", env: ENV_RUBYOPT + end + end + + connection = connect + initial_reply = read_body(connection) + assert_equal old_version, initial_reply + + before_restart&.call + + set_release_symlink File.expand_path(new_app_dir, __dir__) + Dir.chdir(current_release_symlink) do + with_unbundled_env do + silent_and_checked_system_command("bundle config --local path vendor/bundle") + silent_and_checked_system_command("bundle install") + end + end + start_phased_restart + + connection = connect + new_reply = read_body(connection) + assert_equal new_version, new_reply + end + + def current_release_symlink + File.expand_path "worker_gem_independence_test/current", __dir__ + end + + def set_release_symlink(target_dir) + FileUtils.rm current_release_symlink, force: true + FileUtils.symlink target_dir, current_release_symlink, force: true + end + 
+ def start_phased_restart + Process.kill :USR1, @pid + + true while @server.gets !~ /booted in [.0-9]+s, phase: 1/ + end +end diff --git a/vendor/cache/puma-fba741b91780/test/url_map_test/Gemfile b/vendor/cache/puma-fba741b91780/test/url_map_test/Gemfile new file mode 100644 index 000000000..e0695ee62 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/url_map_test/Gemfile @@ -0,0 +1 @@ +gem 'puma', path: '../..' diff --git a/vendor/cache/puma-fba741b91780/test/url_map_test/config.ru b/vendor/cache/puma-fba741b91780/test/url_map_test/config.ru new file mode 100644 index 000000000..0bd0a16df --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/url_map_test/config.ru @@ -0,0 +1,9 @@ +map "/ok" do + run ->(env) { + if Object.const_defined?(:Rack) && ::Rack.const_defined?(:URLMap) + [200, {}, ["::Rack::URLMap is loaded"]] + else + [200, {}, ["OK"]] + end + } +end diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json/Gemfile b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json/Gemfile new file mode 100644 index 000000000..ff11b1e09 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json/Gemfile @@ -0,0 +1,8 @@ +source "https://rubygems.org" + +gem 'puma', path: '../../..' 
+gem 'json', '= 2.7.0' + +if RUBY_VERSION > '3.4' + gem 'ostruct', '>= 0.6.0' +end diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json/config.ru b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json/config.ru new file mode 100644 index 000000000..a2d096f6b --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json/config.ru @@ -0,0 +1,2 @@ +require 'json' +run lambda { |env| [200, {'Content-Type'=>'text/plain'}, [JSON::VERSION]] } diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json/config/puma.rb b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json/config/puma.rb new file mode 100644 index 000000000..ab019ff13 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json/config/puma.rb @@ -0,0 +1 @@ +directory File.expand_path("../../current", __dir__) diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json_with_puma_stats_after_fork/Gemfile b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json_with_puma_stats_after_fork/Gemfile new file mode 100644 index 000000000..ff11b1e09 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json_with_puma_stats_after_fork/Gemfile @@ -0,0 +1,8 @@ +source "https://rubygems.org" + +gem 'puma', path: '../../..' 
+gem 'json', '= 2.7.0' + +if RUBY_VERSION > '3.4' + gem 'ostruct', '>= 0.6.0' +end diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json_with_puma_stats_after_fork/config.ru b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json_with_puma_stats_after_fork/config.ru new file mode 100644 index 000000000..a2d096f6b --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json_with_puma_stats_after_fork/config.ru @@ -0,0 +1,2 @@ +require 'json' +run lambda { |env| [200, {'Content-Type'=>'text/plain'}, [JSON::VERSION]] } diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json_with_puma_stats_after_fork/config/puma.rb b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json_with_puma_stats_after_fork/config/puma.rb new file mode 100644 index 000000000..98bf44583 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_json_with_puma_stats_after_fork/config/puma.rb @@ -0,0 +1,2 @@ +directory File.expand_path("../../current", __dir__) +after_worker_fork { Puma.stats } diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_nio4r/Gemfile b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_nio4r/Gemfile new file mode 100644 index 000000000..9490eb0ba --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_nio4r/Gemfile @@ -0,0 +1,4 @@ +source "https://rubygems.org" + +gem 'puma', path: '../../..' 
+gem 'nio4r', '= 2.7.2' diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_nio4r/config.ru b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_nio4r/config.ru new file mode 100644 index 000000000..773d1eed0 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_nio4r/config.ru @@ -0,0 +1 @@ +run lambda { |env| [200, {'Content-Type'=>'text/plain'}, [NIO::VERSION]] } diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_nio4r/config/puma.rb b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_nio4r/config/puma.rb new file mode 100644 index 000000000..ab019ff13 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/new_nio4r/config/puma.rb @@ -0,0 +1 @@ +directory File.expand_path("../../current", __dir__) diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json/Gemfile b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json/Gemfile new file mode 100644 index 000000000..e9e40faa3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json/Gemfile @@ -0,0 +1,8 @@ +source "https://rubygems.org" + +gem 'puma', path: '../../..' 
+gem 'json', '= 2.7.1' + +if RUBY_VERSION > '3.4' + gem 'ostruct', '>= 0.6.0' +end diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json/config.ru b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json/config.ru new file mode 100644 index 000000000..a2d096f6b --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json/config.ru @@ -0,0 +1,2 @@ +require 'json' +run lambda { |env| [200, {'Content-Type'=>'text/plain'}, [JSON::VERSION]] } diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json/config/puma.rb b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json/config/puma.rb new file mode 100644 index 000000000..ab019ff13 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json/config/puma.rb @@ -0,0 +1 @@ +directory File.expand_path("../../current", __dir__) diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json_with_puma_stats_after_fork/Gemfile b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json_with_puma_stats_after_fork/Gemfile new file mode 100644 index 000000000..e9e40faa3 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json_with_puma_stats_after_fork/Gemfile @@ -0,0 +1,8 @@ +source "https://rubygems.org" + +gem 'puma', path: '../../..' 
+gem 'json', '= 2.7.1' + +if RUBY_VERSION > '3.4' + gem 'ostruct', '>= 0.6.0' +end diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json_with_puma_stats_after_fork/config.ru b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json_with_puma_stats_after_fork/config.ru new file mode 100644 index 000000000..a2d096f6b --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json_with_puma_stats_after_fork/config.ru @@ -0,0 +1,2 @@ +require 'json' +run lambda { |env| [200, {'Content-Type'=>'text/plain'}, [JSON::VERSION]] } diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json_with_puma_stats_after_fork/config/puma.rb b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json_with_puma_stats_after_fork/config/puma.rb new file mode 100644 index 000000000..98bf44583 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_json_with_puma_stats_after_fork/config/puma.rb @@ -0,0 +1,2 @@ +directory File.expand_path("../../current", __dir__) +after_worker_fork { Puma.stats } diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_nio4r/Gemfile b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_nio4r/Gemfile new file mode 100644 index 000000000..3bdc81f66 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_nio4r/Gemfile @@ -0,0 +1,4 @@ +source "https://rubygems.org" + +gem 'puma', path: '../../..' 
+gem 'nio4r', '= 2.7.1' diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_nio4r/config.ru b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_nio4r/config.ru new file mode 100644 index 000000000..773d1eed0 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_nio4r/config.ru @@ -0,0 +1 @@ +run lambda { |env| [200, {'Content-Type'=>'text/plain'}, [NIO::VERSION]] } diff --git a/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_nio4r/config/puma.rb b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_nio4r/config/puma.rb new file mode 100644 index 000000000..ab019ff13 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/test/worker_gem_independence_test/old_nio4r/config/puma.rb @@ -0,0 +1 @@ +directory File.expand_path("../../current", __dir__) diff --git a/vendor/cache/puma-fba741b91780/tools/Dockerfile b/vendor/cache/puma-fba741b91780/tools/Dockerfile new file mode 100644 index 000000000..88cd8dfbf --- /dev/null +++ b/vendor/cache/puma-fba741b91780/tools/Dockerfile @@ -0,0 +1,16 @@ +# Use this Dockerfile to create minimal reproductions of issues + +FROM ruby:3.2 + +# throw errors if Gemfile has been modified since Gemfile.lock +RUN bundle config --global frozen 1 + +WORKDIR /usr/src/app + +COPY . . 
+ +RUN bundle install +RUN bundle exec rake compile + +EXPOSE 9292 +CMD bundle exec bin/puma test/rackup/hello.ru diff --git a/vendor/cache/puma-fba741b91780/tools/trickletest.rb b/vendor/cache/puma-fba741b91780/tools/trickletest.rb new file mode 100644 index 000000000..fa5438062 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/tools/trickletest.rb @@ -0,0 +1,44 @@ +require 'socket' +require 'stringio' + +def do_test(st, chunk) + s = TCPSocket.new('127.0.0.1',ARGV[0].to_i); + req = StringIO.new(st) + nout = 0 + randstop = rand(st.length / 10) + STDERR.puts "stopping after: #{randstop}" + + begin + while data = req.read(chunk) + nout += s.write(data) + s.flush + sleep 0.1 + if nout > randstop + STDERR.puts "BANG! after #{nout} bytes." + break + end + end + rescue Object => e + STDERR.puts "ERROR: #{e}" + ensure + s.close + end +end + +content = "-" * (1024 * 240) +st = "GET / HTTP/1.1\r\nHost: www.zedshaw.com\r\nContent-Type: text/plain\r\nContent-Length: #{content.length}\r\n\r\n#{content}" + +puts "length: #{content.length}" + +threads = [] +ARGV[1].to_i.times do + t = Thread.new do + size = 100 + puts ">>>> #{size} sized chunks" + do_test(st, size) + end + + threads << t +end + +threads.each {|t| t.join} diff --git a/vendor/cache/puma-fba741b91780/win_gem_test/Rakefile_wintest b/vendor/cache/puma-fba741b91780/win_gem_test/Rakefile_wintest new file mode 100644 index 000000000..42b3038bf --- /dev/null +++ b/vendor/cache/puma-fba741b91780/win_gem_test/Rakefile_wintest @@ -0,0 +1,11 @@ +# rake -f Rakefile_wintest -N -R norakelib + +require "rake/testtask" + +Rake::TestTask.new(:win_test) do |t| + t.libs << "test" + t.warning = false + t.options = '--verbose' +end + +task :default => [:win_test] diff --git a/vendor/cache/puma-fba741b91780/win_gem_test/package_gem.rb b/vendor/cache/puma-fba741b91780/win_gem_test/package_gem.rb new file mode 100644 index 000000000..fc3b3c704 --- /dev/null +++ b/vendor/cache/puma-fba741b91780/win_gem_test/package_gem.rb @@ -0,0 +1,21 
@@ +# frozen_string_literal: true + +require 'rubygems' +require 'rubygems/package' + +spec = Gem::Specification.load("./puma.gemspec") + +spec.files.concat ['Rakefile_wintest', 'lib/puma/puma_http11.rb'] +spec.files.concat Dir['lib/**/*.so'] +spec.test_files = Dir['{examples,test}/**/*.*'] + +# below lines are required and not gem specific +spec.platform = ARGV[0] +spec.required_ruby_version = [">= #{ARGV[1]}", "< #{ARGV[2]}"] +spec.extensions = [] +if spec.respond_to?(:metadata=) + spec.metadata.delete("msys2_mingw_dependencies") + spec.metadata['commit'] = ENV['commit_info'] +end + +Gem::Package.build(spec) diff --git a/vendor/cache/puma-fba741b91780/win_gem_test/puma.ps1 b/vendor/cache/puma-fba741b91780/win_gem_test/puma.ps1 new file mode 100644 index 000000000..01dea20bc --- /dev/null +++ b/vendor/cache/puma-fba741b91780/win_gem_test/puma.ps1 @@ -0,0 +1,67 @@ +# PowerShell script for building & testing SQLite3-Ruby fat binary gem +# Code by MSP-Greg, see https://github.com/MSP-Greg/av-gem-build-test + +# load utility functions, pass 64 or 32 +. 
$PSScriptRoot\shared\appveyor_setup.ps1 $args[0] +if ($LastExitCode) { exit } + +# above is required code +#———————————————————————————————————————————————————————————————— above for all repos + +Make-Const gem_name 'puma' +Make-Const repo_name 'puma' +Make-Const url_repo 'https://github.com/puma/puma.git' + +#———————————————————————————————————————————————————————————————— lowest ruby version +Make-Const ruby_vers_low 22 +# null = don't compile; false = compile, ignore test (allow failure); +# true = compile & test +Make-Const trunk $false ; Make-Const trunk_x64 $false +Make-Const trunk_JIT $null ; Make-Const trunk_x64_JIT $null + +#———————————————————————————————————————————————————————————————— make info +Make-Const dest_so 'lib\puma' +Make-Const exts @( + @{ 'conf' = 'ext/puma_http11/extconf.rb' ; 'so' = 'puma_http11' } +) +Make-Const write_so_require $true + +#———————————————————————————————————————————————————————————————— Pre-Compile +# runs before compiling starts on every ruby version +function Pre-Compile { + # load the correct OpenSSL version in the build system + Check-OpenSSL + Write-Host Compiling With $env:SSL_VERS +} + +#———————————————————————————————————————————————————————————————— Pre-Gem-Install +function Pre-Gem-Install { + if ($ruby -lt '23') { + gem install -N --no-user-install nio4r:2.3.1 + } else { + gem install -N --no-user-install nio4r + } +} + +#———————————————————————————————————————————————————————————————— Run-Tests +function Run-Tests { + # call with comma separated list of gems to install or update + Update-Gems minitest, minitest-retry, minitest-proveit, rack, rake + $env:CI = 1 + rake -f Rakefile_wintest -N -R norakelib | Set-Content -Path $log_name -PassThru -Encoding UTF8 + # add info after test results + $(ruby -ropenssl -e "STDOUT.puts $/ + OpenSSL::OPENSSL_LIBRARY_VERSION") | + Add-Content -Path $log_name -PassThru -Encoding UTF8 + minitest # collects test results +} + 
+#———————————————————————————————————————————————————————————————— below for all repos +# below is required code +Make-Const dir_gem $(Convert-Path $PSScriptRoot\..) +Make-Const dir_ps $PSScriptRoot + +Push-Location $PSScriptRoot +.\shared\make.ps1 +.\shared\test.ps1 +Pop-Location +exit $ttl_errors_fails + $exit_code diff --git a/vendor/cache/rails-html-sanitizer-1.6.0.gem b/vendor/cache/rails-html-sanitizer-1.6.0.gem new file mode 100644 index 000000000..08054b3e9 Binary files /dev/null and b/vendor/cache/rails-html-sanitizer-1.6.0.gem differ diff --git a/vendor/cache/rails-html-sanitizer-1.6.2.gem b/vendor/cache/rails-html-sanitizer-1.6.2.gem deleted file mode 100644 index 4e9da15ef..000000000 Binary files a/vendor/cache/rails-html-sanitizer-1.6.2.gem and /dev/null differ diff --git a/vendor/cache/rb-inotify-0.10.1.gem b/vendor/cache/rb-inotify-0.10.1.gem new file mode 100644 index 000000000..276590b7e Binary files /dev/null and b/vendor/cache/rb-inotify-0.10.1.gem differ diff --git a/vendor/cache/rb-inotify-0.11.1.gem b/vendor/cache/rb-inotify-0.11.1.gem deleted file mode 100644 index e52c22c12..000000000 Binary files a/vendor/cache/rb-inotify-0.11.1.gem and /dev/null differ diff --git a/vendor/cache/regexp_parser-2.8.1.gem b/vendor/cache/regexp_parser-2.8.1.gem new file mode 100644 index 000000000..58c023a82 Binary files /dev/null and b/vendor/cache/regexp_parser-2.8.1.gem differ diff --git a/vendor/cache/regexp_parser-2.9.3.gem b/vendor/cache/regexp_parser-2.9.3.gem deleted file mode 100644 index 6eb65e2e1..000000000 Binary files a/vendor/cache/regexp_parser-2.9.3.gem and /dev/null differ diff --git a/vendor/cache/rexml-3.3.1.gem b/vendor/cache/rexml-3.3.1.gem new file mode 100644 index 000000000..7b5956e18 Binary files /dev/null and b/vendor/cache/rexml-3.3.1.gem differ diff --git a/vendor/cache/rexml-3.4.0.gem b/vendor/cache/rexml-3.4.0.gem deleted file mode 100644 index 426d6392b..000000000 Binary files a/vendor/cache/rexml-3.4.0.gem and /dev/null differ 
diff --git a/vendor/cache/rubocop-1.42.0.gem b/vendor/cache/rubocop-1.42.0.gem new file mode 100644 index 000000000..7726cd23c Binary files /dev/null and b/vendor/cache/rubocop-1.42.0.gem differ diff --git a/vendor/cache/rubocop-1.69.2.gem b/vendor/cache/rubocop-1.69.2.gem deleted file mode 100644 index a5582bf20..000000000 Binary files a/vendor/cache/rubocop-1.69.2.gem and /dev/null differ diff --git a/vendor/cache/rubocop-ast-1.24.1.gem b/vendor/cache/rubocop-ast-1.24.1.gem new file mode 100644 index 000000000..a6fe2b4d3 Binary files /dev/null and b/vendor/cache/rubocop-ast-1.24.1.gem differ diff --git a/vendor/cache/rubocop-ast-1.37.0.gem b/vendor/cache/rubocop-ast-1.37.0.gem deleted file mode 100644 index ddfbe00a7..000000000 Binary files a/vendor/cache/rubocop-ast-1.37.0.gem and /dev/null differ diff --git a/vendor/cache/rubocop-capybara-2.21.0.gem b/vendor/cache/rubocop-capybara-2.21.0.gem deleted file mode 100644 index 8fd87ef72..000000000 Binary files a/vendor/cache/rubocop-capybara-2.21.0.gem and /dev/null differ diff --git a/vendor/cache/rubocop-factory_bot-2.26.1.gem b/vendor/cache/rubocop-factory_bot-2.26.1.gem deleted file mode 100644 index 63cd2ac7d..000000000 Binary files a/vendor/cache/rubocop-factory_bot-2.26.1.gem and /dev/null differ diff --git a/vendor/cache/rubocop-performance-1.15.2.gem b/vendor/cache/rubocop-performance-1.15.2.gem new file mode 100644 index 000000000..8736bf195 Binary files /dev/null and b/vendor/cache/rubocop-performance-1.15.2.gem differ diff --git a/vendor/cache/rubocop-performance-1.23.0.gem b/vendor/cache/rubocop-performance-1.23.0.gem deleted file mode 100644 index 68fa27f50..000000000 Binary files a/vendor/cache/rubocop-performance-1.23.0.gem and /dev/null differ diff --git a/vendor/cache/rubocop-rails-2.17.4.gem b/vendor/cache/rubocop-rails-2.17.4.gem new file mode 100644 index 000000000..9b465fe83 Binary files /dev/null and b/vendor/cache/rubocop-rails-2.17.4.gem differ diff --git 
a/vendor/cache/rubocop-rails-2.27.0.gem b/vendor/cache/rubocop-rails-2.27.0.gem deleted file mode 100644 index 6fff0e142..000000000 Binary files a/vendor/cache/rubocop-rails-2.27.0.gem and /dev/null differ diff --git a/vendor/cache/rubocop-rspec-2.16.0.gem b/vendor/cache/rubocop-rspec-2.16.0.gem new file mode 100644 index 000000000..c989a4fe0 Binary files /dev/null and b/vendor/cache/rubocop-rspec-2.16.0.gem differ diff --git a/vendor/cache/rubocop-rspec-2.31.0.gem b/vendor/cache/rubocop-rspec-2.31.0.gem deleted file mode 100644 index cef0728cf..000000000 Binary files a/vendor/cache/rubocop-rspec-2.31.0.gem and /dev/null differ diff --git a/vendor/cache/rubocop-rspec_rails-2.29.1.gem b/vendor/cache/rubocop-rspec_rails-2.29.1.gem deleted file mode 100644 index 351b67c3f..000000000 Binary files a/vendor/cache/rubocop-rspec_rails-2.29.1.gem and /dev/null differ diff --git a/vendor/cache/ruby-progressbar-1.11.0.gem b/vendor/cache/ruby-progressbar-1.11.0.gem new file mode 100644 index 000000000..a9d84e50b Binary files /dev/null and b/vendor/cache/ruby-progressbar-1.11.0.gem differ diff --git a/vendor/cache/ruby-progressbar-1.13.0.gem b/vendor/cache/ruby-progressbar-1.13.0.gem deleted file mode 100644 index c50b94b26..000000000 Binary files a/vendor/cache/ruby-progressbar-1.13.0.gem and /dev/null differ diff --git a/vendor/cache/scss_lint-0.59.0.gem b/vendor/cache/scss_lint-0.59.0.gem new file mode 100644 index 000000000..b46560d13 Binary files /dev/null and b/vendor/cache/scss_lint-0.59.0.gem differ diff --git a/vendor/cache/scss_lint-0.60.0.gem b/vendor/cache/scss_lint-0.60.0.gem deleted file mode 100644 index 48b12eda2..000000000 Binary files a/vendor/cache/scss_lint-0.60.0.gem and /dev/null differ diff --git a/vendor/cache/securerandom-0.4.0.gem b/vendor/cache/securerandom-0.4.0.gem new file mode 100644 index 000000000..9e1f4656f Binary files /dev/null and b/vendor/cache/securerandom-0.4.0.gem differ diff --git a/vendor/cache/securerandom-0.4.1.gem 
b/vendor/cache/securerandom-0.4.1.gem deleted file mode 100644 index 05072caba..000000000 Binary files a/vendor/cache/securerandom-0.4.1.gem and /dev/null differ diff --git a/vendor/cache/strscan-3.0.7.gem b/vendor/cache/strscan-3.0.7.gem new file mode 100644 index 000000000..2c084b406 Binary files /dev/null and b/vendor/cache/strscan-3.0.7.gem differ diff --git a/vendor/cache/strscan-3.0.9.gem b/vendor/cache/strscan-3.0.9.gem deleted file mode 100644 index 1a5f15422..000000000 Binary files a/vendor/cache/strscan-3.0.9.gem and /dev/null differ diff --git a/vendor/cache/unicode-display_width-2.4.2.gem b/vendor/cache/unicode-display_width-2.4.2.gem new file mode 100644 index 000000000..4402c5dd9 Binary files /dev/null and b/vendor/cache/unicode-display_width-2.4.2.gem differ diff --git a/vendor/cache/unicode-display_width-3.1.2.gem b/vendor/cache/unicode-display_width-3.1.2.gem deleted file mode 100644 index 5a6ee68bf..000000000 Binary files a/vendor/cache/unicode-display_width-3.1.2.gem and /dev/null differ diff --git a/vendor/cache/unicode-emoji-4.0.4.gem b/vendor/cache/unicode-emoji-4.0.4.gem deleted file mode 100644 index bae638f04..000000000 Binary files a/vendor/cache/unicode-emoji-4.0.4.gem and /dev/null differ