diff --git a/404.html b/404.html index dd5423b3e..b8e40a22e 100644 --- a/404.html +++ b/404.html @@ -10,13 +10,13 @@ - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/3df306b2.52523ef3.js b/assets/js/3df306b2.52523ef3.js deleted file mode 100644 index 53141bfe2..000000000 --- a/assets/js/3df306b2.52523ef3.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkuc_2_docs=self.webpackChunkuc_2_docs||[]).push([[4485],{3905:(e,t,n)=>{n.d(t,{Zo:()=>u,kt:()=>h});var r=n(67294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var c=r.createContext({}),s=function(e){var t=r.useContext(c),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},u=function(e){var t=s(e.components);return r.createElement(c.Provider,{value:t},e.children)},d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},p=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,u=l(e,["components","mdxType","originalType","parentName"]),p=s(n),h=a,m=p["".concat(c,".").concat(h)]||p[h]||d[h]||o;return n?r.createElement(m,i(i({ref:t},u),{},{components:n})):r.createElement(m,i({ref:t},u))}));function h(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,i=new Array(o);i[0]=p;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l.mdxType="string"==typeof e?e:a,i[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>l,toc:()=>s});var r=n(87462),a=(n(67294),n(3905));const o={id:"uc2e2v2",title:"UC2 Standalone Boar V2"},i=void 
0,l={unversionedId:"Electronics/uc2e2v2",id:"Electronics/uc2e2v2",title:"UC2 Standalone Boar V2",description:"\ud83d\udd0c Board layout and schematics (UC2 Standalon v2)",source:"@site/docs/03_Electronics/02_Board_Schematics.md",sourceDirName:"03_Electronics",slug:"/Electronics/uc2e2v2",permalink:"/docs/Electronics/uc2e2v2",draft:!1,tags:[],version:"current",sidebarPosition:2,frontMatter:{id:"uc2e2v2",title:"UC2 Standalone Boar V2"},sidebar:"tutorialSidebar",previous:{title:"Introduction",permalink:"/docs/Electronics/uc2e1"},next:{title:"UC2 Standalone Boar V3",permalink:"/docs/Electronics/uc2e2v3"}},c={},s=[{value:"\ud83d\udd0c Board layout and schematics (UC2 Standalon v2)",id:"-board-layout-and-schematics-uc2-standalon-v2",level:2},{value:"\u26a1 Wiring",id:"-wiring",level:2},{value:"\ud83d\udca1 UC2 LED Ring",id:"-uc2-led-ring",level:2}],u={toc:s};function d(e){let{components:t,...o}=e;return(0,a.kt)("wrapper",(0,r.Z)({},u,o,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h2",{id:"-board-layout-and-schematics-uc2-standalon-v2"},"\ud83d\udd0c Board layout and schematics (UC2 Standalon v2)"),(0,a.kt)("p",null,"The board comes with 4 motor controllers (e.g. A4988 Bipolar Stepper controller), the ESP32 Dev Kit, a bunch of pins for in/outgoing connections, 3 darlington transistors (BD809) and the power distribution. 
It is inspired by the CNC shield and can"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},"run up to 4 steppers"),(0,a.kt)("li",{parentName:"ul"},"run multiple high power LEDs"),(0,a.kt)("li",{parentName:"ul"},"be controlled via PS3/PS4 Controllers"),(0,a.kt)("li",{parentName:"ul"},"drive Adafruits Neopixels"),(0,a.kt)("li",{parentName:"ul"},"trigger a Camera"),(0,a.kt)("li",{parentName:"ul"},"provide scanning patterns for Galvos"),(0,a.kt)("li",{parentName:"ul"},"control/readout external devices using I2C")),(0,a.kt)("p",null,"We use the ESP32 in order to ensure connectivity via"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},"Wifi"),(0,a.kt)("li",{parentName:"ul"},"Bluetooth"),(0,a.kt)("li",{parentName:"ul"},"USB Serial (mostly used)")),(0,a.kt)("p",null,(0,a.kt)("img",{src:n(75584).Z,width:"3208",height:"2442"})),(0,a.kt)("h2",{id:"-wiring"},"\u26a1 Wiring"),(0,a.kt)("p",null,"Duration:5"),(0,a.kt)("p",null,"All connectors are coming with 2.54mm spaced male pins that can connect to JST connectors (e.g. Motors and LED Array). In case the casing is blocking it mechanically, you can remove the case. The WS2812B RGB Leds that are used for the LED Matrix should survive wrong polarity, but we recommend to not stress it. ",(0,a.kt)("strong",{parentName:"p"},"WARNING:")," It is also not recommended to drive the motor drivers without any motors attached. Make sure you have an electric load attached to the board when you power it up, otherwise the motor drivers may get damaged."),(0,a.kt)("div",{class:"alert-danger"},"The polarity of the LED Array matters! The UC2 LED ring module comes with a JST connector where the 3 pins represent (5V, Data, GND) **RED** / Black / Black. 
Make sure the RED cable is connected to the 5V on the PCB."),(0,a.kt)("p",null,"Below you can find a rendering of the PCB that is sitting inside the 3D printed Box with all its connectors."),(0,a.kt)("p",null,(0,a.kt)("img",{src:n(58583).Z,width:"2084",height:"1528"})),(0,a.kt)("p",null,"For those who are keen to understand the wiring, please click on the schematics below. The sources of the board will be published soon."),(0,a.kt)("p",null,(0,a.kt)("img",{src:n(97674).Z,width:"2706",height:"1408"})),(0,a.kt)("h2",{id:"-uc2-led-ring"},"\ud83d\udca1 UC2 LED Ring"),(0,a.kt)("p",null,"Duration:2"),(0,a.kt)("p",null,"As for the illumination, you can use a large variation of different light sources to get your sample in the right light. But sometimes you want to have the ability to change the contrast by illuminating from oblique, only in the center, using a darkfield ring or change degree of coherence. An off-the-shelf LED Matrix is very sufficient in most cases, but it lacks the rotational symmetry. For this we have the UC2 LED Ring Module that fits directly in the Cube, has concentric rings and still offers all the degrees of freedom that the Adafruit Neopixel gives you (e.g. 
RGB, white, patterns..)."),(0,a.kt)("p",null,(0,a.kt)("img",{src:n(64052).Z,width:"3648",height:"2736"})),(0,a.kt)("p",null,"The 3 wires that leave the satellite board deliver 5V, Data and GND and directly connect to the UC2e via the LED pin:"),(0,a.kt)("p",null,(0,a.kt)("img",{src:n(84796).Z,width:"3648",height:"2736"})))}d.isMDXComponent=!0},64052:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/UC2_electronics_board2-a358bc9a40f95dbbfb2acdc376fdd375.jpg"},84796:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/UC2_electronics_board3-bb3be5adae04ce1b932e673cf2cb4763.jpg"},58583:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/UC2_electronics_board_Kicad_0-79ec90191925a7fa808ada8fa2e45b5c.png"},75584:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/UC2_electronics_board_Kicad_6-4c23e9c6f1bb4d5bec2cdbe3ec8d9559.png"},97674:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/UC2eSchematics-8a156644b821b974b82fb61326e993c9.png"}}]); \ No newline at end of file diff --git a/assets/js/3df306b2.c95554d1.js b/assets/js/3df306b2.c95554d1.js new file mode 100644 index 000000000..d4d50d9bf --- /dev/null +++ b/assets/js/3df306b2.c95554d1.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkuc_2_docs=self.webpackChunkuc_2_docs||[]).push([[4485],{3905:(e,t,n)=>{n.d(t,{Zo:()=>d,kt:()=>h});var r=n(67294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var c=r.createContext({}),s=function(e){var t=r.useContext(c),n=t;return e&&(n="function"==typeof 
e?e(t):i(i({},t),e)),n},d=function(e){var t=s(e.components);return r.createElement(c.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},p=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,d=l(e,["components","mdxType","originalType","parentName"]),p=s(n),h=a,m=p["".concat(c,".").concat(h)]||p[h]||u[h]||o;return n?r.createElement(m,i(i({ref:t},d),{},{components:n})):r.createElement(m,i({ref:t},d))}));function h(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,i=new Array(o);i[0]=p;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l.mdxType="string"==typeof e?e:a,i[1]=l;for(var s=2;s{n.r(t),n.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>u,frontMatter:()=>o,metadata:()=>l,toc:()=>s});var r=n(87462),a=(n(67294),n(3905));const o={id:"uc2e2v2",title:"UC2 Standalone Board V2"},i=void 0,l={unversionedId:"Electronics/uc2e2v2",id:"Electronics/uc2e2v2",title:"UC2 Standalone Board V2",description:"\ud83d\udd0c Board layout and schematics (UC2 Standalon v2)",source:"@site/docs/03_Electronics/02_Board_Schematics.md",sourceDirName:"03_Electronics",slug:"/Electronics/uc2e2v2",permalink:"/docs/Electronics/uc2e2v2",draft:!1,tags:[],version:"current",sidebarPosition:2,frontMatter:{id:"uc2e2v2",title:"UC2 Standalone Board V2"},sidebar:"tutorialSidebar",previous:{title:"Introduction",permalink:"/docs/Electronics/uc2e1"},next:{title:"UC2 Standalone Board V3",permalink:"/docs/Electronics/uc2e2v3"}},c={},s=[{value:"\ud83d\udd0c Board layout and schematics (UC2 Standalon v2)",id:"-board-layout-and-schematics-uc2-standalon-v2",level:2},{value:"\u26a1 Wiring",id:"-wiring",level:2},{value:"\ud83d\udca1 UC2 LED Ring",id:"-uc2-led-ring",level:2}],d={toc:s};function 
u(e){let{components:t,...o}=e;return(0,a.kt)("wrapper",(0,r.Z)({},d,o,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h2",{id:"-board-layout-and-schematics-uc2-standalon-v2"},"\ud83d\udd0c Board layout and schematics (UC2 Standalon v2)"),(0,a.kt)("p",null,"The board comes with 4 motor controllers (e.g. A4988 Bipolar Stepper controller), the ESP32 Dev Kit, a bunch of pins for in/outgoing connections, 3 darlington transistors (BD809) and the power distribution. It is inspired by the CNC shield and can"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},"run up to 4 steppers"),(0,a.kt)("li",{parentName:"ul"},"run multiple high power LEDs"),(0,a.kt)("li",{parentName:"ul"},"be controlled via PS3/PS4 Controllers"),(0,a.kt)("li",{parentName:"ul"},"drive Adafruits Neopixels"),(0,a.kt)("li",{parentName:"ul"},"trigger a Camera"),(0,a.kt)("li",{parentName:"ul"},"provide scanning patterns for Galvos"),(0,a.kt)("li",{parentName:"ul"},"control/readout external devices using I2C")),(0,a.kt)("p",null,"We use the ESP32 in order to ensure connectivity via"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},"Wifi"),(0,a.kt)("li",{parentName:"ul"},"Bluetooth"),(0,a.kt)("li",{parentName:"ul"},"USB Serial (mostly used)")),(0,a.kt)("p",null,(0,a.kt)("img",{src:n(75584).Z,width:"3208",height:"2442"})),(0,a.kt)("h2",{id:"-wiring"},"\u26a1 Wiring"),(0,a.kt)("p",null,"Duration:5"),(0,a.kt)("p",null,"All connectors are coming with 2.54mm spaced male pins that can connect to JST connectors (e.g. Motors and LED Array). In case the casing is blocking it mechanically, you can remove the case. The WS2812B RGB Leds that are used for the LED Matrix should survive wrong polarity, but we recommend to not stress it. ",(0,a.kt)("strong",{parentName:"p"},"WARNING:")," It is also not recommended to drive the motor drivers without any motors attached. 
Make sure you have an electric load attached to the board when you power it up, otherwise the motor drivers may get damaged."),(0,a.kt)("div",{class:"alert-danger"},"The polarity of the LED Array matters! The UC2 LED ring module comes with a JST connector where the 3 pins represent (5V, Data, GND) **RED** / Black / Black. Make sure the RED cable is connected to the 5V on the PCB."),(0,a.kt)("p",null,"Below you can find a rendering of the PCB that is sitting inside the 3D printed Box with all its connectors."),(0,a.kt)("p",null,(0,a.kt)("img",{src:n(58583).Z,width:"2084",height:"1528"})),(0,a.kt)("p",null,"For those who are keen to understand the wiring, please click on the schematics below. The sources of the board will be published soon."),(0,a.kt)("p",null,(0,a.kt)("img",{src:n(97674).Z,width:"2706",height:"1408"})),(0,a.kt)("h2",{id:"-uc2-led-ring"},"\ud83d\udca1 UC2 LED Ring"),(0,a.kt)("p",null,"Duration:2"),(0,a.kt)("p",null,"As for the illumination, you can use a large variation of different light sources to get your sample in the right light. But sometimes you want to have the ability to change the contrast by illuminating from oblique, only in the center, using a darkfield ring or change degree of coherence. An off-the-shelf LED Matrix is very sufficient in most cases, but it lacks the rotational symmetry. For this we have the UC2 LED Ring Module that fits directly in the Cube, has concentric rings and still offers all the degrees of freedom that the Adafruit Neopixel gives you (e.g. 
RGB, white, patterns..)."),(0,a.kt)("p",null,(0,a.kt)("img",{src:n(64052).Z,width:"3648",height:"2736"})),(0,a.kt)("p",null,"The 3 wires that leave the satellite board deliver 5V, Data and GND and directly connect to the UC2e via the LED pin:"),(0,a.kt)("p",null,(0,a.kt)("img",{src:n(84796).Z,width:"3648",height:"2736"})))}u.isMDXComponent=!0},64052:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/UC2_electronics_board2-a358bc9a40f95dbbfb2acdc376fdd375.jpg"},84796:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/UC2_electronics_board3-bb3be5adae04ce1b932e673cf2cb4763.jpg"},58583:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/UC2_electronics_board_Kicad_0-79ec90191925a7fa808ada8fa2e45b5c.png"},75584:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/UC2_electronics_board_Kicad_6-4c23e9c6f1bb4d5bec2cdbe3ec8d9559.png"},97674:(e,t,n)=>{n.d(t,{Z:()=>r});const r=n.p+"assets/images/UC2eSchematics-8a156644b821b974b82fb61326e993c9.png"}}]); \ No newline at end of file diff --git a/assets/js/7a9250c2.45d64fb7.js b/assets/js/7a9250c2.45d64fb7.js deleted file mode 100644 index 86e142995..000000000 --- a/assets/js/7a9250c2.45d64fb7.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkuc_2_docs=self.webpackChunkuc_2_docs||[]).push([[7886],{3905:(e,t,r)=>{r.d(t,{Zo:()=>s,kt:()=>m});var n=r(67294);function o(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(o[r]=e[r]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(o[r]=e[r])}return o}var c=n.createContext({}),u=function(e){var t=n.useContext(c),r=t;return e&&(r="function"==typeof 
e?e(t):i(i({},t),e)),r},s=function(e){var t=u(e.components);return n.createElement(c.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,o=e.mdxType,a=e.originalType,c=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),d=u(r),m=o,f=d["".concat(c,".").concat(m)]||d[m]||p[m]||a;return r?n.createElement(f,i(i({ref:t},s),{},{components:r})):n.createElement(f,i({ref:t},s))}));function m(e,t){var r=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var a=r.length,i=new Array(a);i[0]=d;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l.mdxType="string"==typeof e?e:o,i[1]=l;for(var u=2;u{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>p,frontMatter:()=>a,metadata:()=>l,toc:()=>u});var n=r(87462),o=(r(67294),r(3905));const a={id:"uc2e1",title:"Introduction"},i="UC2e - A guide to use the UC2 Electronics",l={unversionedId:"Electronics/uc2e1",id:"Electronics/uc2e1",title:"Introduction",description:"Overview",source:"@site/docs/03_Electronics/01_Overview.md",sourceDirName:"03_Electronics",slug:"/Electronics/uc2e1",permalink:"/docs/Electronics/uc2e1",draft:!1,tags:[],version:"current",sidebarPosition:1,frontMatter:{id:"uc2e1",title:"Introduction"},sidebar:"tutorialSidebar",previous:{title:"README",permalink:"/docs/Investigator/FlowStopper/"},next:{title:"UC2 Standalone Boar V2",permalink:"/docs/Electronics/uc2e2v2"}},c={},u=[{value:"Overview",id:"overview",level:2},{value:"What will you learn?",id:"what-will-you-learn",level:3},{value:"Introduction into the UC2e Board ("Standalone")",id:"introduction-into-the-uc2e-board-standalone",level:2}],s={toc:u};function p(e){let{components:t,...a}=e;return(0,o.kt)("wrapper",(0,n.Z)({},s,a,{components:t,mdxType:"MDXLayout"}),(0,o.kt)("h1",{id:"uc2e---a-guide-to-use-the-uc2-electronics"},"UC2e - A guide to use the UC2 
Electronics"),(0,o.kt)("h2",{id:"overview"},"Overview"),(0,o.kt)("p",null,"Duration:1"),(0,o.kt)("admonition",{type:"warning"},(0,o.kt)("b",null,"IMPORTANT")," The system is under constant development and may be subject to changes. If you find any bug or something feels unclear, you can help us improving the system! Feel lucky and file your issue today by opening one here: ",(0,o.kt)("a",{href:"https://github.com/openUC2/UC2-REST/issues/new"},"GitHub: UC2-REST")),(0,o.kt)("h3",{id:"what-will-you-learn"},"What will you learn?"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"How to connect the UC2 electronics?"),(0,o.kt)("li",{parentName:"ul"},"How to wire up the board with external components"),(0,o.kt)("li",{parentName:"ul"},"How to get the playstation controller working?\n\u02d8")),(0,o.kt)("h2",{id:"introduction-into-the-uc2e-board-standalone"},'Introduction into the UC2e Board ("Standalone")'),(0,o.kt)("p",null,"Duration: 5"),(0,o.kt)("p",null,"For a microscsope you have several I/Os that need to be controlled via Software. This majorly concerns:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Lasers"),(0,o.kt)("li",{parentName:"ul"},"Motors (e.g. for positioning)"),(0,o.kt)("li",{parentName:"ul"},"LEDs for changing the contrast (e.g. LED Array)"),(0,o.kt)("li",{parentName:"ul"},"Sensors (e.g. Endstops)")),(0,o.kt)("p",null,"There exist a number of boards that can do it by default. Here, we created our own driver electronics that is based on the Espressif ESP32 microcontroller unit (MCU) that has:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"4x Stepper outputs"),(0,o.kt)("li",{parentName:"ul"},"3x PWM outputs for e.g. Lasers"),(0,o.kt)("li",{parentName:"ul"},"1x Neopixel Slot (for the LED Ring Array)"),(0,o.kt)("li",{parentName:"ul"},"3x PWM amplified for e.g. 
power LEDs"),(0,o.kt)("li",{parentName:"ul"},"1x I2C connection"),(0,o.kt)("li",{parentName:"ul"},"USB Serial connection"),(0,o.kt)("li",{parentName:"ul"},"2x DAC (8Bit)")),(0,o.kt)("p",null,'It is based on common "GRBL" boards that drive 3D printers, CNC routers or alike.'),(0,o.kt)("p",null,"A fully assembled board with 12V power, the UC2 LED matrix and the linear stepper motor can be found below:"),(0,o.kt)("p",null,(0,o.kt)("img",{src:r(77501).Z,width:"2736",height:"3648"})))}p.isMDXComponent=!0},77501:(e,t,r)=>{r.d(t,{Z:()=>n});const n=r.p+"assets/images/UC2_electronics_board0-7ce4a4577db082e6506bd74d9749d544.jpg"}}]); \ No newline at end of file diff --git a/assets/js/7a9250c2.684fa076.js b/assets/js/7a9250c2.684fa076.js new file mode 100644 index 000000000..158538a31 --- /dev/null +++ b/assets/js/7a9250c2.684fa076.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkuc_2_docs=self.webpackChunkuc_2_docs||[]).push([[7886],{3905:(e,t,r)=>{r.d(t,{Zo:()=>s,kt:()=>m});var n=r(67294);function o(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function i(e){for(var t=1;t=0||(o[r]=e[r]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(o[r]=e[r])}return o}var c=n.createContext({}),u=function(e){var t=n.useContext(c),r=t;return e&&(r="function"==typeof e?e(t):i(i({},t),e)),r},s=function(e){var t=u(e.components);return n.createElement(c.Provider,{value:t},e.children)},p={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var 
r=e.components,o=e.mdxType,a=e.originalType,c=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),d=u(r),m=o,f=d["".concat(c,".").concat(m)]||d[m]||p[m]||a;return r?n.createElement(f,i(i({ref:t},s),{},{components:r})):n.createElement(f,i({ref:t},s))}));function m(e,t){var r=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var a=r.length,i=new Array(a);i[0]=d;var l={};for(var c in t)hasOwnProperty.call(t,c)&&(l[c]=t[c]);l.originalType=e,l.mdxType="string"==typeof e?e:o,i[1]=l;for(var u=2;u{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>i,default:()=>p,frontMatter:()=>a,metadata:()=>l,toc:()=>u});var n=r(87462),o=(r(67294),r(3905));const a={id:"uc2e1",title:"Introduction"},i="UC2e - A guide to use the UC2 Electronics",l={unversionedId:"Electronics/uc2e1",id:"Electronics/uc2e1",title:"Introduction",description:"Overview",source:"@site/docs/03_Electronics/01_Overview.md",sourceDirName:"03_Electronics",slug:"/Electronics/uc2e1",permalink:"/docs/Electronics/uc2e1",draft:!1,tags:[],version:"current",sidebarPosition:1,frontMatter:{id:"uc2e1",title:"Introduction"},sidebar:"tutorialSidebar",previous:{title:"README",permalink:"/docs/Investigator/FlowStopper/"},next:{title:"UC2 Standalone Board V2",permalink:"/docs/Electronics/uc2e2v2"}},c={},u=[{value:"Overview",id:"overview",level:2},{value:"What will you learn?",id:"what-will-you-learn",level:3},{value:"Introduction into the UC2e Board ("Standalone")",id:"introduction-into-the-uc2e-board-standalone",level:2}],s={toc:u};function p(e){let{components:t,...a}=e;return(0,o.kt)("wrapper",(0,n.Z)({},s,a,{components:t,mdxType:"MDXLayout"}),(0,o.kt)("h1",{id:"uc2e---a-guide-to-use-the-uc2-electronics"},"UC2e - A guide to use the UC2 Electronics"),(0,o.kt)("h2",{id:"overview"},"Overview"),(0,o.kt)("p",null,"Duration:1"),(0,o.kt)("admonition",{type:"warning"},(0,o.kt)("b",null,"IMPORTANT")," The system is under constant development and may be subject to changes. 
If you find any bug or something feels unclear, you can help us improving the system! Feel lucky and file your issue today by opening one here: ",(0,o.kt)("a",{href:"https://github.com/openUC2/UC2-REST/issues/new"},"GitHub: UC2-REST")),(0,o.kt)("h3",{id:"what-will-you-learn"},"What will you learn?"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"How to connect the UC2 electronics?"),(0,o.kt)("li",{parentName:"ul"},"How to wire up the board with external components"),(0,o.kt)("li",{parentName:"ul"},"How to get the playstation controller working?\n\u02d8")),(0,o.kt)("h2",{id:"introduction-into-the-uc2e-board-standalone"},'Introduction into the UC2e Board ("Standalone")'),(0,o.kt)("p",null,"Duration: 5"),(0,o.kt)("p",null,"For a microscsope you have several I/Os that need to be controlled via Software. This majorly concerns:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"Lasers"),(0,o.kt)("li",{parentName:"ul"},"Motors (e.g. for positioning)"),(0,o.kt)("li",{parentName:"ul"},"LEDs for changing the contrast (e.g. LED Array)"),(0,o.kt)("li",{parentName:"ul"},"Sensors (e.g. Endstops)")),(0,o.kt)("p",null,"There exist a number of boards that can do it by default. Here, we created our own driver electronics that is based on the Espressif ESP32 microcontroller unit (MCU) that has:"),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},"4x Stepper outputs"),(0,o.kt)("li",{parentName:"ul"},"3x PWM outputs for e.g. Lasers"),(0,o.kt)("li",{parentName:"ul"},"1x Neopixel Slot (for the LED Ring Array)"),(0,o.kt)("li",{parentName:"ul"},"3x PWM amplified for e.g. 
power LEDs"),(0,o.kt)("li",{parentName:"ul"},"1x I2C connection"),(0,o.kt)("li",{parentName:"ul"},"USB Serial connection"),(0,o.kt)("li",{parentName:"ul"},"2x DAC (8Bit)")),(0,o.kt)("p",null,'It is based on common "GRBL" boards that drive 3D printers, CNC routers or alike.'),(0,o.kt)("p",null,"A fully assembled board with 12V power, the UC2 LED matrix and the linear stepper motor can be found below:"),(0,o.kt)("p",null,(0,o.kt)("img",{src:r(77501).Z,width:"2736",height:"3648"})))}p.isMDXComponent=!0},77501:(e,t,r)=>{r.d(t,{Z:()=>n});const n=r.p+"assets/images/UC2_electronics_board0-7ce4a4577db082e6506bd74d9749d544.jpg"}}]); \ No newline at end of file diff --git a/assets/js/935f2afb.633c2767.js b/assets/js/935f2afb.633c2767.js new file mode 100644 index 000000000..693c49d4d --- /dev/null +++ b/assets/js/935f2afb.633c2767.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkuc_2_docs=self.webpackChunkuc_2_docs||[]).push([[53],{1109:e=>{e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"tutorialSidebar":[{"type":"category","label":"Educational Kits","collapsible":true,"collapsed":true,"items":[{"type":"category","label":"DiscoveryCore","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Optics and Imaging for Medical Photonics Students","href":"/docs/Toolboxes/DiscoveryCore/Opticsintro","docId":"Toolboxes/DiscoveryCore/Opticsintro"},{"type":"link","label":"openUC2 Smartphone Microscope with a finite corrected objective lens","href":"/docs/Toolboxes/DiscoveryCore/Smartphone Microscope","docId":"Toolboxes/DiscoveryCore/Smartphone 
Microscope"},{"type":"category","label":"CHINESE","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"uc2miniboxCN","href":"/docs/Toolboxes/DiscoveryCore/CHINESE/uc2miniboxCN","docId":"Toolboxes/DiscoveryCore/CHINESE/uc2miniboxCN"}]},{"type":"category","label":"ENGLISH","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"UC2 miniBOX (english)","href":"/docs/Toolboxes/DiscoveryCore/ENGLISH/uc2miniboxEN","docId":"Toolboxes/DiscoveryCore/ENGLISH/uc2miniboxEN"},{"type":"link","label":"Lens","href":"/docs/Toolboxes/DiscoveryCore/ENGLISH/CoreLens","docId":"Toolboxes/DiscoveryCore/ENGLISH/CoreLens"},{"type":"link","label":"Telescope","href":"/docs/Toolboxes/DiscoveryCore/ENGLISH/CoreTelescope","docId":"Toolboxes/DiscoveryCore/ENGLISH/CoreTelescope"},{"type":"link","label":"Microscope","href":"/docs/Toolboxes/DiscoveryCore/ENGLISH/coreMicroscope","docId":"Toolboxes/DiscoveryCore/ENGLISH/coreMicroscope"}]},{"type":"category","label":"FRENCH","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"UC2 miniBOX (fran\xe7ais)","href":"/docs/Toolboxes/DiscoveryCore/FRENCH/uc2miniboxFR","docId":"Toolboxes/DiscoveryCore/FRENCH/uc2miniboxFR"},{"type":"link","label":"Lentille","href":"/docs/Toolboxes/DiscoveryCore/FRENCH/CoreLensFR","docId":"Toolboxes/DiscoveryCore/FRENCH/CoreLensFR"},{"type":"link","label":"T\xe9lescope","href":"/docs/Toolboxes/DiscoveryCore/FRENCH/CoreTelescopeFR","docId":"Toolboxes/DiscoveryCore/FRENCH/CoreTelescopeFR"},{"type":"link","label":"Microscope","href":"/docs/Toolboxes/DiscoveryCore/FRENCH/coreMicroscopeFR","docId":"Toolboxes/DiscoveryCore/FRENCH/coreMicroscopeFR"}]},{"type":"category","label":"GERMAN","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"UC2 miniBOX 
(deutsch)","href":"/docs/Toolboxes/DiscoveryCore/GERMAN/uc2miniboxDE","docId":"Toolboxes/DiscoveryCore/GERMAN/uc2miniboxDE"},{"type":"link","label":"Linse","href":"/docs/Toolboxes/DiscoveryCore/GERMAN/CoreLens","docId":"Toolboxes/DiscoveryCore/GERMAN/CoreLens"},{"type":"link","label":"Teleskop","href":"/docs/Toolboxes/DiscoveryCore/GERMAN/CoreTelescope","docId":"Toolboxes/DiscoveryCore/GERMAN/CoreTelescope"},{"type":"link","label":"Mikroskop","href":"/docs/Toolboxes/DiscoveryCore/GERMAN/coreMicroscope","docId":"Toolboxes/DiscoveryCore/GERMAN/coreMicroscope"}]},{"type":"category","label":"SPANISH","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"core_intro","href":"/docs/Toolboxes/DiscoveryCore/SPANISH/core_intro","docId":"Toolboxes/DiscoveryCore/SPANISH/core_intro"}]}]},{"type":"category","label":"DiscoveryElectronics","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"openUC2 Electronics kit that enables automation","href":"/docs/Toolboxes/DiscoveryElectronics/Automation_intro","docId":"Toolboxes/DiscoveryElectronics/Automation_intro"},{"type":"link","label":"openUC2 Camera Setup","href":"/docs/Toolboxes/DiscoveryElectronics/Camera Setup","docId":"Toolboxes/DiscoveryElectronics/Camera Setup"},{"type":"link","label":"XYZ Micrometer Stage for Precise Motion Control","href":"/docs/Toolboxes/DiscoveryElectronics/XYZ_stage_mico","docId":"Toolboxes/DiscoveryElectronics/XYZ_stage_mico"},{"type":"link","label":"ESP32 XIAO Sense-based microscope","href":"/docs/Toolboxes/DiscoveryElectronics/seeedmicroscope","docId":"Toolboxes/DiscoveryElectronics/seeedmicroscope"},{"type":"link","label":"openUC2 *Spectrometer*","href":"/docs/Toolboxes/DiscoveryElectronics/spectrometer","docId":"Toolboxes/DiscoveryElectronics/spectrometer"},{"type":"link","label":"openUC2 XIAO Microscope 
Documentation","href":"/docs/Toolboxes/DiscoveryElectronics/04_1_seeedmicroscope","docId":"Toolboxes/DiscoveryElectronics/04_1_seeedmicroscope"}]},{"type":"category","label":"DiscoveryInterferometer","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"openUC2 Interferometer Introduction","href":"/docs/Toolboxes/DiscoveryInterferometer/Interferometer_intro","docId":"Toolboxes/DiscoveryInterferometer/Interferometer_intro"},{"type":"link","label":"openUC2 In-line holography","href":"/docs/Toolboxes/DiscoveryInterferometer/InlineHolography","docId":"Toolboxes/DiscoveryInterferometer/InlineHolography"},{"type":"link","label":"openUC2 Michelson Interferometer","href":"/docs/Toolboxes/DiscoveryInterferometer/MichelsonInterferometer","docId":"Toolboxes/DiscoveryInterferometer/MichelsonInterferometer"},{"type":"link","label":"openUC2 Mach-Zender Interferometer","href":"/docs/Toolboxes/DiscoveryInterferometer/MachZenderInterferometer","docId":"Toolboxes/DiscoveryInterferometer/MachZenderInterferometer"},{"type":"link","label":"HIK-Camera Software Installation","href":"/docs/Toolboxes/DiscoveryInterferometer/SoftwareTutorial","docId":"Toolboxes/DiscoveryInterferometer/SoftwareTutorial"}]},{"type":"category","label":"Building The CourseBOX","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"The Course BOX Alignment Procedure (Finite Optics)","href":"/docs/Toolboxes/DiscoveryDiffraction/ALIGNMENT_FinOptics/","docId":"Toolboxes/DiscoveryDiffraction/ALIGNMENT_FinOptics/Readme"},{"type":"link","label":"CourseBOX: Light Microscopy and Optical Alignment (Infinity Optics)","href":"/docs/Toolboxes/DiscoveryDiffraction/ALIGNMENT_InfOptics/","docId":"Toolboxes/DiscoveryDiffraction/ALIGNMENT_InfOptics/Readme"},{"type":"link","label":"MicroscopyCore","href":"/docs/Toolboxes/DiscoveryDiffraction/MicroscopyCore","docId":"Toolboxes/DiscoveryDiffraction/MicroscopyCore"}],"href":"/docs/Toolboxes/DiscoveryDiffraction/"},{"type":"category","label":"Polarisation 
Experiments","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Brewster Angle Experiment","href":"/docs/Toolboxes/DiscoveryPolarization/APP_POL_Brewster_Angle_Experiment/","docId":"Toolboxes/DiscoveryPolarization/APP_POL_Brewster_Angle_Experiment/Readme"},{"type":"link","label":"Circular Polarizer","href":"/docs/Toolboxes/DiscoveryPolarization/APP_POL_Circular_Polarizer/","docId":"Toolboxes/DiscoveryPolarization/APP_POL_Circular_Polarizer/Readme"},{"type":"link","label":"Crossed Polarizers","href":"/docs/Toolboxes/DiscoveryPolarization/APP_POL_Crossed_Polarizers/","docId":"Toolboxes/DiscoveryPolarization/APP_POL_Crossed_Polarizers/Readme"},{"type":"link","label":"Many Microscope Slides Experiment","href":"/docs/Toolboxes/DiscoveryPolarization/APP_POL_Many_Microscope_Slides_Experiment/","docId":"Toolboxes/DiscoveryPolarization/APP_POL_Many_Microscope_Slides_Experiment/Readme"},{"type":"link","label":"Newton\'s Rings Experiment","href":"/docs/Toolboxes/DiscoveryPolarization/APP_POL_Newtons_Rings_Experiment/","docId":"Toolboxes/DiscoveryPolarization/APP_POL_Newtons_Rings_Experiment/Readme"},{"type":"link","label":"Polarization Experiment using Optically Active Solution","href":"/docs/Toolboxes/DiscoveryPolarization/APP_POL_Polarization_using_optically_active_solution/","docId":"Toolboxes/DiscoveryPolarization/APP_POL_Polarization_using_optically_active_solution/Readme"},{"type":"link","label":"Stress Birefringence Experiment","href":"/docs/Toolboxes/DiscoveryPolarization/APP_POL_Stress_Birefringence/","docId":"Toolboxes/DiscoveryPolarization/APP_POL_Stress_Birefringence/Readme"},{"type":"link","label":"Three Polarizers (0, 45, 90 degrees)","href":"/docs/Toolboxes/DiscoveryPolarization/APP_POL_Three_Polarizers/","docId":"Toolboxes/DiscoveryPolarization/APP_POL_Three_Polarizers/Readme"}],"href":"/docs/Toolboxes/DiscoveryPolarization/"},{"type":"link","label":"Fluorescence 
Extension","href":"/docs/Toolboxes/DiscoveryFluorescence/","docId":"Toolboxes/DiscoveryFluorescence/README"},{"type":"category","label":"Phase Microscopy","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Differential Phase Contrast Microscopy","href":"/docs/Toolboxes/DiscoveryPhaseMicroscopy/DPCmicroscopy","docId":"Toolboxes/DiscoveryPhaseMicroscopy/DPCmicroscopy"}],"href":"/docs/Toolboxes/DiscoveryPhaseMicroscopy/"}],"href":"/docs/Toolboxes/"},{"type":"category","label":"Investigator","collapsible":true,"collapsed":true,"items":[{"type":"category","label":"ZMicroscope","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Unpack the openUC2 Z-Microscope","href":"/docs/Investigator/ZMicroscope/UpackZMicroscope","docId":"Investigator/ZMicroscope/UpackZMicroscope"}]},{"type":"category","label":"XYZMicroscope","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Aligning the Beamsplitter Cube","href":"/docs/Investigator/XYZMicroscope/AlignLaser","docId":"Investigator/XYZMicroscope/AlignLaser"},{"type":"link","label":"openUC2 FiveD v1","href":"/docs/Investigator/XYZMicroscope/FiveD_v1","docId":"Investigator/XYZMicroscope/FiveD_v1"},{"type":"link","label":"openUC2 FiveD v2","href":"/docs/Investigator/XYZMicroscope/FiveD_v2","docId":"Investigator/XYZMicroscope/FiveD_v2"},{"type":"link","label":"openUC2 FiveD v3","href":"/docs/Investigator/XYZMicroscope/FiveD_v3","docId":"Investigator/XYZMicroscope/FiveD_v3"},{"type":"link","label":"openUC2 FiveD v4","href":"/docs/Investigator/XYZMicroscope/FiveD_v4","docId":"Investigator/XYZMicroscope/FiveD_v4"},{"type":"link","label":"Histo Scanner Plugin Documentation","href":"/docs/Investigator/XYZMicroscope/HistoScan","docId":"Investigator/XYZMicroscope/HistoScan"},{"type":"link","label":"MCT (Multi-Colour Timelapse) Imaging Plugin","href":"/docs/Investigator/XYZMicroscope/MCTPlugin","docId":"Investigator/XYZMicroscope/MCTPlugin"},{"type":"link","label":"ROI 
Scanner","href":"/docs/Investigator/XYZMicroscope/ROIScanner","docId":"Investigator/XYZMicroscope/ROIScanner"},{"type":"link","label":"openUC2 Phase-Contrast Setup Tutorial","href":"/docs/Investigator/XYZMicroscope/SetupPhasecontrast","docId":"Investigator/XYZMicroscope/SetupPhasecontrast"},{"type":"link","label":"openUC2 Setting up the tube lens","href":"/docs/Investigator/XYZMicroscope/SetupTubelens","docId":"Investigator/XYZMicroscope/SetupTubelens"},{"type":"link","label":"Smart Microscopy Using openUC2 and ImSwitch","href":"/docs/Investigator/XYZMicroscope/SmartMicroscopy","docId":"Investigator/XYZMicroscope/SmartMicroscopy"},{"type":"link","label":"Stage Mapping and Stage Calibration","href":"/docs/Investigator/XYZMicroscope/StageCalibration","docId":"Investigator/XYZMicroscope/StageCalibration"},{"type":"link","label":"Stage Scanning and Image Stitching (ASHLAR)","href":"/docs/Investigator/XYZMicroscope/StageScanning","docId":"Investigator/XYZMicroscope/StageScanning"}]},{"type":"category","label":"Lightsheet","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"openUC2 Light-Sheet Microscope","href":"/docs/Investigator/Lightsheet/LightSheet","docId":"Investigator/Lightsheet/LightSheet"},{"type":"link","label":"openUC2 Light-Sheet Microscope (Old Version)","href":"/docs/Investigator/Lightsheet/LightSheetOld","docId":"Investigator/Lightsheet/LightSheetOld"},{"type":"link","label":"openUC2 Light-Sheet Tips and Tricks","href":"/docs/Investigator/Lightsheet/LightSheet Sample","docId":"Investigator/Lightsheet/LightSheet Sample"},{"type":"link","label":"Light-sheet alignment","href":"/docs/Investigator/Lightsheet/LightsheetCalibration","docId":"Investigator/Lightsheet/LightsheetCalibration"}]},{"type":"category","label":"STORM","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"U.C.*STORM*","href":"/docs/Investigator/STORM/Main","docId":"Investigator/STORM/Main"},{"type":"link","label":"Setting up the 
laser","href":"/docs/Investigator/STORM/Illumination","docId":"Investigator/STORM/Illumination"},{"type":"link","label":"Stability","href":"/docs/Investigator/STORM/Stability","docId":"Investigator/STORM/Stability"},{"type":"link","label":"Software","href":"/docs/Investigator/STORM/Software","docId":"Investigator/STORM/Software"},{"type":"link","label":"Electronics","href":"/docs/Investigator/STORM/Electronics","docId":"Investigator/STORM/Electronics"},{"type":"link","label":"Results","href":"/docs/Investigator/STORM/Results","docId":"Investigator/STORM/Results"}]},{"type":"link","label":"README","href":"/docs/Investigator/FlowStopper/","docId":"Investigator/FlowStopper/README"}]},{"type":"category","label":"Electronics","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Introduction","href":"/docs/Electronics/uc2e1","docId":"Electronics/uc2e1"},{"type":"link","label":"UC2 Standalone Board V2","href":"/docs/Electronics/uc2e2v2","docId":"Electronics/uc2e2v2"},{"type":"link","label":"UC2 Standalone Board V3","href":"/docs/Electronics/uc2e2v3","docId":"Electronics/uc2e2v3"},{"type":"link","label":"Getting Started","href":"/docs/Electronics/uc2e3","docId":"Electronics/uc2e3"},{"type":"link","label":"REST principle","href":"/docs/Electronics/uc2e5","docId":"Electronics/uc2e5"},{"type":"link","label":"REST commands","href":"/docs/Electronics/uc2e5.1","docId":"Electronics/uc2e5.1"},{"type":"link","label":"Connecting devices","href":"/docs/Electronics/uc2e6","docId":"Electronics/uc2e6"},{"type":"link","label":"Controlling the UC2e","href":"/docs/Electronics/uc2e7","docId":"Electronics/uc2e7"},{"type":"link","label":"Compiling from Scratch","href":"/docs/Electronics/uc2e8","docId":"Electronics/uc2e8"},{"type":"link","label":"Replace Hardware","href":"/docs/Electronics/uc2e9","docId":"Electronics/uc2e9"},{"type":"link","label":"PS4-Controller","href":"/docs/Electronics/PS4-Controller","docId":"Electronics/PS4-Controller"},{"type":"link","label":"Python 
commands","href":"/docs/Electronics/uc2e5.2","docId":"Electronics/uc2e5.2"},{"type":"category","label":"APIDescription","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"UC2-ESP","href":"/docs/Electronics/APIDescription/INTRO","docId":"Electronics/APIDescription/INTRO"},{"type":"link","label":"AS 5311 linear encoder for real-time feedback loop","href":"/docs/Electronics/APIDescription/Encoder","docId":"Electronics/APIDescription/Encoder"},{"type":"link","label":"Home","href":"/docs/Electronics/APIDescription/Home","docId":"Electronics/APIDescription/Home"},{"type":"link","label":"LED array","href":"/docs/Electronics/APIDescription/LEDArray","docId":"Electronics/APIDescription/LEDArray"},{"type":"link","label":"Motor","href":"/docs/Electronics/APIDescription/Motor","docId":"Electronics/APIDescription/Motor"},{"type":"link","label":"PinConfig","href":"/docs/Electronics/APIDescription/PinConfig","docId":"Electronics/APIDescription/PinConfig"}]},{"type":"category","label":"UC2-ESP","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"UC2-ESP Firmware for the openUC2 UC2e electronics","href":"/docs/Electronics/UC2-ESP/Setup_Buildenvironment","docId":"Electronics/UC2-ESP/Setup_Buildenvironment"}]},{"type":"category","label":"UC2-REST","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"UC2-REST","href":"/docs/Electronics/UC2-REST/INTRO","docId":"Electronics/UC2-REST/INTRO"},{"type":"link","label":"UC2-REST: Messaging","href":"/docs/Electronics/UC2-REST/ESP32_Messaging_Callback","docId":"Electronics/UC2-REST/ESP32_Messaging_Callback"},{"type":"link","label":"UC2-REST: Motor","href":"/docs/Electronics/UC2-REST/ESP32_Motor","docId":"Electronics/UC2-REST/ESP32_Motor"}]}]},{"type":"category","label":"ImSwitch","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Install driver for Daheng Camera","href":"/docs/ImSwitch/DahengCamera","docId":"ImSwitch/DahengCamera"},{"type":"link","label":"ImSwitchClient 
Documentation","href":"/docs/ImSwitch/ImSwitchClient","docId":"ImSwitch/ImSwitchClient"},{"type":"link","label":"ImSwitchConfig","href":"/docs/ImSwitch/ImSwitchConfig","docId":"ImSwitch/ImSwitchConfig"},{"type":"link","label":"ImSwitch in Docker","href":"/docs/ImSwitch/ImSwitchDocker","docId":"ImSwitch/ImSwitchDocker"},{"type":"link","label":"ImSwitch Experimental Features Documentation","href":"/docs/ImSwitch/ImSwitchExperimental","docId":"ImSwitch/ImSwitchExperimental"},{"type":"link","label":"Install ImSwitch","href":"/docs/ImSwitch/ImSwitchInstall","docId":"ImSwitch/ImSwitchInstall"},{"type":"link","label":"ImSwitchInstallUbuntu","href":"/docs/ImSwitch/ImSwitchInstallUbuntu","docId":"ImSwitch/ImSwitchInstallUbuntu"},{"type":"link","label":"ImSwitchInstallWindows","href":"/docs/ImSwitch/ImSwitchInstallWindows","docId":"ImSwitch/ImSwitchInstallWindows"},{"type":"link","label":"Install ImSwitch using the ImSwitch Installer (Electron updated Version)","href":"/docs/ImSwitch/ImSwitchInstaller","docId":"ImSwitch/ImSwitchInstaller"},{"type":"link","label":"Install ImSwitch using the ImSwitch Installer (CONDA INSTALLER OUTDATED)","href":"/docs/ImSwitch/ImSwitchInstallerConda","docId":"ImSwitch/ImSwitchInstallerConda"},{"type":"link","label":"ImSwitchUpdate","href":"/docs/ImSwitch/ImSwitchUpdate","docId":"ImSwitch/ImSwitchUpdate"}]},{"type":"category","label":"openUC2 Workshops","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"openUC2 Workshop at BioRTC in Nigeria","href":"/docs/WORKSHOPS/Workshop Nigeria","docId":"WORKSHOPS/Workshop Nigeria"}],"href":"/docs/WORKSHOPS/"},{"type":"category","label":"PRODUCTION","collapsible":true,"collapsed":true,"items":[{"type":"category","label":"INVESTIGATOR","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Assemble the XYZ 
Microscope","href":"/docs/PRODUCTION/INVESTIGATOR/ProductionXYZMicroscope","docId":"PRODUCTION/INVESTIGATOR/ProductionXYZMicroscope"}]},{"type":"category","label":"Modules","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"USB Camera","href":"/docs/PRODUCTION/Modules/Camera","docId":"PRODUCTION/Modules/Camera"},{"type":"link","label":"Eyepiece","href":"/docs/PRODUCTION/Modules/Eyepiece","docId":"PRODUCTION/Modules/Eyepiece"},{"type":"link","label":"LENS","href":"/docs/PRODUCTION/Modules/LENS","docId":"PRODUCTION/Modules/LENS"},{"type":"link","label":"KINEMATIC MIRROR (45\xb0)","href":"/docs/PRODUCTION/Modules/KIN_MIR_45","docId":"PRODUCTION/Modules/KIN_MIR_45"},{"type":"link","label":"KINEMATIC MIRROR (90\xb0)","href":"/docs/PRODUCTION/Modules/KIN_MIR_90","docId":"PRODUCTION/Modules/KIN_MIR_90"},{"type":"link","label":"KINEMATIC MIRROR (90\xb0)","href":"/docs/PRODUCTION/Modules/BEAMSPLITTER","docId":"PRODUCTION/Modules/BEAMSPLITTER"},{"type":"link","label":"Mirror (45\xb0)","href":"/docs/PRODUCTION/Modules/MIR_45","docId":"PRODUCTION/Modules/MIR_45"},{"type":"link","label":"Kinematic XY Mount","href":"/docs/PRODUCTION/Modules/KIN_XY_LASER","docId":"PRODUCTION/Modules/KIN_XY_LASER"},{"type":"link","label":"Kinematic XY Mount / Laser","href":"/docs/PRODUCTION/Modules/STAGE_Z_MANUAL","docId":"PRODUCTION/Modules/STAGE_Z_MANUAL"},{"type":"link","label":"Z-Stage Motorized NEMA12 25mm","href":"/docs/PRODUCTION/Modules/STAGE_Z_NEMA","docId":"PRODUCTION/Modules/STAGE_Z_NEMA"},{"type":"link","label":"Torch","href":"/docs/PRODUCTION/Modules/TORCH","docId":"PRODUCTION/Modules/TORCH"},{"type":"link","label":"Sample Holder","href":"/docs/PRODUCTION/Modules/SAMPLE_HOLDEr","docId":"PRODUCTION/Modules/SAMPLE_HOLDEr"},{"type":"link","label":"Polarization 
Rotator","href":"/docs/PRODUCTION/Modules/POLARIZER_ROTATING","docId":"PRODUCTION/Modules/POLARIZER_ROTATING"},{"type":"link","label":"Apertures","href":"/docs/PRODUCTION/Modules/APERTURES","docId":"PRODUCTION/Modules/APERTURES"}]}]},{"type":"link","label":"openUC2 Documentation","href":"/docs/intro","docId":"intro"}]},"docs":{"Electronics/APIDescription/Encoder":{"id":"Electronics/APIDescription/Encoder","title":"AS 5311 linear encoder for real-time feedback loop","description":"The relevant code can be found here:","sidebar":"tutorialSidebar"},"Electronics/APIDescription/Home":{"id":"Electronics/APIDescription/Home","title":"Home","description":"UC2-ESP Motor Homing Interface API Description","sidebar":"tutorialSidebar"},"Electronics/APIDescription/INTRO":{"id":"Electronics/APIDescription/INTRO","title":"UC2-ESP","description":"This is the API description for the UC2 firmware running on the ESP32 boards. It\'s under heavy active development. You can find the current version here:","sidebar":"tutorialSidebar"},"Electronics/APIDescription/LEDArray":{"id":"Electronics/APIDescription/LEDArray","title":"LED array","description":"This API provides a convenient method for controlling individual LEDs within a NeoPixel LED array using the UC2-ESP firmware. The interface facilitates the manipulation of LED colors and array display modes. It operates through JSON documents sent over USB serial communication.","sidebar":"tutorialSidebar"},"Electronics/APIDescription/Motor":{"id":"Electronics/APIDescription/Motor","title":"Motor","description":"This API provides a straightforward way to control and manage motors using the UC2-ESP firmware. The interface operates over USB serial communication and accepts JSON documents to control motor movements. 
The main endpoint for motor control is /motor_act.","sidebar":"tutorialSidebar"},"Electronics/APIDescription/PinConfig":{"id":"Electronics/APIDescription/PinConfig","title":"PinConfig","description":"UC2 System Version 2 and 3, and WEMOS Board Pinout Description","sidebar":"tutorialSidebar"},"Electronics/PS4-Controller":{"id":"Electronics/PS4-Controller","title":"PS4-Controller","description":"If you are using the webserial online flashing tool provided by UC2 (https://youseetoo.github.io/) to flash the firmware onto your ESP8266 or ESP32 development board, the process of connecting the PS4 controller to the UC2-ESP remains similar to the steps mentioned earlier. However, please note that the flashing tool is a separate tool for uploading firmware, and the Bluetooth communication with the PS4 controller needs to be implemented in your firmware code.","sidebar":"tutorialSidebar"},"Electronics/UC2-ESP/Setup_Buildenvironment":{"id":"Electronics/UC2-ESP/Setup_Buildenvironment","title":"UC2-ESP Firmware for the openUC2 UC2e electronics","description":"This refers to the UC2-ESP firmware that can be found here//github.com/youseetoo/uc2-esp32","sidebar":"tutorialSidebar"},"Electronics/UC2-REST/ESP32_Messaging_Callback":{"id":"Electronics/UC2-REST/ESP32_Messaging_Callback","title":"UC2-REST: Messaging","description":"This documentation covers the callback functionality integrated into the firmware, particularly focusing on the Message class. The Message class is designed to parse incoming messages from the ESP32, facilitating the conversion of hardware inputs and other events into software actions.","sidebar":"tutorialSidebar"},"Electronics/UC2-REST/ESP32_Motor":{"id":"Electronics/UC2-REST/ESP32_Motor","title":"UC2-REST: Motor","description":"This section provides detailed documentation on the Motor class designed for controlling motors via the firmware. 
The Motor class includes functionality for motor movement, triggering, position tracking, and stage scanning, among other features.","sidebar":"tutorialSidebar"},"Electronics/UC2-REST/INTRO":{"id":"Electronics/UC2-REST/INTRO","title":"UC2-REST","description":"This explains the basic functionality of the UC2-REST Python interface in conjunction with the UC2-ESP32 mainboard. This was mostly designed to interface with ImSwitch.","sidebar":"tutorialSidebar"},"Electronics/uc2e1":{"id":"Electronics/uc2e1","title":"Introduction","description":"Overview","sidebar":"tutorialSidebar"},"Electronics/uc2e2v2":{"id":"Electronics/uc2e2v2","title":"UC2 Standalone Board V2","description":"\ud83d\udd0c Board layout and schematics (UC2 Standalon v2)","sidebar":"tutorialSidebar"},"Electronics/uc2e2v3":{"id":"Electronics/uc2e2v3","title":"UC2 Standalone Board V3","description":"\ud83d\udd0c Board layout and schematics (UC2 Standalon v3)","sidebar":"tutorialSidebar"},"Electronics/uc2e3":{"id":"Electronics/uc2e3","title":"Getting Started","description":"First Steps, Getting Started, Flashing - Simply Quick Start!","sidebar":"tutorialSidebar"},"Electronics/uc2e5":{"id":"Electronics/uc2e5","title":"REST principle","description":"Introduction into the ESP32 microcontroller firmware","sidebar":"tutorialSidebar"},"Electronics/uc2e5.1":{"id":"Electronics/uc2e5.1","title":"REST commands","description":"Controlling hardware using the WebSerial Standard","sidebar":"tutorialSidebar"},"Electronics/uc2e5.2":{"id":"Electronics/uc2e5.2","title":"Python commands","description":"Using UC2-REST in Python","sidebar":"tutorialSidebar"},"Electronics/uc2e6":{"id":"Electronics/uc2e6","title":"Connecting devices","description":"Connect devices","sidebar":"tutorialSidebar"},"Electronics/uc2e7":{"id":"Electronics/uc2e7","title":"Controlling the UC2e","description":"Controlling the ESP32","sidebar":"tutorialSidebar"},"Electronics/uc2e8":{"id":"Electronics/uc2e8","title":"Compiling from 
Scratch","description":"UC2-ESP Firmware for the openUC2 UC2e electronics","sidebar":"tutorialSidebar"},"Electronics/uc2e9":{"id":"Electronics/uc2e9","title":"Replace Hardware","description":"\u274c Replacing parts","sidebar":"tutorialSidebar"},"ImSwitch/DahengCamera":{"id":"ImSwitch/DahengCamera","title":"Install driver for Daheng Camera","description":"Windows","sidebar":"tutorialSidebar"},"ImSwitch/ImSwitchClient":{"id":"ImSwitch/ImSwitchClient","title":"ImSwitchClient Documentation","description":"ImSwitchClient is a Python package designed to connect to the ImSwitch REST API, enabling remote control of ImSwitchUC2 functionalities directly from Jupyter Notebooks. This client facilitates easy integration with the ImSwitch ecosystem, offering programmable access to various features like laser control, stage manipulation, and image acquisition.","sidebar":"tutorialSidebar"},"ImSwitch/ImSwitchConfig":{"id":"ImSwitch/ImSwitchConfig","title":"ImSwitchConfig","description":"ImSwitch Config File","sidebar":"tutorialSidebar"},"ImSwitch/ImSwitchDocker":{"id":"ImSwitch/ImSwitchDocker","title":"ImSwitch in Docker","description":"Setting Up ImSwitch React and Backend (optional: with Docker Compose)","sidebar":"tutorialSidebar"},"ImSwitch/ImSwitchExperimental":{"id":"ImSwitch/ImSwitchExperimental","title":"ImSwitch Experimental Features Documentation","description":"Overview","sidebar":"tutorialSidebar"},"ImSwitch/ImSwitchInstall":{"id":"ImSwitch/ImSwitchInstall","title":"Install ImSwitch","description":"What will you learn?","sidebar":"tutorialSidebar"},"ImSwitch/ImSwitchInstaller":{"id":"ImSwitch/ImSwitchInstaller","title":"Install ImSwitch using the ImSwitch Installer (Electron updated Version)","description":"This is a work-in-progress installer. 
Please have a look for updates or file an issue here https://github.com/openUC2/ImSwitchInstaller/issues","sidebar":"tutorialSidebar"},"ImSwitch/ImSwitchInstallerConda":{"id":"ImSwitch/ImSwitchInstallerConda","title":"Install ImSwitch using the ImSwitch Installer (CONDA INSTALLER OUTDATED)","description":"This tutorial is outdated! Please look for the Standalone Electron-based version!","sidebar":"tutorialSidebar"},"ImSwitch/ImSwitchInstallUbuntu":{"id":"ImSwitch/ImSwitchInstallUbuntu","title":"ImSwitchInstallUbuntu","description":"ImSwitch Installation Ubuntu","sidebar":"tutorialSidebar"},"ImSwitch/ImSwitchInstallWindows":{"id":"ImSwitch/ImSwitchInstallWindows","title":"ImSwitchInstallWindows","description":"ImSwitch Installation on Windows","sidebar":"tutorialSidebar"},"ImSwitch/ImSwitchUpdate":{"id":"ImSwitch/ImSwitchUpdate","title":"ImSwitchUpdate","description":"Updated openUC2 ImSwitch","sidebar":"tutorialSidebar"},"intro":{"id":"intro","title":"openUC2 Documentation","description":"Here you can find all information to enhance, repair, improve, use, communicate,.... our optical toolbox openUC2. Did not find what you were looking for? No problem. Send us a mail or write an issue in our github repository https://github.com/openUC2/UC2-GIT/issues.","sidebar":"tutorialSidebar"},"Investigator/FlowStopper/README":{"id":"Investigator/FlowStopper/README","title":"README","description":"Setup Wifi Access Point on the Raspi","sidebar":"tutorialSidebar"},"Investigator/Lightsheet/LightSheet":{"id":"Investigator/Lightsheet/LightSheet","title":"openUC2 Light-Sheet Microscope","description":"In this experiment, we will explore the concept of optical sectioning to improve the resolution along the optical axis and the XY plane. The Light-Sheet Microscope, also known as the Light-Sheet Microscopy or Lattice Light-Sheet Microscopy, is a powerful technique used to acquire volumetric images of samples, such as zebrafishes. 
This technique enables us to visualize biological specimens in three dimensions with high resolution and minimal phototoxicity.","sidebar":"tutorialSidebar"},"Investigator/Lightsheet/LightSheet Sample":{"id":"Investigator/Lightsheet/LightSheet Sample","title":"openUC2 Light-Sheet Tips and Tricks","description":"Introduction to the openUC2 Light-Sheet Microscope","sidebar":"tutorialSidebar"},"Investigator/Lightsheet/LightsheetCalibration":{"id":"Investigator/Lightsheet/LightsheetCalibration","title":"Light-sheet alignment","description":"This tutorial will show you how to find the light-sheet and align this w.r.t. the camera plane.","sidebar":"tutorialSidebar"},"Investigator/Lightsheet/LightSheetOld":{"id":"Investigator/Lightsheet/LightSheetOld","title":"openUC2 Light-Sheet Microscope (Old Version)","description":"This is the manual for the Light sheet Microscope.","sidebar":"tutorialSidebar"},"Investigator/STORM/Electronics":{"id":"Investigator/STORM/Electronics","title":"Electronics","description":"Here we make use of the ESP32 Wemos D1 R32 microcontroller board in combination with the CNC Shield v3. The wiring of the different components is straight forward as the Stepper Motors are attached to the stepper drivers and the Laser is triggered by the SpinEn pin. The NeoPixel LED mounts to the Hold pin.","sidebar":"tutorialSidebar"},"Investigator/STORM/Illumination":{"id":"Investigator/STORM/Illumination","title":"Setting up the laser","description":"Laser illumination","sidebar":"tutorialSidebar"},"Investigator/STORM/Main":{"id":"Investigator/STORM/Main","title":"U.C.*STORM*","description":"---","sidebar":"tutorialSidebar"},"Investigator/STORM/Results":{"id":"Investigator/STORM/Results","title":"Results","description":"Imaging with the UC2-STORM setup","sidebar":"tutorialSidebar"},"Investigator/STORM/Software":{"id":"Investigator/STORM/Software","title":"Software","description":"For the control and acquisition software, we use ImSwitch. 
This is an open-source software centered around Napari as a multi-layer viewer and a rich framework for QT-based widgets. We make use of the open-source localization framework \\"microEye\\" ()","sidebar":"tutorialSidebar"},"Investigator/STORM/Stability":{"id":"Investigator/STORM/Stability","title":"Stability","description":"Setup stability","sidebar":"tutorialSidebar"},"Investigator/XYZMicroscope/AlignLaser":{"id":"Investigator/XYZMicroscope/AlignLaser","title":"Aligning the Beamsplitter Cube","description":"The new xyz microscope has a special 2x1 cube that holds the fluorescence optics. Inside the beamsplitter cube is mounted kinematically and can be adjusted with 3 set screws. It\'s important that the fiber coupled laser is focussed / reimaged in the back focal plane of the objective lens. Therefore, we have created a little tutorial to get you starting how this works.","sidebar":"tutorialSidebar"},"Investigator/XYZMicroscope/FiveD_v1":{"id":"Investigator/XYZMicroscope/FiveD_v1","title":"openUC2 FiveD v1","description":"Unpacking the microscope","sidebar":"tutorialSidebar"},"Investigator/XYZMicroscope/FiveD_v2":{"id":"Investigator/XYZMicroscope/FiveD_v2","title":"openUC2 FiveD v2","description":"Design Files","sidebar":"tutorialSidebar"},"Investigator/XYZMicroscope/FiveD_v3":{"id":"Investigator/XYZMicroscope/FiveD_v3","title":"openUC2 FiveD v3","description":"Design Files","sidebar":"tutorialSidebar"},"Investigator/XYZMicroscope/FiveD_v4":{"id":"Investigator/XYZMicroscope/FiveD_v4","title":"openUC2 FiveD v4","description":"Design Files","sidebar":"tutorialSidebar"},"Investigator/XYZMicroscope/HistoScan":{"id":"Investigator/XYZMicroscope/HistoScan","title":"Histo Scanner Plugin Documentation","description":"Welcome to the documentation page for the Histo Scanner Plugin, a powerful tool for scanning large areas and stitching images onto a large canvas. 
This page provides detailed information on how to configure and use the plugin effectively.","sidebar":"tutorialSidebar"},"Investigator/XYZMicroscope/MCTPlugin":{"id":"Investigator/XYZMicroscope/MCTPlugin","title":"MCT (Multi-Colour Timelapse) Imaging Plugin","description":"More information are coming soon","sidebar":"tutorialSidebar"},"Investigator/XYZMicroscope/ROIScanner":{"id":"Investigator/XYZMicroscope/ROIScanner","title":"ROI Scanner","description":"Starting ImSwitch on Ubuntu and Start the ROI Scanner","sidebar":"tutorialSidebar"},"Investigator/XYZMicroscope/SetupPhasecontrast":{"id":"Investigator/XYZMicroscope/SetupPhasecontrast","title":"openUC2 Phase-Contrast Setup Tutorial","description":"Introduction: Understanding Phase-Contrast Microscopy","sidebar":"tutorialSidebar"},"Investigator/XYZMicroscope/SetupTubelens":{"id":"Investigator/XYZMicroscope/SetupTubelens","title":"openUC2 Setting up the tube lens","description":"

The process in images

Visit https://youseetoo.github.io/: +

Getting Started

First Steps, Getting Started, Flashing - Simply Quick Start!

Duration:20

Installing the driver

Prerequirements: We make use of the Espressif ESP32 MCU, which comes with the CH340 USB-UART interface. For this you need to install the appropriate driver.

Installing the USB Serial Driver Install the CH340 USB Serial driver is explained in more detail here: Sparkfun

Flash the Firmware using the Web Tool

Go to our ESP flashing tool website and do everything within the browser!!

https://youseetoo.github.io/

The process in images

Visit https://youseetoo.github.io/:

Connect the ESP32 to the computer using the USb and hit the connect button

Select the Serial port from the list of possible devices

Install the firmware @@ -28,8 +28,8 @@ 4.1. Go to Platformio Home and navigate to Devices 4.2 Copy the Device port (if connected) and insert that into the platformio.ini, e.g. upload_port = /dev/cu.SLAB_USBtoUART or COM3for windoof

  • Hit the PlatformIO upload button; The following task will be run: platformio run --target upload; The code is getting compiled and saved into ./.pio/build/ 5.1 The code will be uploaded. If eerything goes right the terminal says: `Leaving... Hard resetting via RTS pin...``
  • open the PlatformIO serial monitor (remember to also change the port in the platform.io accordingly) and check the ESP32's output (eventually hit the reset button)
  • In case you have any problems: File an issue :-)
  • In order to test several commands, you can find a useful list of jsonfiles in this file: main/json_api_BD.txt

    Flashing latest version of the Firmware (DEPRECATED!)

    We have multiple ways to flash the firmware, which we will describe briefly:

    1. Use precompiled binaries and upload using the esptool.py (Github)
    2. Use the Arduino IDE to compile and upload the software
    3. Use the Arduino IDE to upload the OTA example and upload the precompiled binary

    The current firmware can be found in the UC2-REST repository. A Github Action builds the binaries everytime a new release is getting published. The artifacts are also pushed to the build folder. This way you don't need to hassle with the Arduino IDE in order to install all libraries and dependencies. With the binaries, there are two ways to flash them on a freshly bought ESP32:

    1. Using the esptool.py to upload it through USB
    2. Flash the Arduino-OTA example, browse to the Website and upload the .bin file

    Flashing the code with esptool.py (DEPRECATED!)

    The UC2-REST offers a firmware flasher to help you going through the steps:

    1. Download the latest firmware
    2. Start opening the Port
    3. Flash the Firmware

    For this we prepared a jupyter notebook that you can access and run here -The full process will take around 20 minutes.

    Flashing the code with OTA (DEPRECATED!)

    For this you can flash the example code BasicOTA.ino that comes in the Arduino IDE under Examples => Arduino OTA.

    Use the following code below (change SSID/Password to your Wifi that the computer uses), flash it and open the Browser to open the webpage. Upload the Binary and you'Re done!

    #include <WiFi.h>
    #include <ESPmDNS.h>
    #include <WiFiUdp.h>
    #include <ArduinoOTA.h>

    const char* ssid = "..........";
    const char* password = "..........";

    void setup() {
    Serial.begin(115200);
    Serial.println("Booting");
    WiFi.mode(WIFI_STA);
    WiFi.begin(ssid, password);
    while (WiFi.waitForConnectResult() != WL_CONNECTED) {
    Serial.println("Connection Failed! Rebooting...");
    delay(5000);
    ESP.restart();
    }

    ArduinoOTA
    .onStart([]() {
    String type;
    if (ArduinoOTA.getCommand() == U_FLASH)
    type = "sketch";
    else // U_SPIFFS
    type = "filesystem";

    // NOTE: if updating SPIFFS this would be the place to unmount SPIFFS using SPIFFS.end()
    Serial.println("Start updating " + type);
    })
    .onEnd([]() {
    Serial.println("\nEnd");
    })
    .onProgress([](unsigned int progress, unsigned int total) {
    Serial.printf("Progress: %u%%\r", (progress / (total / 100)));
    })
    .onError([](ota_error_t error) {
    Serial.printf("Error[%u]: ", error);
    if (error == OTA_AUTH_ERROR) Serial.println("Auth Failed");
    else if (error == OTA_BEGIN_ERROR) Serial.println("Begin Failed");
    else if (error == OTA_CONNECT_ERROR) Serial.println("Connect Failed");
    else if (error == OTA_RECEIVE_ERROR) Serial.println("Receive Failed");
    else if (error == OTA_END_ERROR) Serial.println("End Failed");
    });

    ArduinoOTA.begin();

    Serial.println("Ready");
    Serial.print("IP address: ");
    Serial.println(WiFi.localIP());
    }

    void loop() {
    ArduinoOTA.handle();
    }

    Compiling and flashing the code using the Arduino IDE

    You can download/clone the UC2-REST repository and open the file main.ino, copy the libraries in the library folder into the Arduino IDE library folder under Documents/Arduino/libraries, compile and upload it. More information comes in the very end of this tutorial

    - +The full process will take around 20 minutes.

    Flashing the code with OTA (DEPRECATED!)

    For this you can flash the example code BasicOTA.ino that comes in the Arduino IDE under Examples => Arduino OTA.

    Use the following code below (change SSID/Password to your Wifi that the computer uses), flash it and open the Browser to open the webpage. Upload the Binary and you're done!

    #include <WiFi.h>
    #include <ESPmDNS.h>
    #include <WiFiUdp.h>
    #include <ArduinoOTA.h>

    const char* ssid = "..........";
    const char* password = "..........";

    void setup() {
    Serial.begin(115200);
    Serial.println("Booting");
    WiFi.mode(WIFI_STA);
    WiFi.begin(ssid, password);
    while (WiFi.waitForConnectResult() != WL_CONNECTED) {
    Serial.println("Connection Failed! Rebooting...");
    delay(5000);
    ESP.restart();
    }

    ArduinoOTA
    .onStart([]() {
    String type;
    if (ArduinoOTA.getCommand() == U_FLASH)
    type = "sketch";
    else // U_SPIFFS
    type = "filesystem";

    // NOTE: if updating SPIFFS this would be the place to unmount SPIFFS using SPIFFS.end()
    Serial.println("Start updating " + type);
    })
    .onEnd([]() {
    Serial.println("\nEnd");
    })
    .onProgress([](unsigned int progress, unsigned int total) {
    Serial.printf("Progress: %u%%\r", (progress / (total / 100)));
    })
    .onError([](ota_error_t error) {
    Serial.printf("Error[%u]: ", error);
    if (error == OTA_AUTH_ERROR) Serial.println("Auth Failed");
    else if (error == OTA_BEGIN_ERROR) Serial.println("Begin Failed");
    else if (error == OTA_CONNECT_ERROR) Serial.println("Connect Failed");
    else if (error == OTA_RECEIVE_ERROR) Serial.println("Receive Failed");
    else if (error == OTA_END_ERROR) Serial.println("End Failed");
    });

    ArduinoOTA.begin();

    Serial.println("Ready");
    Serial.print("IP address: ");
    Serial.println(WiFi.localIP());
    }

    void loop() {
    ArduinoOTA.handle();
    }

    Compiling and flashing the code using the Arduino IDE

    You can download/clone the UC2-REST repository and open the file main.ino, copy the libraries in the library folder into the Arduino IDE library folder under Documents/Arduino/libraries, compile and upload it. More information comes in the very end of this tutorial

    + \ No newline at end of file diff --git a/docs/Electronics/uc2e5.1/index.html b/docs/Electronics/uc2e5.1/index.html index f5732c0ea..7829941d1 100644 --- a/docs/Electronics/uc2e5.1/index.html +++ b/docs/Electronics/uc2e5.1/index.html @@ -10,16 +10,16 @@ - +
    -

    REST commands

    Controlling hardware using the WebSerial Standard

    The ESP32 works best by receiving commands over serial. We have a python interface that is also explained in a bit more depth in the connect-to-the-hardware section and can be found as a pip package here: https://pypi.org/project/UC2-REST/ and the source-code here: https://github.com/openUC2/UC2-REST/tree/master/uc2rest.

    However, installing python and starting a script sometimes is a bit cumbersome. Therefore we have implemented a quick-start tool in the browser, that makes use of the new WebSerial standard. You can connect the ESP32 and easily communicate with it by first connecting to it and then sending commands back and forth. The steps are as follows:

    Visit the page https://youseetoo.github.io/indexWebSerialTest.html

    Connect to the board by hitting the connect button +

    REST commands

    Controlling hardware using the WebSerial Standard

    The ESP32 works best by receiving commands over serial. We have a python interface that is also explained in a bit more depth in the connect-to-the-hardware section and can be found as a pip package here: https://pypi.org/project/UC2-REST/ and the source-code here: https://github.com/openUC2/UC2-REST/tree/master/uc2rest.

    However, installing python and starting a script sometimes is a bit cumbersome. Therefore we have implemented a quick-start tool in the browser, that makes use of the new WebSerial standard. You can connect the ESP32 and easily communicate with it by first connecting to it and then sending commands back and forth. The steps are as follows:

    Visit the page https://youseetoo.github.io/indexWebSerialTest.html

    Connect to the board by hitting the connect button

    Choose the device that represents your ESP32

    Control Motors by hitting the button, observe the serial output or send commands on your own by entering it in the bar in the lower end

    Cheat-sheet for the different control commands

    Motors

    Documentation for MOTOR API:

    The MOTOR API is used for controlling the motion of stepper motors. The API consists of several commands that can be used to set up and control the motors.

    Motor Setup

    To set up the motor, you need to use the "/motor_set" command. This command takes a JSON object that defines the motor configuration.

    The configuration consists of an array of stepper motor objects, where each object represents a single stepper motor. The following properties are defined for each stepper motor object:

    • stepperid: A unique identifier for the stepper motor. This value can be any integer.
    • step: The pin number used to control the stepper motor's step signal.
    • dir: The pin number used to control the stepper motor's direction signal.
    • enable: The pin number used to control the stepper motor's enable signal.
    • step_inverted: A boolean value indicating whether the step signal is inverted.
    • dir_inverted: A boolean value indicating whether the direction signal is inverted.
    • enable_inverted: A boolean value indicating whether the enable signal is inverted.
    • min_pos: The minimum position value for the motor. This value can be any integer.
    • max_pos: The maximum position value for the motor. This value can be any integer.

    Here's an example JSON object that can be used with the "/motor_set" command:

    {"task":"/motor_set",
    "motor":
    {
    "steppers": [
    { "stepperid": 1, "step": 26, "dir": 16, "enable": 12, "step_inverted": 0, "dir_inverted": 0, "enable_inverted": 0 , "min_pos":0, "max_pos":0},
    { "stepperid": 2, "step": 25, "dir": 27, "enable": 12, "step_inverted": 0, "dir_inverted": 0, "enable_inverted": 0 , "min_pos":0, "max_pos":0},
    { "stepperid": 3, "step": 17, "dir": 14, "enable": 12, "step_inverted": 0, "dir_inverted": 0, "enable_inverted": 0 , "min_pos":0, "max_pos":0},
    { "stepperid": 0, "step": 19, "dir": 18, "enable": 12, "step_inverted": 0, "dir_inverted": 0, "enable_inverted": 0 , "min_pos":0, "max_pos":0}
    ]
    }
    }

    The "motor" property is an object that contains the "steppers" array, which defines the stepper motors. In this example, four stepper motors are defined, each with their own unique stepperid and pin assignments for the step, dir, and enable signals.

    Motor Enable/Disable

    To enable or disable the stepper motors, you can use the "/motor_set" command with the "isen" property set to 1 to enable or 0 to disable the motors. Here's an example JSON object that can be used to enable the motors:

    {"task":"/motor_set", "isen":1}

    Motor Movement

    To move the stepper motors, you can use the "/motor_act" command. This command takes a JSON object that defines the movement configuration for the motors.

    The configuration consists of an array of stepper motor objects, where each object represents a single stepper motor. The following properties are defined for each stepper motor object:

    • stepperid: The unique identifier for the stepper motor that you want

    DIGITAL OUT

    Trigger Setup

    To set up a digital output pin as a trigger, use the following command:

    {"task":"/digitalout_set", "digitaloutid":1, "digitaloutpin":4}

    Here, digitaloutid specifies the ID of the digital output pin, and digitaloutpin specifies the physical pin number to which the output is connected.

    Trigger Activation

    To activate the trigger, use the following command:

    {"task":"/digitalout_act", "digitalout1TriggerDelayOn":100, "digitalout1TriggerDelayOff":10, "digitalout1IsTrigger":1,  "digitalout2TriggerDelayOn":10, "digitalout2TriggerDelayOff":100, "digitalout2IsTrigger":1}

    This command activates both digitalout1 and digitalout2 as triggers. The digitalout1TriggerDelayOn and digitalout1TriggerDelayOff parameters specify the time delays in milliseconds for turning the output on and off, respectively. The digitalout2TriggerDelayOn and digitalout2TriggerDelayOff parameters specify the time delays for the second output.

    The digitalout1IsTrigger and digitalout2IsTrigger parameters indicate whether the outputs should be used as triggers (1) or not (0).

    Trigger Reset

    To reset the trigger, use the following command:

    {"task":"/digitalout_act", "digitaloutistriggerreset":1}

    This command resets all the digital outputs that are used as triggers.

    LASER

    Laser Setup

    To set up a laser, use the following command:

    {"task": "/laser_act", "LASERid":2, "LASERpin":19}

    Here, LASERid specifies the ID of the laser channel, and LASERpin specifies the physical pin number to which the laser is connected.

    Laser Activation

    To activate the laser, use the following command:

    {"task": "/laser_act", "LASERid":1, "LASERval": 1024}

    This command activates the laser on channel 1 with a power value of 1024.

    Laser Control

    To control the laser, use the following command:

    payload = {
    "task": path,
    "LASERid": channel,
    "LASERval": value,
    "LASERdespeckle": int(value*despeckleAmplitude),
    "LASERdespecklePeriod": int(despecklePeriod),
    }
    {"task": "/laser_act", "LASERid":1, "LASERval": 512, "LASERdespeckle": 10, "LASERdespecklePeriod": 100}

    Here, LASERid specifies the ID of the laser channel, and LASERval specifies the power value for the laser. The LASERdespeckle parameter is an optional parameter that specifies the despeckle amplitude, and LASERdespecklePeriod is an optional parameter that specifies the despeckle period.

    Laser Reading

    To read the laser value, use the following command:

    {"task": "/laser_get"}

    This command returns the current value of the laser.

    LED ARRAY

    To set up an LED array, use the following command:

    {"task": "/ledarr_set", "led":{"ledArrPin":32, "ledArrNum":64}}

    Here, ledArrPin specifies the physical pin number to which the LED array is connected, and ledArrNum specifies the number of LEDs in the array.

    - + \ No newline at end of file diff --git a/docs/Electronics/uc2e5.2/index.html b/docs/Electronics/uc2e5.2/index.html index d8e49a184..0231f8ba6 100644 --- a/docs/Electronics/uc2e5.2/index.html +++ b/docs/Electronics/uc2e5.2/index.html @@ -10,13 +10,13 @@ - +
    -

    Python commands

    Using UC2-REST in Python

    The uc2rest library communicates with an ESP32 microcontroller via a serial connection and is available here https://github.com/openUC2/UC2-REST/ and via pip install uc2-rest.

    The script below starts by importing the necessary modules, including uc2rest, which provides a high-level interface for communicating with the ESP32 using the UC2 protocol. Next, the script initializes the UC2Client object with a serial port and enables debug output for the serial connection.

    After verifying that the correct device is connected, the script sends a test command to the ESP32 to retrieve the current state of the motor using the motor_get task. The returned data is printed to the console.

    Note that the command is formatted as a string in JSON format, using single quotes instead of double quotes. This is because the JSON format requires double quotes, but Python interprets double quotes as the beginning or end of a string, which can cause issues when sending commands over a serial connection.

    Initialize the Library

    # Import necessary libraries
    import uc2rest
    import numpy as np
    import time

    # Define serial port
    port = "unknown"

    # Create UC2Client object with specified serial port and debug settings
    ESP32 = uc2rest.UC2Client(serialport=port, DEBUG=True)
    ESP32.serial.DEBUG=True # Setting debug output of the serial to true - all messages will be printed

    # Check if device is connected and confirm it is the right one
    mState = ESP32.state.get_state()

    Manual Command Sending

    Send a command you formulate manually

    ''' ################
    SERIAL
    ################'''

    # Define a test command to send to the UC2 device via serial
    test_cmd = "{'task': '/motor_get'}"

    # Send the test command to the UC2 device via serial
    ESP32.serial.writeSerial(test_cmd)

    The below code can be used to manipulate the LED.

    ''' ################
    LED
    ################'''

    # Create an instance of the ESP32 class
    ESP32 = ESP32()

    # Turn on all LEDs with full white color (RGB)
    mResult = ESP32.led.send_LEDMatrix_full(intensity=(255, 255, 255))
    time.sleep(0.5) # Pause for half a second

    # Turn off all LEDs
    mResult = ESP32.led.send_LEDMatrix_full(intensity=(0, 0, 0))

    # Turn on each LED one by one with full white color (RGB) and turn it off immediately
    for iLED in range(5):
    # Turn on a single LED by index number
    mResult = ESP32.led.send_LEDMatrix_single(indexled=iLED, intensity=(255, 255, 255), timeout=0.)
    # Turn off the same LED
    mResult = ESP32.led.send_LEDMatrix_single(indexled=iLED, intensity=(0, 0, 0), timeout=0.)

    # Display a random pattern on the LED matrix for 5 times
    for i in range(5):
    # Generate a random pattern of 25 LEDs with 3 color values (RGB)
    led_pattern = np.random.randint(0, 55, (25, 3))
    # Display the pattern on the LED matrix
    mResult = ESP32.led.send_LEDMatrix_array(led_pattern=led_pattern, timeout=0)
    # Check if the pattern was sent successfully
    assert mResult["success"] == 1, "Failed sending LED command"

    # Display a left-to-right moving pattern on the LED matrix (commented out with if statement)
    if(0):
    # Create an empty LED pattern
    led_pattern = np.zeros((25, 3))
    # Define the left and right halves of the LED matrix by index numbers
    list_left = (0, 1, 2, 3, 4, 5, 9, 10, 11, 12, 13, 14, 15, 16, 17)
    list_right = (0, 5, 6, 7, 8, 9, 18, 19, 20, 21, 22, 23, 24)
    # Turn on the left half of the LED pattern to full red (RGB = (255, 0, 0))
    led_pattern[list_left, 0] = 255
    # Turn on the right half of the LED pattern to full green (RGB = (0, 255, 0))
    led_pattern[list_right, 1] = 255
    # Display the pattern on the LED matrix for 1 second
    ESP32.led.send_LEDMatrix_array(led_pattern=led_pattern, timeout=1)
    # Turn off the LED matrix
    ESP32.led.send_LEDMatrix_array(led_pattern=led_pattern*0, timeout=1)
    - +

    Python commands

    Using UC2-REST in Python

    The uc2rest library communicates with an ESP32 microcontroller via a serial connection and is available here https://github.com/openUC2/UC2-REST/ and via pip install uc2-rest.

    The script below starts by importing the necessary modules, including uc2rest, which provides a high-level interface for communicating with the ESP32 using the UC2 protocol. Next, the script initializes the UC2Client object with a serial port and enables debug output for the serial connection.

    After verifying that the correct device is connected, the script sends a test command to the ESP32 to retrieve the current state of the motor using the motor_get task. The returned data is printed to the console.

    Note that the command is formatted as a string in JSON format, using single quotes instead of double quotes. This is because the JSON format requires double quotes, but Python interprets double quotes as the beginning or end of a string, which can cause issues when sending commands over a serial connection.

    Initialize the Library

    # Import necessary libraries
    import uc2rest
    import numpy as np
    import time

    # Define serial port
    port = "unknown"

    # Create UC2Client object with specified serial port and debug settings
    ESP32 = uc2rest.UC2Client(serialport=port, DEBUG=True)
    ESP32.serial.DEBUG=True # Setting debug output of the serial to true - all messages will be printed

    # Check if device is connected and confirm it is the right one
    mState = ESP32.state.get_state()

    Manual Command Sending

    Send a command you formulate manually

    ''' ################
    SERIAL
    ################'''

    # Define a test command to send to the UC2 device via serial
    test_cmd = "{'task': '/motor_get'}"

    # Send the test command to the UC2 device via serial
    ESP32.serial.writeSerial(test_cmd)

    The below code can be used to manipulate the LED.

    ''' ################
    LED
    ################'''

    # Create an instance of the ESP32 class
    ESP32 = ESP32()

    # Turn on all LEDs with full white color (RGB)
    mResult = ESP32.led.send_LEDMatrix_full(intensity=(255, 255, 255))
    time.sleep(0.5) # Pause for half a second

    # Turn off all LEDs
    mResult = ESP32.led.send_LEDMatrix_full(intensity=(0, 0, 0))

    # Turn on each LED one by one with full white color (RGB) and turn it off immediately
    for iLED in range(5):
    # Turn on a single LED by index number
    mResult = ESP32.led.send_LEDMatrix_single(indexled=iLED, intensity=(255, 255, 255), timeout=0.)
    # Turn off the same LED
    mResult = ESP32.led.send_LEDMatrix_single(indexled=iLED, intensity=(0, 0, 0), timeout=0.)

    # Display a random pattern on the LED matrix for 5 times
    for i in range(5):
    # Generate a random pattern of 25 LEDs with 3 color values (RGB)
    led_pattern = np.random.randint(0, 55, (25, 3))
    # Display the pattern on the LED matrix
    mResult = ESP32.led.send_LEDMatrix_array(led_pattern=led_pattern, timeout=0)
    # Check if the pattern was sent successfully
    assert mResult["success"] == 1, "Failed sending LED command"

    # Display a left-to-right moving pattern on the LED matrix (commented out with if statement)
    if(0):
    # Create an empty LED pattern
    led_pattern = np.zeros((25, 3))
    # Define the left and right halves of the LED matrix by index numbers
    list_left = (0, 1, 2, 3, 4, 5, 9, 10, 11, 12, 13, 14, 15, 16, 17)
    list_right = (0, 5, 6, 7, 8, 9, 18, 19, 20, 21, 22, 23, 24)
    # Turn on the left half of the LED pattern to full red (RGB = (255, 0, 0))
    led_pattern[list_left, 0] = 255
    # Turn on the right half of the LED pattern to full green (RGB = (0, 255, 0))
    led_pattern[list_right, 1] = 255
    # Display the pattern on the LED matrix for 1 second
    ESP32.led.send_LEDMatrix_array(led_pattern=led_pattern, timeout=1)
    # Turn off the LED matrix
    ESP32.led.send_LEDMatrix_array(led_pattern=led_pattern*0, timeout=1)
    + \ No newline at end of file diff --git a/docs/Electronics/uc2e5/index.html b/docs/Electronics/uc2e5/index.html index 743c2f818..3a13d621c 100644 --- a/docs/Electronics/uc2e5/index.html +++ b/docs/Electronics/uc2e5/index.html @@ -10,14 +10,14 @@ - +
    -

    REST principle

    Introduction into the ESP32 microcontroller firmware

    Duration:5

    The firmware that runs on the ESP32 is under constant development and subject to heavy changes! However, the core idea will remain the same and is inspired by the +

    REST principle

    Introduction into the ESP32 microcontroller firmware

    Duration:5

    The firmware that runs on the ESP32 is under constant development and subject to heavy changes! However, the core idea will remain the same and is inspired by the "REST-API", which deals with "endpoints" in the HTML world (e.g. "/home"). We implemented the follow functions:

    • /*_act-> this starts an action
    • /*_get-> this will return parameters or states
    • /*_set-> this will set parameters or states

    The functions will work on different actuators and sensors e.g. motors, lasers, leds and so on.

    The API is callable through USB Serial and/or Wifi. The ESP32 can connect to a nearby Wifi Hotspot or even create its own access point (AP). Additional documentation for this will follow soon.

    In general, to interact with a device (e.g. stage), one has to send a JSON document, which is similar to the REST-API in the Internet world. A simple example to rotate a motor would be:

    {
    "task": "/motor_act",
    "axis":1,
    "speed":1000,
    "position":1000,
    "isabsolute":1,
    "isblocking":1
    }
    - + \ No newline at end of file diff --git a/docs/Electronics/uc2e6/index.html b/docs/Electronics/uc2e6/index.html index 3d5df6edf..85b8baf88 100644 --- a/docs/Electronics/uc2e6/index.html +++ b/docs/Electronics/uc2e6/index.html @@ -10,13 +10,13 @@ - +
    -

    Connecting devices

    Connect devices

    Here you learn how to connect the ESP32 to the Arduino IDE, connect external hardware components (e.g. LED matrix) and control the electronics using the USB-serial interface. This is an older version of the ESP32, but the mechanism stays the same 🙃

    - +

    Connecting devices

    Connect devices

    Here you learn how to connect the ESP32 to the Arduino IDE, connect external hardware components (e.g. LED matrix) and control the electronics using the USB-serial interface. This is an older version of the ESP32, but the mechanism stays the same 🙃

    + \ No newline at end of file diff --git a/docs/Electronics/uc2e7/index.html b/docs/Electronics/uc2e7/index.html index 905c98105..b4952e432 100644 --- a/docs/Electronics/uc2e7/index.html +++ b/docs/Electronics/uc2e7/index.html @@ -10,15 +10,15 @@ - +
    -

    Controlling the UC2e

    Controlling the ESP32

    The unified "REST-API" (inspired, not following full protocol), enables you to control the functionalities from multiple different clients (e.g. Python, web browser, Android phone). The core idea is to file post/get requests (serial/wifi) that send/receive JSON files that do "something".

    Installing the USB Serial Driver: Installing the CH340 USB Serial driver is explained in more detail here: Sparkfun

    🐍 Python Bindings

    In order to interact with the electronics, we implemented a Python library called UC2-REST, available here that will help you to work with the device. The easiest way to install it would be:

    pip install uc2-rest

    It will automatically detect your UC2e (if the driver is installed), connect and will offer you the basic functionalities such as moving the motor, etc.

    In order to give you a deep dive in what's possible, we provide a Jupyter Notebook that guides you through all the functionalities. You can find it here +

    Controlling the UC2e

    Controlling the ESP32

    The unified "REST-API" (inspired, not following full protocol), enables you to control the functionalities from multiple different clients (e.g. Python, web browser, Android phone). The core idea is to file post/get requests (serial/wifi) that send/receive JSON files that do "something".

    Installing the USB Serial Driver: Installing the CH340 USB Serial driver is explained in more detail here: Sparkfun

    🐍 Python Bindings

    In order to interact with the electronics, we implemented a Python library called UC2-REST, available here that will help you to work with the device. The easiest way to install it would be:

    pip install uc2-rest

    It will automatically detect your UC2e (if the driver is installed), connect and will offer you the basic functionalities such as moving the motor, etc.

    In order to give you a deep dive in what's possible, we provide a Jupyter Notebook that guides you through all the functionalities. You can find it here Start Jupiter Tutorial

    📲 Android APP

    This is coming soon. You will be able to control the electronics using the Wifi connection of your Android phone.

    💻 Browser APP

    If the ESP32 is offering an access point or is connected to your wifi router, you can access the webserver running on the ESP32 using a browser. It offers limited control over the Endpoints by filing post and get requests.

    More information is coming soon!

    🎮 Playstation 3 or Playstation 4 Controller (coming soon)

    With the open-source libraries PS3Controller and PS4Controller we are able to make use of the Bluetooth-able joysticks from your beloved game console.

    When a PS4 controller is 'paired' to a PS4 console, it just means that it has stored the console's Bluetooth MAC address, which is the only device the controller will connect to. Usually, this pairing happens when you connect the controller to the PS4 console using a USB cable, and press the PS button. This initiates writing the console's MAC address to the controller.

    Therefore, if you want to connect your PS4 controller to the ESP32, you either need to figure out what the Bluetooth MAC address of your PS4 console is and set the ESP32's address to it, or change the MAC address stored in the PS4 controller.

    Whichever path you choose, you might want a tool to read and/or write the currently paired MAC address from the PS4 controller. You can try using sixaxispairer for this purpose.

    If you opted to change the ESP32's MAC address, you'll need to include the MAC address in the PS4.begin() function within the setup() Arduino function like below, where 1a:2b:3c:01:01:01 is the MAC address (note that the MAC address must be unicast):

    void setup()
    {
    PS4.begin("1a:2b:3c:01:01:01");
    Serial.println("Ready.");
    }

    Controlling using ImSwitch

    Please have a look here for more information about how to install ImSwitch and here for the UC2-related setup files including the UC2-REST serial interface.

    - + \ No newline at end of file diff --git a/docs/Electronics/uc2e8/index.html b/docs/Electronics/uc2e8/index.html index 597b8ca1c..944204677 100644 --- a/docs/Electronics/uc2e8/index.html +++ b/docs/Electronics/uc2e8/index.html @@ -10,16 +10,16 @@ - +
    -

    Compiling from Scratch

    UC2-ESP Firmware for the openUC2 UC2e electronics

    danger

    Note: We started the firmware in version V1 in this repository: UC2-REST and continued development the firmware version in V2 in another repository UC2-ESP32. We emphasize to use the latest firmware V2

    This repository provides the latest (V2) firmware that controls external hardware like Motors, LEDs, Lasers and other customized elements using an ESP32 and an adapter board. It is inspired by the UC2-REST firmware, but features a much more structured way of the code by dividing modules into separated classes. A ModuleController ensures a proper initialization of individual modules at runtime, which makes the entire code very modular and follows the overall UC2 principle.

    Similar to the legacy UC2-REST Firmware, the microcontroller can communicate using the wired serial and the wireless WiFi protocol. Both rely on a more or less similar REST API that uses endpoints to address an act, get, set command. For example, the information about the state of the ESP can be retrieved by issuing the code:

    {"task":"/state_get"}

    A list of all commands that can be sent via HTTP requests and serial commands (e.g. by using the Arduino IDE-contained Serial monitor at 115200 BAUD) can be found in the RestApi.md-file.

    Setting up the build/development environment

    In order to build the code, you have to follow the following steps:

    1. Install Visual Studio Code + the Extension called "Platform.io" => Restart Visual studio code to load PIO
    2. Clone the repository including all the submodules: git clone --recurse-submodules https://github.com/youseetoo/uc2-esp32
    3. Open the main folder in the Visual Studio Code
    4. Adjust the settings in the file platformio.ini-file (mostly the port) +

      Compiling from Scratch

      UC2-ESP Firmware for the openUC2 UC2e electronics

      danger

      Note: We started the firmware in version V1 in this repository: UC2-REST and continued development the firmware version in V2 in another repository UC2-ESP32. We emphasize to use the latest firmware V2

      This repository provides the latest (V2) firmware that controls external hardware like Motors, LEDs, Lasers and other customized elements using an ESP32 and an adapter board. It is inspired by the UC2-REST firmware, but features a much more structured way of the code by dividing modules into separated classes. A ModuleController ensures a proper initialization of individual modules at runtime, which makes the entire code very modular and follows the overall UC2 principle.

      Similar to the legacy UC2-REST Firmware, the microcontroller can communicate using the wired serial and the wireless WiFi protocol. Both rely on a more or less similar REST API that uses endpoints to address an act, get, set command. For example, the information about the state of the ESP can be retrieved by issuing the code:

      {"task":"/state_get"}

      A list of all commands that can be sent via HTTP requests and serial commands (e.g. by using the Arduino IDE-contained Serial monitor at 115200 BAUD) can be found in the RestApi.md-file.

      Setting up the build/development environment

      In order to build the code, you have to follow the following steps:

      1. Install Visual Studio Code + the Extension called "Platform.io" => Restart Visual studio code to load PIO
      2. Clone the repository including all the submodules: git clone --recurse-submodules https://github.com/youseetoo/uc2-esp32
      3. Open the main folder in the Visual Studio Code
      4. Adjust the settings in the file platformio.ini-file (mostly the port) 4.1. Go to Platformio Home and navigate to Devices 4.2 Copy the Device port (if connected) and insert that into the platformio.ini, e.g. upload_port = /dev/cu.SLAB_USBtoUART or COM3 for windows
      5. Hit the PlatformIO upload button; The following task will be run: platformio run --target upload; The code is getting compiled and saved into ./.pio/build/ 5.1 The code will be uploaded. If everything goes right the terminal says: `Leaving... Hard resetting via RTS pin...``
      6. open the PlatformIO serial monitor (remember to also change the port in the platform.io accordingly) and check the ESP32's output (eventually hit the reset button)
      7. In case you have any problems: File an issue :-)

      In order to test several commands, you can find a useful list of json files in this file: json_api_BD.txt

      V1: Source-code, Compiling and Binaries (Deprecated)

      The current version of the firmware can be found here: https://github.com/openUC2/UC2-REST/tree/master/ESP32

      Additional information on how to install and compile the board can be found in the README

      Precompiled binaries that can be installed through ImSwitch (more information coming soon) or the esptool.py can be found here https://github.com/openUC2/UC2-REST/tree/master/ESP32/build

      V1: Install necessary software for UC2 rest (flash and interact) (Deprecated)

      Here you learn how to install the necessary software (Arduino IDE, drivers, ESP-IDF, Arduino libraries) that are necessary for the system to be working. Everything is explained in the video below.

      Additional information about the UC2 electronics and UC2-REST are provided here: https://github.com/openUC2/UC2-REST

      Download and install the software:

      To simplify life, we host a dropbox folder containing all the necessary drivers and Software pieces for this workshop. It will run on a Windows 10 64 Bit system:

      List of relevant files

      for the UC2-REST

      • Arduino IDE: arduino-1.8.18-windows.exe
      • ESP32 USB driver: CH341SER.exe
      • UC2 Rest firmware: UC2-REST.zip

      Alternative GitHub links that provide you with the latest version of the software:

      Steps to install the software

      1. Download all relevant files from the Dropbox folder above
      2. Install the Arduino IDE (including all drivers if you are asked during the installation)
      3. Install the CH340 USB Serial driver https://learn.sparkfun.com/tutorials/how-to-install-ch340-drivers/all
      4. Extract BenesArduinoLibraries-master.zip to /User/$USER$/Documents/Arduino/libraries
      5. Open the Arduino IDE and add the ESP32 board configuration. For this you need to add the following URL to the settings tag: https://dl.espressif.com/dl/package_esp32_index.json, http://arduino.esp8266.com/stable/package_esp8266com_index.json. For additional information please have a look in this tutorial
      6. Once done, open the Board manager and add the ESP32 version 2.0.3
      7. Unzip the folder UC2-REST and open the file /ESP32/main/main.ino
      8. Select the board, the port and hit the compile and upload button
      9. IMPORTANT when setting up the build + upload, make sure you add this setting for the partition scheme (and potentially all others if not already set as default):

      The system accepts different hardware configurations (pins, devices, etc.). All of this is defined in the pindef_XXXX.h. Please have a look in the UC2-REST repository for additional information: https://github.com/openUC2/UC2-REST

      VIDEO Tutorial: Steps to install the software

    - + \ No newline at end of file diff --git a/docs/Electronics/uc2e9/index.html b/docs/Electronics/uc2e9/index.html index ffb5e166e..7b157e1a0 100644 --- a/docs/Electronics/uc2e9/index.html +++ b/docs/Electronics/uc2e9/index.html @@ -10,13 +10,13 @@ - + - +
    + \ No newline at end of file diff --git a/docs/ImSwitch/DahengCamera/index.html b/docs/ImSwitch/DahengCamera/index.html index 745ea71d6..af1912b88 100644 --- a/docs/ImSwitch/DahengCamera/index.html +++ b/docs/ImSwitch/DahengCamera/index.html @@ -10,14 +10,14 @@ - +

    Install driver for Daheng Camera

    Windows

    Have a look here: https://www.get-cameras.com/requestdownload and install the drivers / SDK (note: newer versions of ImSwitch ship the drivers).

    Linux

    ARM

    You can use the camera on the Raspberry Pi or Jetson Nano. For this you can do the following steps:

    cd ~
    cd Downloads
    wget https://dahengimaging.com/downloads/Galaxy_Linux-armhf_Gige-U3_32bits-64bits_1.5.2303.9202.zip
    # The download is a .zip archive; extract it before entering the directory
    unzip Galaxy_Linux-armhf_Gige-U3_32bits-64bits_1.5.2303.9202.zip
    cd Galaxy_Linux-armhf_Gige-U3_32bits-64bits_1.5.2303.9202
    chmod +x Galaxy_camera.run
    sudo ./Galaxy_camera.run
    # go through the installer questionnaire
    sudo reboot

    Install Python bindings

    cd ~/Downloads
    wget https://dahengimaging.com/downloads/Galaxy_Linux_Python_2.0.2106.9041.tar.gz
    tar -xvf Galaxy_Linux_Python_2.0.2106.9041.tar.gz
    cd ~/Downloads/Galaxy_Linux_Python_2.0.2106.9041/api
    # conda activate ****ENV
    pip install -e .
    # run the bundled mono-camera sample to verify the installation
    python ~/Downloads/Galaxy_Linux_Python_2.0.2106.9041/sample/GxSingleCamMono/GxSingleCamMono.py

    The result will be:

    /home/uc2/Downloads/Galaxy_Linux_Python_2.0.2106.9041/sample/GxSingleCamMono/GxSingleCamMono.py:19: SyntaxWarning: "is" with a literal. Did you mean "=="?
    if dev_num is 0:

    -------------------------------------------------------------
    Sample to show how to acquire mono image continuously and show acquired image.
    -------------------------------------------------------------

    Initializing......

    Frame ID: 0 Height: 3036 Width: 4024

    Sample Script

    # version:1.0.1905.9051
    import gxipy as gx
    from PIL import Image


    def main():
        """Acquire one mono image from the first enumerated Daheng camera and
        print its frame ID, height and width.

        Requires the Daheng Galaxy SDK and the ``gxipy`` Python bindings; exits
        early when no device is found or the first device is a color camera.
        """
        # print the demo information
        print("")
        print("-------------------------------------------------------------")
        print("Sample to show how to acquire mono image continuously and show acquired image.")
        print("-------------------------------------------------------------")
        print("")
        print("Initializing......")
        print("")

        # create a device manager
        device_manager = gx.DeviceManager()
        dev_num, dev_info_list = device_manager.update_device_list()
        # '== 0' instead of 'is 0': identity comparison with an int literal is
        # undefined behavior and raises the SyntaxWarning shown above
        if dev_num == 0:
            print("Number of enumerated devices is 0")
            return

        # open the first device (SDK indices are 1-based)
        cam = device_manager.open_device_by_index(1)

        # exit when the camera is a color camera (truthiness check, not 'is True')
        if cam.PixelColorFilter.is_implemented():
            print("This sample does not support color camera.")
            cam.close_device()
            return

        # set continuous acquisition
        cam.TriggerMode.set(gx.GxSwitchEntry.OFF)

        # set exposure
        cam.ExposureTime.set(10000)

        # set gain
        cam.Gain.set(10.0)

        # start data acquisition
        cam.stream_on()

        # acquire image: num is the image number
        num = 1
        for i in range(num):
            # get raw image
            raw_image = cam.data_stream[0].get_image()
            if raw_image is None:
                print("Getting image failed.")
                continue

            # create numpy array with data from raw image
            numpy_image = raw_image.get_numpy_array()
            if numpy_image is None:
                continue

            # print height, width, and frame ID of the acquisition image
            print("Frame ID: %d Height: %d Width: %d"
                  % (raw_image.get_frame_id(), raw_image.get_height(), raw_image.get_width()))

        # stop data acquisition
        cam.stream_off()

        # close device
        cam.close_device()


    if __name__ == "__main__":
        main()
    - + \ No newline at end of file diff --git a/docs/ImSwitch/ImSwitchClient/index.html b/docs/ImSwitch/ImSwitchClient/index.html index ccccd794c..24ddf03ce 100644 --- a/docs/ImSwitch/ImSwitchClient/index.html +++ b/docs/ImSwitch/ImSwitchClient/index.html @@ -10,13 +10,13 @@ - +

    ImSwitchClient Documentation

    ImSwitchClient is a Python package designed to connect to the ImSwitch REST API, enabling remote control of ImSwitchUC2 functionalities directly from Jupyter Notebooks. This client facilitates easy integration with the ImSwitch ecosystem, offering programmable access to various features like laser control, stage manipulation, and image acquisition.

    PyPI Version

    Features

    Installation

    To install ImSwitchClient, use the following pip command:

    pip install imswitchclient

    Quick Start Example

    This example demonstrates basic usage of ImSwitchClient for moving a positioner and acquiring an image.

    import imswitchclient.ImSwitchClient as imc
    import numpy as np
    import matplotlib.pyplot as plt
    import time

    # Initialize the client
    client = imc.ImSwitchClient()

    # Retrieve the first positioner's name and current position
    positioner_names = client.positionersManager.getAllDeviceNames()
    positioner_name = positioner_names[0]
    currentPositions = client.positionersManager.getPositionerPositions()[positioner_name]
    initialPosition = (currentPositions["X"], currentPositions["Y"])

    # Define and move to a new position
    newPosition = (initialPosition[0] + 10, initialPosition[1] + 10)
    client.positionersManager.movePositioner(positioner_name, "X", newPosition[0], is_absolute=True, is_blocking=True)
    client.positionersManager.movePositioner(positioner_name, "Y", newPosition[1], is_absolute=True, is_blocking=True)

    # Acquire and display an image
    time.sleep(0.5) # Allow time for the move
    lastFrame = client.recordingManager.snapNumpyToFastAPI()
    plt.imshow(lastFrame)
    plt.show()

    # Return the positioner to its initial position
    client.positionersManager.movePositioner(positioner_name, "X", initialPosition[0], is_absolute=True, is_blocking=True)
    client.positionersManager.movePositioner(positioner_name, "Y", initialPosition[1], is_absolute=True, is_blocking=True)

    Contributing

    Contributions to ImSwitchClient are welcome! Please refer to the project's GitHub repository for contribution guidelines: https://github.com/openUC2/imswitchclient/.

    License

    ImSwitchClient is licensed under the MIT License. For more details, see the LICENSE file in the project repository.

    - + \ No newline at end of file diff --git a/docs/ImSwitch/ImSwitchConfig/index.html b/docs/ImSwitch/ImSwitchConfig/index.html index ab39f9273..860175789 100644 --- a/docs/ImSwitch/ImSwitchConfig/index.html +++ b/docs/ImSwitch/ImSwitchConfig/index.html @@ -10,13 +10,13 @@ - +

    ImSwitchConfig

    ImSwitch Config File

    This is a sample uc2_hik_histo.jsonconfiguration file:

    {
    "positioners": {
    "ESP32Stage": {
    "managerName": "ESP32StageManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "isEnable": true,
    "enableauto": false,
    "stepsizeX": -0.3125,
    "stepsizeY": -0.3125,
    "stepsizeZ": 0.3125,
    "homeSpeedX": 15000,
    "homeSpeedY": 15000,
    "homeSpeedZ": 15000,
    "isDualaxis": true,
    "homeDirectionX": 1,
    "backlashXOld": 15,
    "backlashYOld": 40,
    "backlashX": 0,
    "backlashY": 0,
    "homeEndstoppolarityY": 0,
    "homeDirectionY": -1,
    "homeDirectionZ": 0,
    "homeXenabled": 1,
    "homeYenabled": 1,
    "homeZenabled": 0,
    "initialSpeed": {
    "X": 15000,
    "Y": 15000,
    "Z": 15000
    }
    },
    "axes": [
    "X",
    "Y",
    "Z"
    ],
    "forScanning": true,
    "forPositioning": true
    }
    },
    "rs232devices": {
    "ESP32": {
    "managerName": "ESP32Manager",
    "managerProperties": {
    "host_": "192.168.43.129",
    "serialport": "COM3"
    }
    }
    },
    "lasers": {
    "LED": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "ESP32LEDLaserManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "channel_index": 1
    },
    "wavelength": 635,
    "valueRangeMin": 0,
    "valueRangeMax": 1023
    }
    },
    "detectors": {
    "WidefieldCamera": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "HikCamManager",
    "managerProperties": {
    "isRGB": 1,
    "cameraListIndex": 0,
    "cameraEffPixelsize": 0.2257,
    "hikcam": {
    "exposure": 0,
    "gain": 0,
    "blacklevel": 100,
    "image_width": 1000,
    "image_height": 1000
    }
    },
    "forAcquisition": true,
    "forFocusLock": true
    },
    "Observer": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "OpenCVCamManager",
    "managerProperties": {
    "cameraListIndex": 1,
    "cameraListIndexWIN": 0,
    "isRGB":1,
    "opencvcam": {
    "exposure": 10
    }
    },
    "forAcquisition": true
    }
    },
    "autofocus": {
    "camera": "WidefieldCamera",
    "positioner": "ESP32Stage",
    "updateFreq": 10,
    "frameCropx": 780,
    "frameCropy": 400,
    "frameCropw": 500,
    "frameCroph": 100
    },
    "mct": {
    "monitorIdx": 2,
    "width": 1080,
    "height": 1920,
    "wavelength": 0,
    "pixelSize": 0,
    "angleMount": 0,
    "patternsDirWin": "C:\\Users\\wanghaoran\\Documents\\ImSwitchConfig\\imcontrol_slm\\488\\",
    "patternsDir": "/users/bene/ImSwitchConfig/imcontrol_sim/488"
    },
    "dpc": {
    "wavelength": 0.53,
    "pixelsize": 0.2,
    "NA": 0.3,
    "NAi": 0.3,
    "n": 1.0,
    "rotations": [
    0,
    180,
    90,
    270
    ]
    },
    "webrtc": {},
    "PixelCalibration": {},
    "focusLock": {
    "camera": "WidefieldCamera",
    "positioner": "ESP32StageManager",
    "updateFreq": 4,
    "frameCropx": 0,
    "frameCropy": 0,
    "frameCropw": 0,
    "frameCroph": 0
    },
    "LEDMatrixs": {
    "ESP32 LEDMatrix": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "ESP32LEDMatrixManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "Nx": 4,
    "Ny": 4,
    "wavelength": 488,
    "valueRangeMin": 0,
    "valueRangeMax": 32768
    }
    }
    },
    "availableWidgets": [
    "Settings",
    "View",
    "Recording",
    "Image",
    "Laser",
    "Positioner",
    "Autofocus",
    "MCT",
    "UC2Config",
    "ImSwitchServer",
    "PixelCalibration",
    "HistoScan",
    "LEDMatrix",
    "Joystick",
    "Flatfield",
    "ROIScan"
    ],
    "nonAvailableWidgets": [
    "STORMRecon",
    "DPC",
    "Hypha",
    "FocusLock",
    "HistoScan",
    "FocusLock",
    "FOVLock"
    ]
    }

    Configuration File Documentation

    Overview

    This configuration file is designed to manage settings and properties of various components in a complex system, such as positioners, RS232 devices, lasers, detectors, autofocus settings, etc. It is structured in JSON format for ease of reading and editing.

    Sections

    1. Positioners

      • ESP32Stage
        • managerName: Specifies the manager responsible for handling this positioner, in this case, ESP32StageManager.
        • managerProperties: Contains detailed settings for the positioner, such as RS232 device identification, step sizes for different axes, home speeds, axis enable/disable settings, and other mechanical properties.
        • axes: Lists the axes controlled by this positioner (X, Y, Z).
        • forScanning & forPositioning: Boolean flags to indicate if the positioner is used for scanning and/or positioning.
    2. RS232 Devices

      • ESP32
        • managerName: The manager handling RS232 devices, here ESP32Manager.
        • managerProperties: Network and port settings for the RS232 device.
    3. Lasers

      • LED
        • Details for managing LED laser settings, including the manager name (ESP32LEDLaserManager), RS232 device reference, channel index, wavelength, and value range.
    4. Detectors

      • WidefieldCamera & Observer
        • Configuration for different camera detectors, including manager names (HikCamManager, OpenCVCamManager), properties like RGB support, camera indexes, pixel size, and acquisition settings.
    5. Autofocus

      • Configuration for autofocus feature, linking a camera with a positioner and setting parameters like update frequency and frame cropping dimensions.
    6. MCT (Multichannel Tissue)

      • Settings for monitor index, dimensions, wavelength, pixel size, angle mount, and directories for pattern files.
    7. DPC (Differential Phase Contrast)

      • Settings related to DPC imaging, including wavelength, pixel size, numerical aperture, refractive index, and rotation angles.
    8. WebRTC

      • An empty section possibly reserved for WebRTC configuration.
    9. Pixel Calibration

      • An empty section likely intended for pixel calibration settings.
    10. Focus Lock

      • Focus lock settings similar to autofocus but with its distinct configuration.
    11. LED Matrixes

      • ESP32 LEDMatrix
        • Configuration for LED matrixes, specifying manager details, RS232 device, dimensions, wavelength, and value range.
    12. Available Widgets

      • A list of widgets that are available in the system, indicating the features or components that can be controlled or monitored.
    13. Non-Available Widgets

      • A list of widgets that are not available, possibly indicating features not supported or deactivated in the current setup.

    Conclusion

    This configuration file is a comprehensive document that outlines the settings and parameters for various hardware and software components in a specialized system. It is critical for ensuring the correct operation of the equipment it is designed to control.

    - + \ No newline at end of file diff --git a/docs/ImSwitch/ImSwitchDocker/index.html b/docs/ImSwitch/ImSwitchDocker/index.html index f885f7a4e..0554745e3 100644 --- a/docs/ImSwitch/ImSwitchDocker/index.html +++ b/docs/ImSwitch/ImSwitchDocker/index.html @@ -10,7 +10,7 @@ - + @@ -21,7 +21,7 @@ *The images are build using CI using actions

    The docker Images are hosted on github containers

    Prerequisites

    • Docker installed on your system (Tested on Raspi, Jetson Nano, Mac M1, Windows)
    • Optional: Docker Compose installed on your system

    Docker Quick Start

    ARM64 + X86

    Pull the file from github containers:

    sudo docker pull ghcr.io/openuc2/imswitch-noqt-x64:latest

    Install the docker image and run it:

    sudo docker run -it --rm -p 8001:8001 -p 2222:22 -e HEADLESS=1 -e HTTP_PORT=8001 -e CONFIG_FILE=example_uc2_hik_flowstop.json -e UPDATE_GIT=0 -e UPDATE_CONFIG=0 --privileged ghcr.io/openuc2/imswitch-noqt-x64:latest

    List of arguments:

    HEADLESS=1                # ImSwitch will start without any GUI
    HTTP_PORT=8001 # Port to access e.g. the ImSwitch React GUI
    CONFIG_FILE=example_virtual_microscope.json # default setup configuration
    UPDATE_GIT=true # pull the latest ImSwitch git
    UPDATE_INSTALL_GIT=true # pull and pip install all changes (e.g. new packages)
    UPDATE_UC2=true # pull UC2-REST
    UPDATE_INSTALL_UC2=true # pull and pip install all changes
    UPDATE_CONFIG=true # pull changes for setup configurations
    MODE=terminal # start Docker with bash for better debugging
    CONFIG_PATH=/Users/bene/Downloads # path to the local ImSwitchConfig folder (will use the default inside the container if not specified)
    DATA_PATH=/Users/bene/Downloads # remote path to store data (e.g. USB drive, needs to be mounted via commandline, (will use the default inside the container if not specified))

    External folders for Config and Data

    We can use an external path (outside the container) to store and read data. This is helpful if we want to make changes e.g. to the config or want to store data/images. Remember, the docker container gets reset after the next reboot!

    We have two options.

    1. The Config folder. Let's have the following use case, where the folder should be linked to your Downloads folder. For this you have to specify two things in the way you call the docker image:
    -e CONFIG_PATH=/config  -v ~/Downloads:/config

    This means that ImSwitch inside docker will use the folder /config/ImSwitchConfig/config to specify the setup configuration. The -v command will mount the host's Downloads folder as /config inside the docker container.

    2. The same mechanism can be used for specifying the datapath to specify the dataset storage. For this we have to specify:
    -e  DATA_PATH=/dataset  -v ~/Downloads:/dataset

    Images will be stored in that folder. Ensure the folder exists!

    -v ~/Downloads:/config

    Setting up docker on Raspi

    #!/bin/bash
    # install_docker.sh -- prepare a Debian/Raspbian host for running the
    # ImSwitch Docker container: update packages, install Docker via the
    # official convenience script, and grant the current user Docker access.

    # Update package lists
    sudo apt update -y

    # Upgrade installed packages
    sudo apt upgrade -y

    # Install Docker
    curl -sSL https://get.docker.com | sh

    # Add current user to the Docker group
    sudo usermod -aG docker $USER

    # Print message to logout and login again
    echo "Please log out and log back in to apply the Docker group changes."

    # Verify group membership (this will not reflect the changes until you log out and log back in)
    groups

    To save this script, you can copy the content above into a file, for example, install_docker.sh, and then run the script using the following command:

    chmod +x install_docker.sh
    ./install_docker.sh

    After running the script, you will need to log out and log back in to apply the Docker group changes. Once you log back in, you can verify your membership in the Docker group by running:

    groups

    Additional Information

    This tutorial will guide you through the process of setting up the ImSwitch React frontend and backend using Docker Compose. The ImSwitch React frontend is exposed on port 3000 and provides access to the REST API via a Swagger UI running in another Docker container on localhost:8001. The Swagger UI is available at localhost:8001/docs. This setup uses a simulated microscope with a line-like sample. The configuration is provided by a JSON file that can be updated if the corresponding flag is set. Additionally, the ImSwitch version can be updated based on a flag. If access to the camera (HIK camera and UC2-REST) is needed, the --privileged flag must be set.

    Docker Compose Configuration

    Create a docker-compose.yml file with the following content or use the file in compose.yaml:

    version: '3.8'

    services:
    imswitch-aiortc-react:
    image: ghcr.io/openuc2/imswitch-aiortc-react:latest
    ports:
    - "5000:5000"
    - "8002:8001"
    environment:
    - NODE_ENV=production
    stdin_open: true
    tty: true

    imswitch-docker-arm64-noqt:
    image: ghcr.io/openuc2/imswitch-docker-arm64-noqt:latest
    privileged: true
    ports:
    - "8001:8001"
    - "2222:22"
    environment:
    - HEADLESS=1
    - HTTP_PORT=8001
    - CONFIG_FILE=example_virtual_microscope.json
    - UPDATE_GIT=1
    - UPDATE_CONFIG=0
    stdin_open: true
    tty: true
    restart: always

    Explanation

    • imswitch-aiortc-react: This service runs the ImSwitch React frontend.

      • image: Specifies the Docker image to use.
      • ports: Maps the container ports to the host ports.
      • environment: Sets environment variables inside the container.
      • stdin_open and tty: Keeps the container running in interactive mode.
    • imswitch-docker-arm64-noqt: This service runs the backend with the Swagger UI.

      • image: Specifies the Docker image to use.
      • privileged: Grants the container privileged access to the hardware.
      • ports: Maps the container ports to the host ports.
      • environment: Sets environment variables inside the container.
      • stdin_open and tty: Keeps the container running in interactive mode.
      • restart: Ensures the container restarts automatically if it stops.

    Running Docker Compose

    1. Save the docker-compose.yml file to a directory on your machine.
    2. Open a terminal and navigate to the directory containing the docker-compose.yml file.
    3. Start the services with Docker Compose:
    docker-compose -f docker-compose.yml up -d
    4. go to your browser and access the microscope server under https://localhost:8001/docs to access the Swagger UI
    2. go to your browser and access the microscope control UI under http://localhost:3000 5.1 enter the IP address and port und Connections: https://localhost and 8001 as port

    Accessing the Services individually

    • ImSwitch React Frontend: Open your browser and navigate to http://localhost:3000 to access the ImSwitch React frontend.
    • Swagger UI: Navigate to http://localhost:8001/docs to access the Swagger UI for the backend API.

    Configuration Details

    • Simulated Microscope: The current configuration uses a simulated microscope offering a line-like sample. The configuration is provided by a JSON file specified in the CONFIG_FILE environment variable.
    • Updating Configuration and Version: The JSON configuration and the ImSwitch version can be updated if the UPDATE_CONFIG and UPDATE_GIT flags are set, respectively.
    • Camera Access: If access to the camera (HIK camera and UC2-REST) is needed, the --privileged flag must be set.

    Stopping the Services

    To stop the services, run the following command:

    docker-compose -f docker-compose.yml down

    This command stops and removes all containers defined in the docker-compose.yml file.

    Additional Notes

    • Ensure that Docker and Docker Compose are installed and running on your system.
    • The --privileged flag is necessary for accessing certain hardware components like cameras.

    By following this tutorial, you should be able to set up and run the ImSwitch React frontend and backend with Docker Compose, providing a seamless environment for interacting with the simulated microscope and accessing the API via Swagger UI.

    To make the CONFIG_PATH available as a folder outside the container on the host computer (e.g., in ~/Downloads/ImSwitchConfig), you can use Docker's volume mounting feature. This allows you to mount a directory from the host machine into the container, making it accessible from within the container.

    Here's how you can modify your Docker run command to mount the ~/Downloads/ImSwitchConfig directory from the host to the container:

    1. Modify the Docker run command: Use the -v (or --volume) option to mount the directory.
    sudo docker run -it --rm -p 8001:8001 -p 2222:22 \
    -e HEADLESS=1 \
    -e HTTP_PORT=8001 \
    -e CONFIG_FILE=example_virtual_microscope.json \
    -e UPDATE_GIT=0 \
    -e UPDATE_CONFIG=0 \
    -e CONFIG_PATH=/config \
    --privileged \
    -v ~/Downloads/ImSwitchConfig:/config \
    imswitch_hik
    2. Update the CMD in your Dockerfile to use the CONFIG_PATH environment variable:
    CMD ["/bin/bash", "-c", "\
    if [ \"$MODE\" = \"terminal\" ]; then \
    /bin/bash; \
    else \
    echo 'LSUSB' && lsusb && \
    /usr/sbin/sshd -D & \
    ls /root/ImSwitchConfig && \
    if [ \"$UPDATE_GIT\" = \"true\" ]; then \
    cd /tmp/ImSwitch && \
    git pull; \
    fi && \
    if [ \"$UPDATE_INSTALL_GIT\" = \"true\" ]; then \
    cd /tmp/ImSwitch && \
    git pull && \
    /bin/bash -c 'source /opt/conda/bin/activate imswitch && pip install -e /tmp/ImSwitch'; \
    fi && \
    if [ \"$UPDATE_UC2\" = \"true\" ]; then \
    cd /tmp/UC2-REST && \
    git pull; \
    fi && \
    if [ \"$UPDATE_INSTALL_UC2\" = \"true\" ]; then \
    cd /tmp/UC2-REST && \
    git pull && \
    /bin/bash -c 'source /opt/conda/bin/activate imswitch && pip install -e /tmp/UC2-ESP'; \
    fi && \
    if [ \"$UPDATE_CONFIG\" = \"true\" ]; then \
    cd /root/ImSwitchConfig && \
    git pull; \
    fi && \
    source /opt/conda/bin/activate imswitch && \
    HEADLESS=${HEADLESS:-1} && \
    HTTP_PORT=${HTTP_PORT:-8001} && \
    CONFIG_FILE=${CONFIG_FILE:-/root/ImSwitchConfig/imcontrol_setup/example_virtual_microscope.json} && \
    USB_DEVICE_PATH=${USB_DEVICE_PATH:-/dev/bus/usb} && \
    CONFIG_PATH=${CONFIG_PATH:-None} && \
    echo \"python3 /tmp/ImSwitch/main.py --headless $HEADLESS --config-file $CONFIG_FILE --http-port $HTTP_PORT \" && \
    python3 /tmp/ImSwitch/main.py --headless $HEADLESS --config-file $CONFIG_FILE --http-port $HTTP_PORT --config-folder $CONFIG_PATH; \
    fi"]

    By adding the -v ~/Downloads/ImSwitchConfig:/config option in the docker run command, you mount the host's ~/Downloads/ImSwitchConfig directory to the /config directory inside the container. The -e CONFIG_PATH=/config environment variable makes sure that the container uses this mounted directory as the configuration path.

    Now, any changes you make in ~/Downloads/ImSwitchConfig on your host machine will be reflected inside the container at /config, and the application running inside the container will use this directory for its configuration files.

    Certainly! Here's a summary and explanation of the combined script:

    Autostarting ImSwitch on e.g. the Raspberry Pi

    The script setup_autostart.sh performs the following actions:

    1. Creates a startup script (start_imswitch.sh) that:

      • Waits for the X server to be available.
      • Starts the Docker container in the background.
      • Launches Chromium in fullscreen mode, opening a specific URL and zooming out the page to 70%.
      • Logs output to a file for debugging purposes.
    2. Creates a systemd service (start_imswitch.service) that:

      • Ensures the startup script runs only after the X server is available.
      • Restarts the script on failure.
      • Configures logging to the systemd journal.

    Explanation

    #!/bin/bash
    # setup_autostart.sh -- generate a startup script plus a systemd unit so the
    # ImSwitch Docker container and a fullscreen Chromium kiosk launch
    # automatically once the X server is available.

    # Define variables
    START_SCRIPT_PATH="$HOME/start_imswitch.sh"
    SERVICE_FILE_PATH="/etc/systemd/system/start_imswitch.service"

    # Create the startup script
    # Note: 'EOF' is quoted, so $-variables inside the heredoc are written
    # literally and only expand when the generated script itself runs.
    cat << 'EOF' > $START_SCRIPT_PATH
    #!/bin/bash
    set -x

    LOGFILE=/home/uc2/start_imswitch.log
    exec > $LOGFILE 2>&1

    echo "Starting IMSwitch Docker container and Chromium"

    # Wait for the X server to be available
    while ! xset q &>/dev/null; do
    echo "Waiting for X server..."
    sleep 2
    done

    export DISPLAY=:0

    # Start Docker container in the background
    echo "Running Docker container..."
    nohup sudo docker run --rm -d -p 8001:8001 -p 2222:22 \
    -e HEADLESS=1 -e HTTP_PORT=8001 \
    -e CONFIG_FILE=example_uc2_hik_flowstop.json \
    -e UPDATE_GIT=1 -e UPDATE_CONFIG=0 \
    --privileged ghcr.io/openuc2/imswitch-noqt-x64:latest &

    # Wait a bit to ensure Docker starts
    sleep 10

    # Start Chromium
    echo "Starting Chromium..."
    /usr/bin/chromium-browser --start-fullscreen --ignore-certificate-errors \
    --unsafely-treat-insecure-origin-as-secure=https://0.0.0.0:8001 \
    --app="data:text/html,<html><body><script>window.location.href='https://0.0.0.0:8001/imswitch/index.html';setTimeout(function(){document.body.style.zoom='0.7';}, 3000);</script></body></html>"

    echo "Startup script completed"
    EOF

    # Make the startup script executable
    chmod +x $START_SCRIPT_PATH

    echo "Startup script created at $START_SCRIPT_PATH and made executable."

    # Create the systemd service file
    # Note: EOF is unquoted here, so $START_SCRIPT_PATH and $USER are expanded
    # NOW -- the written unit file contains their concrete values.
    sudo bash -c "cat << EOF > $SERVICE_FILE_PATH
    [Unit]
    Description=Start IMSwitch Docker and Chromium
    After=display-manager.service
    Requires=display-manager.service

    [Service]
    Type=simple
    ExecStart=$START_SCRIPT_PATH
    User=$USER
    Environment=DISPLAY=:0
    Restart=on-failure
    TimeoutSec=300
    StandardOutput=journal
    StandardError=journal

    [Install]
    WantedBy=graphical.target
    EOF"

    # Reload systemd, enable and start the new service
    sudo systemctl daemon-reload
    sudo systemctl enable start_imswitch.service
    sudo systemctl start start_imswitch.service

    echo "Systemd service created and enabled to start at boot."

    Detailed Steps:

    1. Define Paths:

      • START_SCRIPT_PATH and SERVICE_FILE_PATH are set to the paths where the startup script and the systemd service file will be created.
    2. Create the Startup Script:

      • Logging: Redirects output to a log file (/home/uc2/start_imswitch.log).
      • Wait for X Server: Uses a loop to check if the X server is available (xset q).
      • Start Docker: Runs the Docker container in detached mode (-d), ensuring it runs in the background without expecting a TTY.
      • Start Chromium: Opens Chromium in fullscreen mode, bypassing certificate errors, and zooms the page to 70%.
    3. Make the Script Executable:

      • Sets the start_imswitch.sh script as executable using chmod +x.
    4. Create the Systemd Service File:

      • Dependencies: Ensures the service runs after the display manager service (display-manager.service), which starts the X server.
      • Service Configuration: Specifies the script to run (ExecStart), user to run it as (User), environment variables (Environment), and restart behavior on failure (Restart=on-failure).
      • Logging: Configures logging to the systemd journal (StandardOutput and StandardError).
    5. Enable and Start the Service:

      • Reloads systemd to recognize the new service.
      • Enables the service to start at boot.
      • Starts the service immediately.

    By running the combined script, you ensure that the IMSwitch Docker container and Chromium browser will start automatically after the X server is available, with proper logging and background execution.

    - + \ No newline at end of file diff --git a/docs/ImSwitch/ImSwitchExperimental/index.html b/docs/ImSwitch/ImSwitchExperimental/index.html index 5cc3f983a..0677316e7 100644 --- a/docs/ImSwitch/ImSwitchExperimental/index.html +++ b/docs/ImSwitch/ImSwitchExperimental/index.html @@ -10,7 +10,7 @@ - + @@ -18,7 +18,7 @@

    ImSwitch Experimental Features Documentation

    Overview

    This document details the new experimental features for the microscopy control software, ImSwitch. These features include a headless version of the software, suitable for resource-constrained environments, and a Docker container setup for easy deployment and testing. The headless version allows operation on resource-limited devices, while the Docker container facilitates easy deployment and testing. Please provide feedback and report any issues encountered to help improve these experimental features.

    Headless Version in Google Colab

    We have developed a headless version of ImSwitch that operates without the need for the QT graphical interface. This version allows remote control and UI element access solely through the REST API or Jupyter Notebook. Please note that some functions are still under development, and this version is experimental.

    Use Cases

    This headless version is particularly useful on devices such as Raspberry Pis and Nvidia Jetsons, which may struggle with the resource demands of installing and running PyQT.

    Getting Started in Google Colab

    To try the headless version of ImSwitch in Google Colab, follow these steps:

    1. Install ImSwitch:

      !pip install https://github.com/openUC2/ImSwitch/archive/refs/heads/NOQT.zip #--no-deps ##--force-reinstall
    2. Access the public version in Google Colab: Google Colab Link

    3. Clone the repository and checkout the NOQT branch:

      %cd ~
      !git clone https://github.com/openUC2/ImSwitch
      !git pull
      %cd ./ImSwitch
      !git checkout NOQT
    4. Install the package:

      !pip install -e .
    5. Configure and run ImSwitch in headless mode:

      from google.colab.output import eval_js
      print(eval_js("google.colab.kernel.proxyPort(8002)"))
      from imswitch.__main__ import main
      import imswitch
      imswitch.IS_HEADLESS = True
      main(is_headless=True, default_config="example_virtual_microscope.json")
      input() # Prevent from closing the cell

    Docker Container

    A Docker container is available for ImSwitch, providing a convenient way to deploy and test the software.

    Docker Container Details

    Running the Docker Container

    1. Launch the Docker container:

      docker run -it --rm -p 9876:9876 -p 8001:8001 -p 2222:22 imswitch
    2. Access the GUI and REST API:

      • Open your browser and go to localhost:9876 to access the GUI.
      • Go to localhost:8001 to access the REST API.

    Notes

    • This Docker setup is primarily a demo version to freeze system dependencies.
    • The next step involves integrating actual hardware for complete functionality.
    - + \ No newline at end of file diff --git a/docs/ImSwitch/ImSwitchInstall/index.html b/docs/ImSwitch/ImSwitchInstall/index.html index d699a0f1e..f8d8f3567 100644 --- a/docs/ImSwitch/ImSwitchInstall/index.html +++ b/docs/ImSwitch/ImSwitchInstall/index.html @@ -10,13 +10,13 @@ - +

    Install ImSwitch

    What will you learn?

    • How to install the main GUI software to control the Optics components
    • How to install the drivers

    Download the software

    Duration: 1

    Please go to our ImSwitch Repository and download the latest Build-artefact:

    GitHub Actions -> "bundle"

    Extract the ZIP folder

    Duration: 2

    Right click on the downloaded ZIP folder and select "extract all". This may take a while.

    Download and Install the drivers for the Camera/UC2 Electronics board

    Electronics Board (CH340)

    For the CH340 driver, please follow these instructions

    The driver is available here

    Download the Windows CH340 Driver
    Unzip the file
    Run the installer which you unzipped
    In the Device Manager when the CH340 is connected you will see a COM Port in the Tools > Serial Port menu, the COM number for your device may vary depending on your system.

    Electronics Board (CP210x)

    The driver for the CP210x is available here

    Download the Windows CP210x Driver
    Unzip the file
    Run the installer which you unzipped
    In the Device Manager when the CP210x is connected you will see a COM Port in the Tools > Serial Port menu, the COM number for your device may vary depending on your system.

    Daheng Imaging Cameras

    Download the Windows SDK USB2+USB3+GigE (including Directshow + Python) Galaxy V1.18.2208.9301 for the Daheng USB3 Cameras from the Get-Cameras Website

    Install the packages on your computer.

    Duration: 2

    Prepare ImSwitch

    1. Connect the Camera with your computer (optionally test its proper functioning using Daheng Galaxy Viewer(x64)) using the USB3 cable
    2. Connect the UC2 electronics with the computer (USB micro) and hook up the 12V power supply with the power connection
    3. Check if you can see the USB COM Port in your device manager:

    Duration: 2

    Install the UC2 ImSwitch Configurations

    In order to use the UC2 Hardware and the Daheng USB Camera, you need the UC2 config files. Please go to https://github.com/openUC2/ImSwitchConfig/tree/stable and download the Repository as a zip file following this link.

    Once it has been downloaded, unzip it to C:\Users\YOURUSERNAME\Documents\ImSwitchConfig

    It should look like this:

    Duration: 2

    Start ImSwitch

    1. Open a Windows Terminal by typing WIN+R, then type CMD and hit enter.

    1. In the Windows Terminal navigate to the folder where you downloaded the software - e.g. cd C:\Users\UC2\Downloads\imswitch-windows-latest\ImSwitch and hit enter
    2. Start the executable, BUT we need to run one command in advance: set SETUPTOOLS_USE_DISTUTILS=stdlib
    3. Type ImSwitch.exe and hit enter, the executable will open the ImSwitch GUI

    If everything has been configured correctly, the GUI should open and look like this. Additional information on its functionality can be found in the Read-The-Docs: https://imswitch.readthedocs.io/en/stable/

    If you have any additional questions or issues, please post them in the ISSUE section here.

    Explanatory Video on how to get started with ImSwitch

    Duration: 3

    https://www.youtube.com/watch?v=Om6GWZZ_0So

    - + \ No newline at end of file diff --git a/docs/ImSwitch/ImSwitchInstallUbuntu/index.html b/docs/ImSwitch/ImSwitchInstallUbuntu/index.html index 190afed4a..81d232b82 100644 --- a/docs/ImSwitch/ImSwitchInstallUbuntu/index.html +++ b/docs/ImSwitch/ImSwitchInstallUbuntu/index.html @@ -10,13 +10,13 @@ - +

    ImSwitchInstallUbuntu

    ImSwitch Installation Ubuntu

    Step 1: Install Visual Studio Code (VS Code)

    1. Open a web browser and navigate to the VS Code download page.
    2. Download the Debian package for your 64-bit system.
    3. Once downloaded, open a terminal window and navigate to the directory where the .deb file is located.
    4. Run the following command to install VS Code:
      sudo dpkg -i <filename>.deb
      sudo apt-get install -f

    Step 2: Install Miniconda

    1. Open a terminal window and run the following command to download Miniconda:
      wget https://repo.anaconda.com/miniconda/Miniconda3-py310_23.5.2-0-Linux-x86_64.sh
    2. Make the script executable and run it:
      bash Miniconda3-py310_23.5.2-0-Linux-x86_64.sh
    3. Follow the on-screen instructions to complete the installation.
    4. Create a new environment named imswitch with Python 3.10:
      conda create -n imswitch python=3.10 -y

    Step 3: Clone Necessary Repositories

    1. Navigate to the Downloads directory:
      cd ~/Downloads
    2. Clone the required repositories:
      git clone https://github.com/openUC2/UC2-REST
      git clone https://github.com/openUC2/ImSwitch
      git clone https://gitlab.com/bionanoimaging/nanoimagingpack
    1. Activate the imswitch environment:
      conda activate imswitch
    2. Navigate to the ImSwitch directory and install it:
      cd ~/Downloads/imswitch
      pip install -e .
    3. Repeat for UC2-REST and nanoimagingpack:
      cd ~/Downloads/UC2-REST
      pip install -e .
      cd ~/Downloads/nanoimagingpack # Correcting typo from original logs
      pip install -e .

    Step 5: Install Camera Drivers

    1. Clone the camera drivers:
      cd ~/Downloads
      git clone https://github.com/hongquanli/octopi-research/
    2. Navigate to the camera drivers directory and run the installation script:
      cd octopi-research/software/drivers\ and\ libraries/daheng\ camera/Galaxy_Linux-x86_Gige-U3_32bits-64bits_1.2.1911.9122/
      ./Galaxy_camera.run

    Step 6: Clone ImSwitch Configuration and Set Permissions

    1. Navigate to the Documents directory:
      cd ~/Documents
    2. Clone the ImSwitch configuration:
      git clone https://github.com/openUC2/ImSwitchConfig
    3. Change the ownership of the device:
      sudo chown pi:pi /dev/ttyUSB0

    Congratulations! You have successfully installed ImSwitch and related dependencies.

    - + \ No newline at end of file diff --git a/docs/ImSwitch/ImSwitchInstallWindows/index.html b/docs/ImSwitch/ImSwitchInstallWindows/index.html index a099e0c16..094d579cf 100644 --- a/docs/ImSwitch/ImSwitchInstallWindows/index.html +++ b/docs/ImSwitch/ImSwitchInstallWindows/index.html @@ -10,14 +10,14 @@ - +

    ImSwitchInstallWindows

    ImSwitch Installation on Windows

    Step 1: Install Visual Studio Code (VS Code)

    1. Open a web browser and go to the VS Code download page.
    2. Download the Windows Installer.
    3. Once the download is complete, locate the installer and double-click to run it.
    4. Follow the on-screen instructions to complete the installation.

    Step 2: Install Miniconda

    1. Open a web browser and navigate to the Miniconda download page.
    2. Download the Windows installer for the 64-bit version of Miniconda.
    3. Run the installer by double-clicking the downloaded file.
    4. Follow the installer prompts to install Miniconda to a directory of your choice (e.g., C:\Miniconda3).
    5. During installation, ensure that the option to "Add Anaconda to my PATH environment variable" is selected.

    Step 3: Clone Necessary Repositories

    1. Ensure you have installed Git for Windows
    2. Open the Command Prompt:
      • Press Win + R, type cmd, and press Enter.
    3. Navigate to your preferred directory where you want to clone the repositories (e.g., C:\Users\<YourUsername>\Downloads):
      cd C:\Users\<YourUsername>\Downloads
    4. Clone the required repositories:
      git clone https://github.com/openUC2/UC2-REST
      git clone https://github.com/openUC2/ImSwitch
      git clone https://gitlab.com/bionanoimaging/nanoimagingpack
    1. Open the Command Prompt.
    2. Create a new Conda environment named imswitch with Python 3.10:
      conda create -n imswitch python=3.10
    3. Activate the imswitch environment:
      conda activate imswitch
    4. Navigate to the ImSwitch directory and install it: (The e just states to install the packages in editable mode)
      cd C:\Users\<YourUsername>\Downloads\ImSwitch
      pip install -e .
    5. Repeat for UC2-REST and nanoimagingpack:
      cd C:\Users\<YourUsername>\Downloads\UC2-REST
      pip install -e .
      cd C:\Users\<YourUsername>\Downloads\nanoimagingpack
      pip install -e .

    Step 5: Install Camera Drivers for Daheng Cameras

    1. Download the galaxy camera sdk for windows here https://www.get-cameras.com/requestdownload
    2. Double-click on the installation executable file (Galaxy_camera.exe) to run it.
    3. Follow the on-screen instructions to complete the installation.

    Step 6: Clone ImSwitch Configuration and Set Permissions

    1. Navigate to the Documents directory using the Command Prompt:
      cd C:\Users\<YourUsername>\Documents
    2. Clone the ImSwitch configuration:
      git clone https://github.com/openUC2/ImSwitchConfig

    Step 7: Start

    conda activate imswitch
    imswitch

    Problems and Solutions

    Very likely, there will be a PyQt issue.

    Try:

    pip install PyQt5 --force-reinstall

    Congratulations! You have successfully installed ImSwitch and related dependencies on Windows.

    - + \ No newline at end of file diff --git a/docs/ImSwitch/ImSwitchInstaller/index.html b/docs/ImSwitch/ImSwitchInstaller/index.html index 8a0845efe..270f56e0f 100644 --- a/docs/ImSwitch/ImSwitchInstaller/index.html +++ b/docs/ImSwitch/ImSwitchInstaller/index.html @@ -10,7 +10,7 @@ - + @@ -20,7 +20,7 @@ :::

    ImSwitch Installer

    Introduction

    The ImSwitch Installer is an Electron-based application designed to streamline the installation process of the ImSwitch environment. It automates the download and setup of a Mamba environment along with all necessary dependencies from GitHub via pip. This tool simplifies the installation process, reducing it to a few clicks and eliminating the need for executing multiple complex commands.

    Installation Process

    The following youtube video shows you how to install ImSwitch using the installer. IMAGE ALT TEXT HERE

    Features

    • Easy Installation: Downloads and installs all necessary dependencies automatically.
    • Cross-Platform Support: Available for ARM64 and Intel-based Mac, Windows, and Linux systems.
    • Simple Uninstallation: Remove by deleting the ImSwitch folder.
    • Security: Instructions provided for bypassing system security warnings due to unsigned code.
    • Update Mechanism: Integrated update functionality to easily fetch the latest versions.

    Requirements

    • Disk Space: Minimum of 5GB.
    • Memory: 8GB RAM.
    • Processor: Intel i5 or Apple Silicon.

    Installation Guide

    1. Download the Installer: Choose the appropriate installer (ARM64 for Mac, Windows) from the releases section.
    2. Run the Installer: Double-click the downloaded file. Ignore any security warnings as the installer is not yet signed.
      • For macOS, follow Apple's guide for running unsigned code.
      • On Windows, grant permission to run the application.
    3. Installation Process: The installer will set up a Python environment using Mamba in /User/yourname/ImSwitch and install all dependencies. This process may take between 10 to 30 minutes depending on your internet connection.
    4. Starting ImSwitch: Once installed, launch ImSwitch by executing start imswitch. https://camo.githubusercontent.com/d12b826a278a7dcb877c8524a22cc3408b82883a27edc9464f865b7d5403e6ef/68747470733a2f2f69332e7974696d672e636f6d2f76692f4e345031734832453952552f6d617872657364656661756c742e6a7067Installation Screen 1Installation Screen 2

    Updating ImSwitch

    To update, click the update button within the application. This will download the latest ZIP from GitHub, unzip it, and execute pip install -e . within the Mamba base environment.

    Driver Installation

    Drivers for Daheng and HIK Vision cameras can be found under the "Driver Installation" link provided within the installer.

    Upcoming Features

    • Flashing the latest version of the UC2 firmware.
    • Building the ImSwitch Hardware Configuration using a drag-and-drop GUI.

    Install from Source

    For those interested in building from source:

    # Clone the repository
    git clone https://github.com/openuc2/imswitchinstaller.git

    # Install dependencies (skip if you already have yarn)
    npm install

    # Run the Electron app
    npm start

    Debugging

    Python Environment Locations

    • Windows Installation Path: C:\Users\UCadmin2\ImSwitch\miniforge\condabin
    • Python Executable: C:\\Users\\UCadmin2\\ImSwitch\\miniforge\\python.exe
    • ImSwitch Package Location: C:\\Users\\UCadmin2\\ImSwitch\\miniforge\\lib\\site-packages\\imswitch\\__init__.py'
    • Conda Environment /Users/ImSwitch/miniforge/condabin/mamba install devbio-napari -c conda-forge

    Disclaimer

    This installer is based on the BellJar project. Thanks a lot for making it open-source! :)

    - + \ No newline at end of file diff --git a/docs/ImSwitch/ImSwitchInstallerConda/index.html b/docs/ImSwitch/ImSwitchInstallerConda/index.html index 49d68a2a9..98d53de77 100644 --- a/docs/ImSwitch/ImSwitchInstallerConda/index.html +++ b/docs/ImSwitch/ImSwitchInstallerConda/index.html @@ -10,7 +10,7 @@ - + @@ -22,7 +22,7 @@

  • Choose a path to install the conda environment to (Hint: path should not exceed 40 digits)

    1. Proceed

    1. Wait until the packages are installed

    1. A command prompt will appear and ask you for granting admin rights; Here a python script tries to download and install the silabs USB UART drivers for the ESP32

    2. The installer will tell you if the installation process was successful

    1. In the next steps, all necessary packages in the environment for ImSwitch will be downloaded and installed

    1. The installer informs you once it's done

    1. Once everything has been installed, the installer tells you it's done

    1. Exit the installer by hitting finish

    1. On the desktop a new icon has been created to start the ImSwitch software. Double click and wait until the window shows up

    Troubleshooting

    The conda installer installs your environment in the location that you selected previously. To find out, you can open a command-line window by hitting the keys WIN+r, typing "cmd" and pressing enter. Then enter

    conda env list

    The name imswitchopenuc2 should appear. You can activate this python environment by typing

    conda activate imswitchopenuc2

    If this works successfully, you can start imswitch by typing

    imswitch

    Disclaimer

    This is still at a very early stage and may have errors. Expect errors! Feel free to file any issues in our repository or write us a mail. :)

    - + \ No newline at end of file diff --git a/docs/ImSwitch/ImSwitchUpdate/index.html b/docs/ImSwitch/ImSwitchUpdate/index.html index a7014cd3b..ead6b48d9 100644 --- a/docs/ImSwitch/ImSwitchUpdate/index.html +++ b/docs/ImSwitch/ImSwitchUpdate/index.html @@ -10,13 +10,13 @@ - +

    ImSwitchUpdate

    Updated openUC2 ImSwitch

    In this guide, we'll walk you through the process of updating ImSwitch after you've installed it using pip. The update consists of three main steps:

    1. Updating the ImSwitch UC2 version
    2. Updating the UC2-REST
    3. Updating the UC2-ESP32 firmware

    1. Updating the ImSwitch UC2 Version

    Assumption: You have previously cloned the ImSwitch repository using git.

    1. Open your terminal.

    2. Activate the ImSwitch environment:

      conda activate imswitch
    3. Navigate to the directory where you cloned ImSwitch:

      cd <DIRECTORY/WHERE/YOU/DOWNLOADED/IMSWITCH>
    4. Pull the latest version from the repository and install:

      git pull https://github.com/openUC2/ImSwitch/
      pip install -e .

    2. Updating the UC2-REST to Interface the UC2 Electronics

    Assumption: You have previously cloned the UC2-REST repository using git.

    1. In the terminal, navigate to the directory where you cloned UC2-REST:

      cd <DIRECTORY/WHERE/YOU/DOWNLOADED/UC2-REST>
    2. Pull the latest version from the repository and install:

      git pull https://github.com/openUC2/UC2-REST/
      pip install -e .

    3. Updating the UC2-ESP32 Firmware

    1. Visit the UC2 Firmware Page.
    2. Select the board you're using. If you're uncertain about this, feel free to reach out via email.
    3. Click on the "Connect" button.
    4. From the browser-provided list, select the COM port.
    5. Click on "Flash Firmware".
    6. Wait for the installation process to complete.
    7. Test the firmware on the UC2 Web Serial Test Page.
    8. Close the browser window to release the serial port.

    Finally, you can start ImSwitch:

    python -m imswitch
    - + \ No newline at end of file diff --git a/docs/Investigator/FlowStopper/index.html b/docs/Investigator/FlowStopper/index.html index 5d121b5a2..15d5cb5a5 100644 --- a/docs/Investigator/FlowStopper/index.html +++ b/docs/Investigator/FlowStopper/index.html @@ -10,13 +10,13 @@ - +

    README

    Setup Wifi Access Point on the Raspi

    from: https://cdn-learn.adafruit.com/downloads/pdf/setting-up-a-raspberry-pi-as-a-wifi-access-point.pdf

    sudo apt update
    sudo apt -y upgrade # takes long
    sudo apt install -y hostapd dnsmasq
    sudo systemctl unmask hostapd
    sudo systemctl enable hostapd
    sudo DEBIAN_FRONTEND=noninteractive apt install -y netfilter-persistent iptables-persistent
    sudo reboot
    sudo nano /etc/dhcpcd.conf
    -------
    interface wlan0
    static ip_address=192.168.4.1/24
    nohook wpa_supplicant
    sudo nano /etc/sysctl.d/routed-ap.conf
    -------
    sudo nano /etc/dnsmasq.conf
    -------
    interface=wlan0 # Listening interface
    dhcp-range=192.168.4.2,192.168.4.20,255.255.255.0,24h
    # Pool of IP addresses served via DHCP
    domain=wlan # Local wireless DNS domain
    address=/gw.wlan/192.168.4.1 # Alias for this router
    - + \ No newline at end of file diff --git a/docs/Investigator/Lightsheet/LightSheet Sample/index.html b/docs/Investigator/Lightsheet/LightSheet Sample/index.html index d130e862e..49bae1ee1 100644 --- a/docs/Investigator/Lightsheet/LightSheet Sample/index.html +++ b/docs/Investigator/Lightsheet/LightSheet Sample/index.html @@ -10,7 +10,7 @@ - + @@ -32,7 +32,7 @@ The openUC2 light-sheet microscope features motorized axes for enhanced control and precision:
    • The Z-axis adjusts the objective lens focus relative to the light-sheet plane.
    • The X axis moves the sample in the vertical direction with respect to the ground surface.
    • The Y axis shifts the sample parallel to the light-sheet plane.
    • The A axis moves the sample along the light-sheet plane, towards or away from the objective lens.
    • Each step corresponds to approximately 300nm in physical units, enabling fine-tuned movement and positioning.

    We appreciate your engagement with the openUC2 light-sheet microscope and hope that these technical details enhance your understanding of the setup and its capabilities. Should you have any inquiries or require further assistance, please don't hesitate to reach out.

    Protocol to align the light-sheet w.r.t. the focus plane

    Alignment Protocol for Light-Sheet Microscope Focus Plane

    Efficient alignment of the light-sheet with the microscope objective lens's focus plane is crucial for optimal imaging results. This protocol outlines the steps to achieve precise alignment using fluorescent markers and manipulation of the kinematic mirror.

    Alignment Steps:

    1. Fluorescent Marker Setup:

      • Begin by ensuring that the light-sheet is coplanar with the microscope's objective lens field of view.
      • Use a fluorescent pen marker to label the embedding media, effectively visualizing the light-sheet.

    2. Activating the Laser:

    1. Visualizing the Light-Sheet:

      • With the laser activated, you should observe the light-sheet within the water chamber. Refer to the provided image for a reference.
    2. Kinematic Mirror Adjustment:

      • The three screws on the kinematic mirror in the right corner control the orientation of the light-sheet in 3D space.
      • Familiarize yourself with the degrees of freedom associated with these screws.

    1. Fundamental Considerations:
      • The cylindrical lens focuses the primary light-sheet in the backfocal plane of the illumination objective (4x, 0.1 NA).
      • Rotating the objective lens adjusts the orientation of the light-sheet.
      • The square orientation of the cylindrical lens ensures proper alignment with the detection objective lens.
      • The primary light-sheet exits the cylindrical lens at the center.
      • The kinematic mirror manipulates the light-sheet's position in the x and y directions, as well as introducing an offset.
      • Correct mirror alignment is crucial, placing it precisely at the diagonal center of the cube.
      • This central placement ensures that the primary light-sheet enters the objective lens's backfocal plane (BFP) at the center.
      • Such alignment results in the secondary illuminating light-sheet being parallel to the detection lens's focus plane.
      • Observe the effects of rotating the screws and adjust accordingly.

    1. Fluorescent Solution Application:
      • Utilize a syringe for convenient application of the fluorescent solution.

    7. Sample Cube Handling:

    • The sample cube is magnetically held, facilitating easy removal for cleaning.

    • Take care as the sample cube's coverslips are relatively thin and can break.

    Achieving precise alignment between the light-sheet and the objective lens's focus plane is critical for obtaining accurate imaging results. This protocol provides a systematic approach to optimizing your light-sheet microscope setup. For further assistance or questions, feel free to reach out to our community and support channels. Your engagement contributes to the ongoing refinement of the openUC2 light-sheet microscope system.

    Finding the focus (waist) of the light-sheet

    To effectively align the light-sheet in your setup, it's crucial to follow these two key steps:

    Step 1: Centering the Sheet within the Field of View (FOV)

    1. Begin by ensuring that the waist of the light-sheet is positioned at the center of the microscope's field of view (FOV).
    2. To achieve this, the cylindrical lens needs to be temporarily removed. Carefully release the lower puzzle pieces to detach the cylindrical lens cube.

    1. With the cylindrical lens removed, a collimated beam should enter the back focal plane (BFP) of the illuminating objective lens.
    2. Adjust the kinematic mirror to guide the round beam, approximately 10mm in diameter, into the center of the BFP of the illuminating objective lens. This alignment should be parallel to the optical axis.

    Step 2: Achieving Focus with the Detection Objective Lens

    1. Activate the camera, such as using Galaxy Viewer software that comes with the camera drivers, to observe the light-sheet's focus.
    2. The fluorescently labeled region should now exhibit a focused beam, perceptible to the naked eye.
    3. Initiate axial movement of the objective lens (Axis Z) using the online control website. You'll notice an increase in intensity at either the positive or negative direction until the light-sheet focus becomes visible within the field of view.

    1. To optimize focus, make fine adjustments to the kinematic mirror to direct the light-sheet beam if it's positioned too high or too low.
    2. It's common for the light-sheet's focus not to align precisely with the center of the FOV. In this case, carefully adjust the position of the illuminating objective lens along the cube axis to relocate the focus positions.
    3. Once you're content with the alignment, deactivate the laser and reinsert the cylindrical lens.
    4. Notably, this step doesn't need to be repeated each time the light-sheet is activated. The position of the cylindrical lens is relatively stable and doesn't require frequent recalibration.

    Following these steps meticulously will ensure that the light-sheet is accurately aligned both within the FOV's center and in-focus with the detection objective lens. This alignment process is essential for obtaining reliable and high-quality imaging results with the openUC2 light-sheet microscope.

    Once the cylindrical lens is back in, you can readjust the light-sheet w.r.t. the focus plane of the objective lens, since there may be a slight variation after reassembly.

    Brightfield imaging

    In case you want to image the sample in transmission mode, turn on the Neopixel LED that is connected to the sample cube and optionally remove the fluorescent filter by pulling it up and store it somewhere safe (dust and scratch free!).

    Using the Fully Assembled Light-Sheet Microscope for Sample Imaging

    Now that all components are meticulously aligned, the openUC2 light-sheet microscope is primed for sample imaging. Follow these steps to prepare and capture your fluorescent sample:

    1. Sample Preparation:

      • Begin by preparing your fluorescent sample according to the specified protocols.
      • Carefully follow the steps outlined in the dedicated sample preparation section within this document.
    2. Assembling the Sample Holder:

      • Loosen the nut that secures the syringe and insert the syringe into the sample holder.
      • Gradually lower the syringe so that the tip of the sample barely touches the light-sheet within the sample plane.

    1. Squeezing out the Agarose:
      • Squeeze out the agarose gently from the syringe while observing the sample, starting with brightfield imaging.
      • Monitor the camera's image stream to ensure the sample becomes visible within the field of view.

    1. Observing Brightfield Image:
      • If the sample isn't immediately visible, confirm its positioning within the sample cube and make minor adjustments in XYZ to bring it into view on the camera screen.
      • Once visible in brightfield, deactivate the LED light source.

    1. Switching to Laser Illumination:

      • Turn on the laser source, and initially, remove the fluorescent filter.
      • Adjust the imaging settings to enhance contrast and visibility, increasing intensity, exposure time, and/or camera gain until you obtain a clear, well-exposed image with minimal noise.
    2. Fine-tuning Laser Position:

      • Using bright scattering as a guide, locate the laser's position while ensuring you have reinserted the fluorescent filter.
      • Adjust the intensity as needed.
    3. Sample Positioning:

      • Manipulate the sample's position in XYZ space to center it on a region of interest.
    4. ImSwitch Scan and Reconstruction:

      • Utilize ImSwitch software's scan and reconstruction plugin to perform scans of your sample.
      • The specific scan and reconstruction process details are provided in the ImSwitch documentation.

    This completes the procedure for imaging your fluorescent sample using the fully assembled openUC2 light-sheet microscope. With careful preparation and precise adjustments, you can capture high-quality volumetric images that offer valuable insights into the structure and behavior of your sample. Your engagement with the microscope's capabilities contributes to ongoing advancements in microscopic research and exploration.

    ImSwitch data acquisition and Reconstruction

    We assume the system is running and you were able to install ImSwitch on your computer. The configuration JSONfile that describes the light-sheet system can be found further down this document. A tutorial on how to install our ImSwitch Version (SRC: https://github.com/openUC2/ImSwitch/) can be either found in the imSwitch repository or in the ImSwitch section in this wiki.

    Mount the sample on a metal tip

    Glue the sample on an M5 set screw using super glue or Blu-Tack (non-safe, the sample can fall off). Insects offer a great level of fluorescent signal due to autofluorescence and act as nice training samples that can simply hang down using this method.

    Sample preparation á la agarose-in-syringe method

    SRC

    Sample Preparation Protocol for openUC2 Light-Sheet Microscope Imaging: Fluorescently Labeled Zebrafish

    This simplified protocol outlines the steps to prepare a fluorescently labeled zebrafish sample for imaging using the openUC2 light-sheet microscope. This method involves embedding the sample in an agarose cylinder for stable imaging in an aqueous environment. The "aquarium" or water-filled sample chamber is used for refractive index matching, as the sample would scatter too much light otherwise.

    Materials Required:

    • 1.5% Agar
    • Glass capillary
    • Zebrafish embryo (some other volumetric, mostly clear sample that can be excited at 488nm)
    • Sample medium
    • Falcon tube or small beaker
    • syringe or FEP tube (optional, for increased stability)

    Procedure:

    Mounting in Free-Hanging Agarose Cylinder:

    1. Take the syringe and cut away the tip
    2. Melt 1.5% agar at 70ºC and maintain it at 37ºC.
    3. Insert the plunger into the syringe capillary, ensuring the white end barely protrudes and suck in enough agarose
    4. Gently place the zebrafish embryo into the already solidified agarose, minimizing the water content.
    5. Pull the plunger to draw up about 3cm (1 inch) of melted agarose.
    6. Carefully position the sample close to the capillary's end.
    7. Allow the agarose to set for 1-2 minutes.
    8. When ready to image, gently push the plunger down to extrude the agarose cylinder with the sample, placing it just outside the capillary for imaging.

    Further tweaks for the system

    These steps are not necessary, but help you to customize the microscope to better match your sample configuration.

    Remove the xyz stage from the top

    In case you want to do maintenance on the microscope, the xyz stage can easily be removed by releasing the M3x55mm screws from the bottom part. Therefore, remove the puzzle piece that has been mounted below the objective lens and release the 3 screws that mount the stage plate to the upper part of the microscope. You can now release the stage. In order to move it back on, do the reverse process.

    Swap the sample mounting plate

    In principle the XYZ stage can mount any sample geometry. We wanted to start with something and adapted the common syringe mount. Only two screws from below have to be released in order to swap the sample mount plate:

    This part can be customized to adapt e.g. conventional sample slides

    ImSwitch configuration for the light-sheet

    {
    "positioners": {
    "ESP32Stage": {
    "managerName": "ESP32StageManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "enableauto": 0,
    "isEnable": 1
    },
    "axes": [
    "X",
    "Y",
    "Z",
    "A"
    ],
    "forScanning": true,
    "forPositioning": true
    }
    },
    "rs232devices": {
    "ESP32": {
    "managerName": "ESP32Manager",
    "managerProperties": {
    "host_": "192.168.43.129",
    "serialport_": "COM3",
    "serialport": "/dev/cu.usbserial-A50285BI"
    }
    }
    },
    "lasers": {
    "488 Laser": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "ESP32LEDLaserManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "channel_index":1,
    "filter_change": false,
    "laser_despeckle_period": 10,
    "laser_despeckle_amplitude": 0
    },
    "wavelength": 488,
    "valueRangeMin": 0,
    "valueRangeMax": 1024
    },
    "LED Matrix": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "ESP32LEDLaserManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "channel_index": "LED",
    "filter_change": false,
    "filter_axis": 3,
    "filter_position": 32000,
    "filter_position_init": -0
    },
    "wavelength": 635,
    "valueRangeMin": 0,
    "valueRangeMax": 255
    }
    },
    "detectors": {
    "WidefieldCamera": {
    "ExtPackage": "imswitch_det_webcam",
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "GXPIPYManager",
    "managerProperties": {
    "cameraListIndex": 1,
    "gxipycam": {
    "exposure": 20,
    "gain": 0,
    "blacklevel": 10,
    "image_width": 1000,
    "image_height": 1000
    }
    },
    "forAcquisition": true,
    "forFocusLock": true
    }
    },
    "rois": {
    "Full chip": {
    "x": 600,
    "y": 600,
    "w": 1200,
    "h": 1200
    }
    },
    "LEDMatrixs": {
    "ESP32 LEDMatrix": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "ESP32LEDMatrixManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "Nx": 4,
    "Ny": 4
    },
    "wavelength": 488,
    "valueRangeMin": 0,
    "valueRangeMax": 32768
    }
    },
    "autofocus": {
    "camera": "WidefieldCamera",
    "positioner": "ESP32Stage",
    "updateFreq": 10,
    "frameCropx": 780,
    "frameCropy": 400,
    "frameCropw": 500,
    "frameCroph": 100
    },
    "uc2Config": {
    "defaultConfig": "pindefWemos.json",
    "defaultConfig2": "pindefUC2Standalon2.json",
    "defaultConfig1": "pindefUC2Standalon.json"
    },
    "mct": {
    "monitorIdx": 2,
    "width": 1080,
    "height": 1920,
    "wavelength": 0,
    "pixelSize": 0,
    "angleMount": 0,
    "patternsDirWin": "C:\\Users\\wanghaoran\\Documents\\ImSwitchConfig\\imcontrol_slm\\488\\",
    "patternsDir": "/users/bene/ImSwitchConfig/imcontrol_sim/488"
    },
    "dpc": {
    "wavelength": 0.53,
    "pixelsize": 0.2,
    "NA":0.3,
    "NAi": 0.3,
    "n": 1.0,
    "rotations": [0, 180, 90, 270]
    },
    "webrtc":{},
    "PixelCalibration": {},
    "availableWidgets": [
    "Settings",
    "Positioner",
    "View",
    "Recording",
    "Image",
    "Laser",
    "UC2Config",
    "Joystick",
    "Lightsheet",
    "LEDMatrix"
    ],
    "nonAvailableWidgets":[
    "STORMRecon",
    "LEDMatrix",
    "MCT",

    "ImSwitchServer",
    "PixelCalibration",
    "Hypha",
    "FocusLock",
    "HistoScan",

    "FocusLock"]
    }
    - + \ No newline at end of file diff --git a/docs/Investigator/Lightsheet/LightSheet/index.html b/docs/Investigator/Lightsheet/LightSheet/index.html index de306b5af..033f54409 100644 --- a/docs/Investigator/Lightsheet/LightSheet/index.html +++ b/docs/Investigator/Lightsheet/LightSheet/index.html @@ -10,7 +10,7 @@ - + @@ -27,7 +27,7 @@ Z-stage for the objective lens

    Almost fully assembled UC2 light-sheet microscope

    Step 2: Light-Sheet Generation and Sample Preparation

    The fiber-coupled laser emits light at a wavelength of 488 nanometers, which is ideal for exciting fluorescent molecules commonly used in biological imaging, such as green fluorescent protein (GFP). The collimated laser beam passes through a cylindrical lens, creating a one-dimensional focus with a width of approximately 10 mm.

    The kinematic mirror allows precise control of the laser beam position, ensuring proper alignment. The lens further shapes the laser beam into an optical sheet, which is then directed into the sample plane by the four-fold objective lens.

    The sample, such as a zebrafish embryo, is held in a small aquarium filled with water. The sample is positioned such that the light sheet intersects it, and fluorescence signals are emitted only where the light sheet illuminates.

    Step 3: Image Acquisition

    Using the XYZ stage, move the sample in the focal plane of the ten-fold objective lens. The camera will capture images as the sample is moved, allowing you to create a three-dimensional stack of the object. The long working distance of the objective lens allows sufficient space between the lens and the sample, reducing the potential for photodamage and phototoxicity.

    Benefits of Light-Sheet Microscopy

    Light-sheet microscopy offers several advantages for imaging biological samples:

    • Optical sectioning: The light-sheet illuminates only the focal plane, minimizing background noise and out-of-focus signals.
    • Reduced phototoxicity: With the sample illuminated only in the focal plane, light-sheet microscopy reduces photodamage and photobleaching, allowing long-term imaging of live samples.
    • High-speed imaging: Light-sheet microscopy enables rapid volumetric imaging, capturing dynamic processes in real-time.
    • High resolution: The combination of optical sectioning and minimal scattering allows for high-resolution imaging, revealing fine cellular structures.

    Bill-of-Material

    This is a list of components that are used in the latest version of the openUC2 light-sheet microscope. This is subject to changes. If you are interested to build one of these devices and need a kit, please, don't hesitate to contact us via Mail, Github or other channels :) Find more information on www.openuc2.com

    CategoryAmountPartShopPrice (€)CommentQuantityURL/SourceAlternative
    External Parts1Cylindrical lens, comarThorolabs1501Link
    1Camera, monochrome, CMOSDaheng3501Link
    1Focusing stage, micrometer, motorized (NEMA12)China1001Haoran
    110x objective, NA0.3, long-working distanceUSA2501Link
    1XYZ stage, AliExpress, micrometerChina2501LinkLink
    3Motor for stageChina803LinkLink
    1Tube lensChina2001Link
    1Fiber laserChina2001HaoranLink
    1MirrorPGI51Link
    14x objective lens finiteChina101Haoran
    1Fiber CollimatorChina1001Haoran
    14BaseplatesopenUC2314
    8CubesopenUC2510
    1Solid baseplate (aluminium)openUC2-1
    1Excitation filter (Thorlabs)Thorolabs1201Link
    Inserts1Fiber Collimator MountopenUC251
    1Cylindrical Lens MountopenUC251
    145° Mirror Mount (kinematic)openUC2251
    1RMS Lens MountopenUC251
    1Sample mount (printed)openUC2301
    1Base for XYZ StageopenUC221
    1Sample mount for XYZ StageopenUC2151
    1Holder for Z-stage motorizedopenuc2301
    1Holder for TubelensopenUC2101
    1Holder for UC2 ElectronicsopenUC2301
    Electronics1Electronics, Powersupply, Stepper driveropenUC21001
    2USB cables (camera, micro)Germany602
    1Playstation ControllerGermany501
    1Box + Foam insertopenUC21001Link
    Labour & Shipping-Labour + Shipping-5001
    TOTAL---2790-

    The 3D printing files can be found here

    Conclusion

    Congratulations! You have successfully built a light-sheet microscope using the UC2 modular toolbox. This powerful technique allows you to acquire high-resolution three-dimensional images of samples like zebrafishes. With the ability to perform optical sectioning and minimal phototoxicity, light-sheet microscopy is a valuable tool for studying biological structures in 3D. You can now explore the fascinating world of 3D biological imaging and discover new insights into the complexities of life at the microscopic level. Happy imaging!

    - + \ No newline at end of file diff --git a/docs/Investigator/Lightsheet/LightSheetOld/index.html b/docs/Investigator/Lightsheet/LightSheetOld/index.html index 364f2ed54..ebca10530 100644 --- a/docs/Investigator/Lightsheet/LightSheetOld/index.html +++ b/docs/Investigator/Lightsheet/LightSheetOld/index.html @@ -10,7 +10,7 @@ - + @@ -34,7 +34,7 @@

    5. Use of filters
    • When using a correct filter between the Z-stage and the camera, it's possible to observe a fluorescent image of the sample.
    • Without filters you capture only the scattering image.

    Imaging with the light sheet microscope

    • The focus of the detection path can be finely adjusted using the Z-stage motor (GUI - Z).
    • Z-series can be acquired by moving the sample (GUI - X) through the focused light sheet plane - Move the sample-stage in both directions, using the lens tissue as a sample, to observe how the camera image changes.
    • To acquire an image: Choose "Start experiment" on the right side of the screen, click "Custom" on the top right side and then "Snap" on the bottom right side.
    • To acquire a z-stack use the tomographic mode:

    Results

    What can you see with the simplest possible light sheet setup:

    The result could look like this:

    Zebra fish embryo

    Participate!

    Do you want to show your own results? Do you have ideas for improvements? Let us know!

    - + \ No newline at end of file diff --git a/docs/Investigator/Lightsheet/LightsheetCalibration/index.html b/docs/Investigator/Lightsheet/LightsheetCalibration/index.html index 4f5797bfa..b9f6bc72c 100644 --- a/docs/Investigator/Lightsheet/LightsheetCalibration/index.html +++ b/docs/Investigator/Lightsheet/LightsheetCalibration/index.html @@ -10,13 +10,13 @@ - + - + \ No newline at end of file diff --git a/docs/Investigator/STORM/Electronics/index.html b/docs/Investigator/STORM/Electronics/index.html index f7c122f9e..7c64785a1 100644 --- a/docs/Investigator/STORM/Electronics/index.html +++ b/docs/Investigator/STORM/Electronics/index.html @@ -10,13 +10,13 @@ - +

    Electronics

    Here we make use of the ESP32 Wemos D1 R32 microcontroller board in combination with the CNC Shield v3. The wiring of the different components is straight forward as the Stepper Motors are attached to the stepper drivers and the Laser is triggered by the SpinEn pin. The NeoPixel LED mounts to the Hold pin.

    Flashing the firmware

    Go to the website https://youseetoo.github.io/ and choose the CNC board as the hardware configuration to flash the latest version of the Firmware. The PS3 controller's MAC address has to be setup with the PS Pairing tool. The actual MAC Address is printed out on the Serial monitor while the Board is booting up.

    - + \ No newline at end of file diff --git a/docs/Investigator/STORM/Illumination/index.html b/docs/Investigator/STORM/Illumination/index.html index da552fd2c..471917c7a 100644 --- a/docs/Investigator/STORM/Illumination/index.html +++ b/docs/Investigator/STORM/Illumination/index.html @@ -10,7 +10,7 @@ - + @@ -112,7 +112,7 @@ represents 10 µm. Two CCPs have been zoomed in to plot the profiles along the red transparent line. Scale bar for the magnified regions of interest represents 200 nm.

    - + \ No newline at end of file diff --git a/docs/Investigator/STORM/Main/index.html b/docs/Investigator/STORM/Main/index.html index b21ba25a1..6c3d558ce 100644 --- a/docs/Investigator/STORM/Main/index.html +++ b/docs/Investigator/STORM/Main/index.html @@ -10,7 +10,7 @@ - + @@ -83,7 +83,7 @@

    Mechanical stability of the setup

    See the mechanical stability section of the repository.

    Wide-field imaging, Live-cell imaging, Single molecule applications

    See the Results section of the repository.

    Get Involved

    This project is open so that anyone can get involved. You don't even have to learn CAD designing or programming. Find ways you can contribute in CONTRIBUTING

    License and Collaboration

    This project is open-source and is released under the CERN open hardware license. Our aim is to make the kits commercially available. We encourage everyone who is using our Toolbox to share their results and ideas, so that the Toolbox keeps improving. It should serve as an easy-to-use and easy-to-access general-purpose building-block solution for the area of STEAM education. All the design files are generally free, but we would like to hear from you how it is going.

    You're free to fork the project and enhance it. If you have any suggestions to improve it or add any additional functions make a pull-request or file an issue.

    Please find the type of licenses here

    REMARK: All files have been designed using Autodesk Inventor 2019 (EDUCATION)

    Collaborating

    If you find this project useful, please like this repository, follow us on Twitter and cite the webpage or the publication! :-)

    - + \ No newline at end of file diff --git a/docs/Investigator/STORM/Results/index.html b/docs/Investigator/STORM/Results/index.html index 2f9cdf493..e5a4858b1 100644 --- a/docs/Investigator/STORM/Results/index.html +++ b/docs/Investigator/STORM/Results/index.html @@ -10,7 +10,7 @@ - + @@ -57,7 +57,7 @@ represents 10 µm. Two CCPs have been zoomed in to plot the profiles along the red transparent line. Scale bar for the magnified regions of interest represents 200 nm.

    - + \ No newline at end of file diff --git a/docs/Investigator/STORM/Software/index.html b/docs/Investigator/STORM/Software/index.html index 09218186e..abbf57e00 100644 --- a/docs/Investigator/STORM/Software/index.html +++ b/docs/Investigator/STORM/Software/index.html @@ -10,13 +10,13 @@ - +

    Software

    For the control and acquisition software, we use ImSwitch. This is an open-source software centered around Napari as a multi-layer viewer and a rich framework for QT-based widgets. We make use of the open-source localization framework "microEye" ()

    Installation

    For the installation we advise you to have a look at the ImSwitch repository here https://github.com/kasasxav/ImSwitch/

    After setting up ImSwitch, you can enable STORM reconstruction in real time using the MicroEye Plugin by adding the following configuration to the ImSwitch config file that is located in ~/Documents/ImSwitchConfig/config/imcontrol_options.json

    {
    "setupFileName": "example_uc2_storm_alliedvision.json",
    "recording": {
    "outputFolder": "./ImSwitch/ImSwitch/recordings",
    "includeDateInOutputFolder": true
    },
    "watcher": {
    "outputFolder": "/Users/bene/ImSwitchConfig/scripts"
    }
    }

    The setup file with the actual hardware configuration can be placed here:

    ~/Documents/ImSwitchConfig/imcontrol_setups/example_uc2_storm_alliedvision.json

    {
    "positioners": {
    "ESP32Stage": {
    "managerName": "ESP32StageManager",
    "managerProperties": {
    "rs232device": "ESP32"
    },
    "axes": [
    "X",
    "Y",
    "Z"
    ],
    "forScanning": true,
    "forPositioning": true
    }
    },
    "rs232devices": {
    "ESP32": {
    "managerName": "ESP32Manager",
    "managerProperties": {
    "host_": "192.168.43.129",
    "serialport_windows": "COM5",
    "serialport": "/dev/cu.SLAB_USBtoUART"
    }
    }
    },
    "lasers": {
    "488 Laser": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "ESP32LEDLaserManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "channel_index": 1,
    "filter_change": false,
    "laser_despeckle_period": 10,
    "laser_despeckle_amplitude": 0
    },
    "wavelength": 488,
    "valueRangeMin": 0,
    "valueRangeMax": 1024
    },
    "635 Laser": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "ESP32LEDLaserManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "channel_index": 2,
    "filter_change": false,
    "laser_despeckle_period": 10,
    "laser_despeckle_amplitude": 0
    },
    "wavelength": 635,
    "valueRangeMin": 0,
    "valueRangeMax": 1024
    },
    "LED": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "ESP32LEDLaserManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "channel_index": "LED",
    "filter_change": false,
    "filter_axis": 3,
    "filter_position": 32000,
    "filter_position_init": -0
    },
    "wavelength": 635,
    "valueRangeMin": 0,
    "valueRangeMax": 255
    }
    },
    "detectors": {
    "WidefieldCamera": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "AVManager",
    "managerProperties": {
    "cameraListIndex": 1,
    "mocktype": "STORM",
    "mockstackpath": "/Users/bene/Downloads/New_SMLM_datasets/ROI_cos7MT_AF647fluopaint.tif",
    "avcam": {
    "exposure": 0,
    "gain": 0,
    "blacklevel": 100,
    "image_width": 1000,
    "image_height": 1000,
    "pixel_format": "Mono12"
    }
    },
    "forAcquisition": true,
    "forFocusLock": false
    }
    },
    "rois": {
    "Full chip": {
    "x": 600,
    "y": 600,
    "w": 1200,
    "h": 1200
    }
    },
    "LEDMatrixs": {
    "ESP32 LEDMatrix": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "ESP32LEDMatrixManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "Nx": 4,
    "Ny": 4
    },
    "wavelength": 488,
    "valueRangeMin": 0,
    "valueRangeMax": 32768
    }
    },
    "autofocus": {
    "camera": "WidefieldCamera",
    "positioner": "ESP32Stage",
    "updateFreq": 10,
    "frameCropx": 780,
    "frameCropy": 400,
    "frameCropw": 500,
    "frameCroph": 100
    },
    "availableWidgets": [
    "Settings",
    "View",
    "Recording",
    "Image",
    "Laser",
    "Positioner",
    "Autofocus",
    "STORMRecon"
    ]
    }

    ImSwitch in Action

    Here you can find a tour on Youtube how to set up everything and what it can do.

    https://www.youtube.com/watch?v=r8f-wmeq5i0

    - + \ No newline at end of file diff --git a/docs/Investigator/STORM/Stability/index.html b/docs/Investigator/STORM/Stability/index.html index 98bfd9486..5d75c824a 100644 --- a/docs/Investigator/STORM/Stability/index.html +++ b/docs/Investigator/STORM/Stability/index.html @@ -10,7 +10,7 @@ - + @@ -24,7 +24,7 @@ sigma value of the localized beads over the measurement duration. The sigma value correlates with the defocusing of the beads i.e. low changes in sigma suggest small fluctuations of the samples axial position.

    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/AlignLaser/index.html b/docs/Investigator/XYZMicroscope/AlignLaser/index.html index 2d6e466d6..32817ef9e 100644 --- a/docs/Investigator/XYZMicroscope/AlignLaser/index.html +++ b/docs/Investigator/XYZMicroscope/AlignLaser/index.html @@ -10,13 +10,13 @@ - +

    Aligning the Beamsplitter Cube

    The new xyz microscope has a special 2x1 cube that holds the fluorescence optics. Inside the beamsplitter cube is mounted kinematically and can be adjusted with 3 set screws. It's important that the fiber coupled laser is focussed / reimaged in the back focal plane of the objective lens. Therefore, we have created a little tutorial to get you starting how this works.

    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/FiveD_v1/index.html b/docs/Investigator/XYZMicroscope/FiveD_v1/index.html index c59f32ae7..f4d5def6a 100644 --- a/docs/Investigator/XYZMicroscope/FiveD_v1/index.html +++ b/docs/Investigator/XYZMicroscope/FiveD_v1/index.html @@ -10,13 +10,13 @@ - +

    openUC2 FiveD v1

    Unpacking the microscope

    The hardcover plastic case contains all you need for the microscope:

    • USB micro cable
    • USB3 camera cable
    • 12V power-supply
    • Sweet treat (optional ;D)
    • The actual microscope
    • The objective lens
    • The Illumination unit
    • A heavy Box

    The actual Box looks like this:

    Optional Please also find the treat and make sure you provide yourself with enough sugar throughout this unpacking routine :-)

    The foam holds the microscope in place (the actual colour may differ from what you may see):

    Remove the foam parts (please keep them for later reuse) to end up like this here:

    Getting started

    Mounting the illumination unit

    For this you need a 2.5mm Hex key and the M3 cylindrical screws. Mount the LED Arm like so:

    It should look like this:

    Wiring up the microscope

    First of all we need to wire up the microscope. For this we will start with the 12V power supply. Unfortunately the power socket is inside the case, hence you have to first eat some candy in order to better find the spot ;-)

    The USB Cable is permanently mounted to the ESP32 UC2e unit:

    Note: Please make sure you have sufficient USB Power. In case the full LED array is turning on, it may happen that the ESP's voltage drops and the USB serial connection fails. A reconnect will help.

    The same holds true for the USB connection to the microcontroller board. You need to hook it up like that:

    Once done, we continue with inserting the objective lens. Eventually the lens is already inserted and you just need to check if the lens is centered correctly

    Wire up the microscope to your computer

    In order to get the microscope working, we first need to install additional drivers. For the Daheng Camera, this would be:

    For additional information and an in-depth explanation for the UC2e system, please have a look here

    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/FiveD_v2/index.html b/docs/Investigator/XYZMicroscope/FiveD_v2/index.html index fdc62dece..b0a44b779 100644 --- a/docs/Investigator/XYZMicroscope/FiveD_v2/index.html +++ b/docs/Investigator/XYZMicroscope/FiveD_v2/index.html @@ -10,7 +10,7 @@ - + @@ -40,7 +40,7 @@

    Carefully take the lens out:

    In motion:

    Vertical Operation

    Troubleshoot

    We learn from mistakes. So let's start learning. The system is fully open, meaning you can adjust and change the vast majority of the parts on your own. The entire system consists of the openUC2 frame / skeleton and the 3D printed housing to shield it from dust and light. By removing all M3 cylindrical screws, you can detach the housing from the inner structure to eventually repair or alter the system.

    You can find a full description of how to disassemble the microscope here: https://openuc2.github.io/docs/PRODUCTION/INVESTIGATOR/ProductionXYZMicroscope

    In Action

    We scanned arabidopsis in darkfield (LEDs >9 on):

    Connecting the microscope to the browser and controlling it

    We encourage you to use the UC2ified ImSwitch software to control the microscope. You can find it in this repository: https://github.com/openUC2/ImSwitch/

    However, if you want to quick-start the microscope and see if it works, you can open your browser and use the WEB-Serial interface to interact with the microscope.

    Go to https://youseetoo.github.io/ and connect to your board (most right option saying ESP32 DEV-based UC2 standalone board V2). Select the COM Port which is holding the ESP32 and hit the LOG option, once the dialog opens. The alternative option will help you updating the firmware on the device. An in-depth explanation on how the firmware works can be found here.

    In general, you need to send JSON strings in order to control the system. The strings relevant for the Z-microscope are:

    Home the XY-axis

    It's important to always home the Motors in order to avoid them from getting stuck in an end position (ATTENTION!). The following string will move the motor until the endstop is hit. Afterwards it will release the switch:

    {"task":"/home_act", "home": {"steppers": [{"stepperid":1, "timeout": 2000, "speed": 15000, "direction":1, "endposrelease":3000}]}}

    and

    {"task":"/home_act", "home": {"steppers": [{"stepperid":2, "timeout": 2000, "speed": 15000, "direction":1, "endposrelease":3000}]}}

    Afterwards the internal position is set to 0. You can check that by entering:

    {"task": "/motor_get"}

    Move the Z-axis:

    The motor (Nema12) with 200 steps/revolution runs with 16 microsteps and offers a leadscrew with 1mm/revolution. Hence, one step corresponds to 312.5nm. Running the motor can be issued with the following command:

    {"task":"/motor_act",
    "motor":
    {
    "steppers": [
    { "stepperid": 3, "position": 1000, "speed": 15000, "isabs": 3, "isaccel":0}
    ]
    }
    }
    • stepperid: 3 corresponds to the Z-axis
    • position: steps to go (not physical units!)
    • speed: steps / minute (do not exceed 20000)
    • isabs: absolute or relative motion
    • isaccel: for now, use only non-accelerated motion!

    Safety

    • in case of shattered glass, make sure you don't cut yourself
    • Make sure you don't hurt yourself
    • The moving parts can potentially hurt your finger
    • The electronics - if used in a wrong way - can harm you
    • edges may be sharp, make sure you don't cut yourself
    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/FiveD_v3/index.html b/docs/Investigator/XYZMicroscope/FiveD_v3/index.html index 913b1a55f..6b8557176 100644 --- a/docs/Investigator/XYZMicroscope/FiveD_v3/index.html +++ b/docs/Investigator/XYZMicroscope/FiveD_v3/index.html @@ -10,7 +10,7 @@ - + @@ -25,7 +25,7 @@ Stepper motors connected to the board

    If the zero point is incorrect after installing the firmware, you can reverse the connections accordingly:

    Reversing the connections if necessary

    To connect the endstops, first rewire them and then connect them to the board.

    Rewiring the endstops Connecting the endstops to the board

    Next, you need to attach additional components. If necessary, drill new holes. The placement will depend on the objective, which needs to be positioned at the bottom left corner. After that, secure the components with two screws each.

    Drilling new holes if necessary Securing additional components

    Finally, attach the slide holder with 8 screws.

    Attaching the slide holder

    This completes the assembly of the UC2 FiveD v3 microscope. Follow each step carefully to ensure proper assembly and functionality.

    Troubleshoot

    We learn from mistakes. So let's start learning. The system is fully open, meaning you can adjust and change the vast majority of the parts on your own. The entire system consists of the openUC2 frame / skeleton and the 3D printed housing to shield it from dust and light. By removing all M3 cylindrical screws, you can detach the housing from the inner structure to eventually repair or alter the system.

    You can find a full description of how to disassemble the microscope here: https://openuc2.github.io/docs/PRODUCTION/INVESTIGATOR/ProductionXYZMicroscope

    Connecting the microscope to the browser and controlling it

    We encourage you to use the UC2ified ImSwitch software to control the microscope. You can find it in this repository: https://github.com/openUC2/ImSwitch/

    However, if you want to quick-start the microscope and see if it works, you can open your browser and use the WEB-Serial interface to interact with the microscope.

    Go to https://youseetoo.github.io/ and connect to your board (most right option saying ESP32 DEV-based UC2 standalone board V2). Select the COM Port which is holding the ESP32 and hit the LOG option, once the dialog opens. The alternative option will help you updating the firmware on the device. An in-depth explanation on how the firmware works can be found here.

    In general, you need to send JSON strings in order to control the system. The strings relevant for the Z-microscope are:

    Home the XY-axis

    It's important to always home the Motors in order to avoid them from getting stuck in an end position (ATTENTION!). The following string will move the motor until the endstop is hit. Afterwards it will release the switch:

    {"task":"/home_act", "home": {"steppers": [{"stepperid":1, "timeout": 2000, "speed": 15000, "direction":1, "endposrelease":3000}]}}

    and

    {"task":"/home_act", "home": {"steppers": [{"stepperid":2, "timeout": 2000, "speed": 15000, "direction":1, "endposrelease":3000}]}}

    Afterwards the internal position is set to 0. You can check that by entering:

    {"task": "/motor_get"}

    Move the Z-axis:

    The motor (Nema12) with 200 steps/revolution runs with 16 microsteps and offers a leadscrew with 1mm/revolution. Hence, one step corresponds to 312.5nm. Running the motor can be issued with the following command:

    {"task":"/motor_act",
    "motor":
    {
    "steppers": [
    { "stepperid": 3, "position": 1000, "speed": 15000, "isabs": 3, "isaccel":0}
    ]
    }
    }
    • stepperid: 3 corresponds to the Z-axis
    • position: steps to go (not physical units!)
    • speed: steps / minute (do not exceed 20000)
    • isabs: absolute or relative motion
    • isaccel: for now, use only non-accelerated motion!

    Safety

    • in case of shattered glass, make sure you don't cut yourself
    • Make sure you don't hurt yourself
    • The moving parts can potentially hurt your finger
    • The electronics - if used in a wrong way - can harm you
    • edges may be sharp, make sure you don't cut yourself
    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/FiveD_v4/index.html b/docs/Investigator/XYZMicroscope/FiveD_v4/index.html index e847f858f..9b98bfed2 100644 --- a/docs/Investigator/XYZMicroscope/FiveD_v4/index.html +++ b/docs/Investigator/XYZMicroscope/FiveD_v4/index.html @@ -10,7 +10,7 @@ - + @@ -31,7 +31,7 @@ the second spring ball now plays along

    The last two fixed balls find their groove almost immediately before the front ball moves up its ramp to the pins

    Final position. All fixed balls in their "pin yokes" and both spring balls press the optics module against them.

    Final result

    Safety

    • in case of shattered glass, make sure you don't cut yourself
    • Make sure you don't hurt yourself
    • The moving parts can potentially hurt your finger
    • The electronics - if used in a wrong way - can harm you
    • edges may be sharp, make sure you don't cut yourself
    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/HistoScan/index.html b/docs/Investigator/XYZMicroscope/HistoScan/index.html index 39ac1ac15..c7116a734 100644 --- a/docs/Investigator/XYZMicroscope/HistoScan/index.html +++ b/docs/Investigator/XYZMicroscope/HistoScan/index.html @@ -10,7 +10,7 @@ - + @@ -18,7 +18,7 @@

    Histo Scanner Plugin Documentation

    Welcome to the documentation page for the Histo Scanner Plugin, a powerful tool for scanning large areas and stitching images onto a large canvas. This page provides detailed information on how to configure and use the plugin effectively.

    Overview

    The Histoscanner Plugin integrates with the ImSwitch widget and controller to facilitate the scanning of large sample areas. Users can select a sample geometry and initiate scanning, which captures images and stitches them together to form a comprehensive view.

    Initial Setup

    Before starting a scan, ensure the following settings are configured correctly:

    • Pixel Size: Set in the setup.json file. This size must be calibrated, possibly using a ruler.
    • Step Size of Axis: Also set in the setup.json. It typically depends on the steps/mm defined by the leadscrew.
    • Sample Configuration File: An example file can be found here.

    Scanning Process

    The microscope will compute the scan area and the necessary scan stepsize on its own and will perform a snake scan. Alternatively you can provide a list of coordinates.

    Once the scan is successfully initiated, the final output is displayed in a downscaled version on napari to conserve memory.

    ImSwitch Configuration

    The configuration settings for the detector and stage are crucial. Here are the JSON settings for both:

    For the Stage

      "positioners": {
    "ESP32Stage": {
    "managerName": "ESP32StageManager",
    "managerProperties": {
    "rs232device": "ESP32",
    "isEnable": true,
    "enableauto": false,
    "stepsizeX": -0.3125,
    "stepsizeY": -0.3125,
    "stepsizeZ": 0.3125,
    "homeSpeedX": 15000,
    "homeSpeedY": 15000,
    "homeSpeedZ": 15000,
    "isDualaxis": true,
    "homeDirectionX": 1,
    "backlashXOld": 15,
    "backlashYOld": 40,
    "backlashX": 0,
    "backlashY": 0,
    "homeEndstoppolarityY": 0,
    "homeDirectionY": -1,
    "homeDirectionZ": 0,
    "homeXenabled": 1,
    "homeYenabled": 1,
    "homeZenabled": 0,
    "initialSpeed": {
    "X": 15000,
    "Y": 15000,
    "Z": 15000
    }
    },
    "axes": [
    "X",
    "Y",
    "Z"
    ],
    "forScanning": true,
    "forPositioning": true
    }
    }

    For the Detector

      "detectors": {
    "WidefieldCamera": {
    "analogChannel": null,
    "digitalLine": null,
    "managerName": "HikCamManager",
    "managerProperties": {
    "isRGB": 1,
    "cameraListIndex": 0,
    "cameraEffPixelsize": 0.2257,
    "hikcam": {
    "exposure": 0,
    "gain": 0,
    "blacklevel": 100,
    "image_width": 1000,
    "image_height": 1000
    }
    },
    "forAcquisition": true,
    "forFocusLock": true
    }

    File Handling

    • Storing Metadata: All metadata is stored in the OME.TIF format.
    • Opening in Fiji: Files can be easily opened and stitched in Fiji by importing them as OME.TIF.
    • Opening in ASHLAR: Use the script developed during the openUC2 hackathon available here as a starting point for handling files in Ashlar.

    Hardware/Software Setup

    Correct orientation of the stage coordinates and camera coordinates is essential. The configuration ensures that the camera orientation matches the stage scanning positions.

    In order to have correct orientation it's important that the stage coordinates and the camera coordinates are matching. The below image shows how the camera has to be oriented w.r.t. the stage scanning positions

    Tutorials and Demonstrations

    • Tutorial on Matching Axes: A tutorial explaining the matching of different axes is available on YouTube.
    • Full Plugin in Action: Watch the plugin in action here.

    Feel free to reach out with any queries or suggestions to enhance this documentation. Happy scanning with Histo Scanner!

    Tutorial that explains how the different axes can be matched

    The Full plugin in action

    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/MCTPlugin/index.html b/docs/Investigator/XYZMicroscope/MCTPlugin/index.html index eeca50388..e93159ecc 100644 --- a/docs/Investigator/XYZMicroscope/MCTPlugin/index.html +++ b/docs/Investigator/XYZMicroscope/MCTPlugin/index.html @@ -10,13 +10,13 @@ - + - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/ROIScanner/index.html b/docs/Investigator/XYZMicroscope/ROIScanner/index.html index a7b1a74b3..9efbf0fc2 100644 --- a/docs/Investigator/XYZMicroscope/ROIScanner/index.html +++ b/docs/Investigator/XYZMicroscope/ROIScanner/index.html @@ -10,13 +10,13 @@ - +

    ROI Scanner

    Starting ImSwitch on Ubuntu and Start the ROI Scanner

    First of all: Open the terminal. Type the following (all case sensitive):

    conda activate imswitch

    sudo chown user:user /dev/ttyUSB0 # where user is the current user you're logged into (then enter password)

    The USB port may differ, so perhaps also try this:

    sudo chown veo:veo /dev/ttyUSB1
    sudo chown veo:veo /dev/ttyUSB2

    Then:

    python -m imswitch

    The images are stored in the working directory of the terminal.

    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/SetupPhasecontrast/index.html b/docs/Investigator/XYZMicroscope/SetupPhasecontrast/index.html index 512d3206e..042c405a0 100644 --- a/docs/Investigator/XYZMicroscope/SetupPhasecontrast/index.html +++ b/docs/Investigator/XYZMicroscope/SetupPhasecontrast/index.html @@ -10,7 +10,7 @@ - + @@ -29,7 +29,7 @@ The cheek cells are relatively thick and cause some additional image degradation (i.e. multiple scattering)

    Using only the green line of the ring improves overall contrast and reduces the artifact

    Multiple cell layers visualized with the UC2 XYZ phase-contrast microscope

    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/SetupTubelens/index.html b/docs/Investigator/XYZMicroscope/SetupTubelens/index.html index 9bda0f770..154ccf750 100644 --- a/docs/Investigator/XYZMicroscope/SetupTubelens/index.html +++ b/docs/Investigator/XYZMicroscope/SetupTubelens/index.html @@ -10,7 +10,7 @@ - + @@ -18,7 +18,7 @@

    openUC2 Setting up the tube lens

    Introduction and Explanation

    The tube lens in the openUC2 setup is a critical component responsible for transforming an object that is far away (approximately infinitely far away) into an image on the camera sensor. Proper calibration of the tube lens is essential to ensure that it produces a sharp and clear image. In this documentation, we will guide you through the process of setting up the tube lens for your openUC2 system.

    To achieve optimal performance, you will need to follow these key steps:

    1. Adjust the CCTV lens to focus on objects at infinity.
    2. Fully open the aperture to maximize light collection.

    Additionally, there is a small tool called "the crown" that facilitates the alignment of the ring components. You can download "the crown" tool from this link.

    Procedure

    Follow these steps to set up the tube lens for your openUC2 system:

    1. Mount the Camera

    Begin by mounting the camera using the C-mount onto the CCTV lens. Ensure that you include the spacer ring to align the image plane of the CCTV lens with the sensor plane of the camera.

    2. Adjust the Image Plane

    Make sure that the camera is securely attached to the CCTV lens, preventing it from turning or becoming loose.

    3. Install "The Crown"

    Insert "the crown" tool between the camera and the CCTV lens. This tool will assist in aligning the rings correctly.

    4. Secure the Camera

    Turn the camera clockwise, typically 2-3 full rotations, to fully secure it to the CCTV lens.

    5. Open the Aperture

    Rotate the camera counter-clockwise until the aperture is fully open. Ensure that the camera is parallel to the ground during this step to maximize light intake.

    6. Focus to Infinity

    Now, use "the crown" tool to turn the outer focus ring of the CCTV lens counter-clockwise. This adjustment will ensure that the CCTV lens is focused at infinity.

    By following these steps, you will successfully set up the tube lens for your openUC2 system, allowing it to capture sharp and well-focused images. Remember to check and fine-tune your settings as needed to achieve the best results for your specific imaging requirements.

    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/SmartMicroscopy/index.html b/docs/Investigator/XYZMicroscope/SmartMicroscopy/index.html index 1f1274ac6..96b6cb0b8 100644 --- a/docs/Investigator/XYZMicroscope/SmartMicroscopy/index.html +++ b/docs/Investigator/XYZMicroscope/SmartMicroscopy/index.html @@ -10,13 +10,13 @@ - +

    Smart Microscopy Using openUC2 and ImSwitch

    This tutorial will guide you through setting up a smart microscopy workflow using the openUC2 microscope and the ImSwitch software. We will perform a closed-loop experiment where the microscope follows a line based on image processing results.

    Prerequisites

    1. ImSwitch Software: Ensure that ImSwitch is running and accessible. For example, if running on the same computer, the URL might be https://localhost:8002 (check logs for the exact port).
    2. SSL Certificate: Access the REST API (e.g., https://localhost:8002/docs) in a browser and accept the security warning to use the web viewer (https://youseetoo.github.io/imswitch/index.html). Enter the URL and port under connections.

    Closed-Loop Feedback Pipeline

    The pipeline will:

    1. Snap an image
    2. Create a background image
    3. Subtract the background
    4. Compute edges using the Canny filter
    5. Perform Hough transform to find straight lines
    6. Determine the mean orientation of the lines
    7. Compute the next XY coordinate to move
    8. Return to the initial position

    Installation

    Install the necessary package:

    pip install https://github.com/openUC2/imswitchclient/archive/refs/heads/main.zip

    Code Implementation

    You can run the following code in a Jupyter notebook or Visual Studio Code. Adjust the client initialization to match your setup.

    # Load dependencies
    import cv2
    import numpy as np
    import tifffile as tif
    import matplotlib.pyplot as plt
    import os
    import imswitchclient.ImSwitchClient as imc
    import numpy as np
    import matplotlib.pyplot as plt
    import time
    from simple_pid import PID

    # Setup PID controller
    controller = PID(2, 0.1, 2)
    controller.send(None)

    # Initialize the client
    client = imc.ImSwitchClient(host="192.168.137.1", port=8002)

    # Retrieve the first positioner's name and current position
    positioner_names = client.positionersManager.getAllDeviceNames()
    positioner_name = positioner_names[0]
    currentPositions = client.positionersManager.getPositionerPositions()[positioner_name]
    initialPosition = (currentPositions["X"], currentPositions["Y"])

    # Loop through the process
    for iimage in range(10):
    # Snap image
    scalingFactor = .5
    pixel_to_stage = 1 / scalingFactor
    gaussianKernel = 201
    print("Taking image")
    iImage = client.recordingManager.snapNumpyToFastAPI(scalingFactor)
    mCrop = np.max(iImage.shape)
    Ny, Nx = iImage.shape

    # Remove background
    mBackground = cv2.GaussianBlur(iImage, (gaussianKernel, gaussianKernel), 0)
    iImage = iImage / mBackground
    iImage = iImage[Nx//2-mCrop:Nx//2+mCrop, Ny//2-mCrop:Ny//2+mCrop]

    # Process image
    image = np.uint8(iImage * 255)[:, :, np.newaxis]
    image[image > 100] = 0
    edges = cv2.Canny(image, 50, 150, apertureSize=3)
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)

    # Calculate main orientation
    angles = [np.degrees(theta) for rho, theta in lines[:, 0]] if lines is not None else []
    main_orientation = np.mean(angles)
    dy = np.cos(np.radians(main_orientation)) * Nx / 2
    dx = np.sin(np.radians(main_orientation)) * Ny / 2

    # Handle NaN values
    dx = dx if not np.isnan(dx) else np.random.randint(-100, 100)
    dy = dy if not np.isnan(dy) else np.random.randint(-100, 100)

    newPosition = (dx * pixel_to_stage, dy * pixel_to_stage)
    print(f"We are moving the microscope in x:/y: {round(newPosition[0], 2)} / {round(newPosition[1], 2)}")

    client.positionersManager.movePositioner(positioner_name, "X", newPosition[0], is_absolute=False, is_blocking=True)
    client.positionersManager.movePositioner(positioner_name, "Y", newPosition[1], is_absolute=False, is_blocking=True)

    # Return to the initial position
    client.positionersManager.movePositioner(positioner_name, "X", initialPosition[0], is_absolute=True, is_blocking=True)
    client.positionersManager.movePositioner(positioner_name, "Y", initialPosition[1], is_absolute=True, is_blocking=True)

    Result

    The microscope will follow a line for 10 steps and then return to the initial position.

    Result

    This workflow demonstrates a basic smart microscopy setup using openUC2 and ImSwitch, allowing for closed-loop experiments based on real-time image processing. Adapt and expand this pipeline for your specific experiments and applications.

    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/StageCalibration/index.html b/docs/Investigator/XYZMicroscope/StageCalibration/index.html index 2ddf2de22..b12bbbec0 100644 --- a/docs/Investigator/XYZMicroscope/StageCalibration/index.html +++ b/docs/Investigator/XYZMicroscope/StageCalibration/index.html @@ -10,7 +10,7 @@ - + @@ -18,7 +18,7 @@

    Stage Mapping and Stage Calibration

    Stage Coordinates

    In this tutorial, we will guide you through the process of aligning the coordinate systems for the UC2 microscope stage. Proper alignment ensures that the movement of the stage corresponds accurately with the image displayed on the screen, facilitating an intuitive user experience. In principle all of this can be handled in software (e.g. flipping the camera image, changing stage axis), but it's always good to start with a common ground from the hardware side.

    This is the microscope (UC2 XYZ v3) with the ingredients controlled by ImSwitch

    Alignment of Axes

    The goal of aligning the coordinate systems is to ensure they are correctly matched. The alignment of the stage is considered from the origin point (zero point). The desired behavior is as follows:

    • When the stage moves to the right (x+), the image on the screen should also move to the right.
    • When the stage moves upwards (y+), the image on the screen should move upwards as well.

    This is illustrated in the following Figure. When viewing the sample from above with the microscope positioned in front, the image should match what is shown in Imswitch. This is also represented by the VirtualMicroscope with the VirtualStage and VirtualCamera in this config (Config: https://github.com/openUC2/ImSwitchConfig/blob/master/imcontrol_setups/example_virtualmicroscope.json).

    Understanding Axes in NumPy

    It's important to note the labeling of axes. In NumPy, x = 1 and y = 0. This means:

    • The x-axis is the second axis (index 1) of an array.
    • The y-axis is the first axis (index 0) of an array.

    NumPy arrays are multidimensional, with axes numbered as follows:

    • Axis 0 is the first axis (often the vertical direction).
    • Axis 1 is the second axis (often the horizontal direction).

    Stage Coordinate System

    When viewing the stage from above, the coordinate system is arranged as follows:

    • X-Axis (Horizontal)
      • Positive direction: Right
      • Negative direction: Left
    • Y-Axis (Vertical)
      • Positive direction: Up
      • Negative direction: Down

    Aligning Axes with Imswitch

    To enable intuitive operation, the stage and camera axes must be correctly aligned with the coordinate system in Imswitch. To achieve this, the commands "flip x" and "flip y" are used. These commands invert the direction of the axes in the coordinate system, meaning that movement or position along the axes is reversed.

    Steps for Aligning Axes:

    1. Initial Setup:

      • Ensure the microscope and stage are properly connected to the control software (e.g., Imswitch).
    2. Define Origin:

      • Identify the origin (zero point) of the stage coordinate system. (in Hardware this would be defined by the Endstops that are used for homing the axes; The motor will run - if the direction is set correctly - until it hits the switch)
    3. Test Movement:

      • Move the stage to the right and observe the direction of the image on the screen.
        • If the image moves left, apply the "flip x" command.
      • Move the stage upwards and observe the direction of the image on the screen.
        • If the image moves down, apply the "flip y" command.
    4. Adjust Axes:

      • Use the following commands as needed to align the axes:

        # Flip the x-axis if necessary
        if x_movement_incorrect:
        stage.flip_x()

        # Flip the y-axis if necessary
        if y_movement_incorrect:
        stage.flip_y()
    5. Verify Alignment:

      • After applying the flips, verify that the stage movements correspond correctly with the image movements on the screen.
    6. Save Configuration:

      • Save the configuration settings to ensure the alignment persists across sessions.

    Stage Calibration

    Richard Bowman and his team provided a very nice way to calibrate stage coordinates to camera pixel coordinates. We brutally integrated the open-source software, which you can find here: https://gitlab.com/openflexure/microscope-extensions/camera-stage-mapping, into ImSwitch. If you activate the HistoScan Controller and Widget you can start it either via the GUI or using the HTTP interface by calling http://localhost:8002/HistoScanController/startStageMapping (URL and PORT may differ). The stage will move through a certain series of steps in XY, perform a cross-correlation of the images and compute the shift in XY of the microscope image on the camera, compare it to the expected shift, and return the Image-To-Stage-Displacement matrix as well as the backlash vector. Both matrices/vectors are microscope specific and will help you match e.g. stage coordinates for stitching software such as ASHLAR or OFM Stitching. This document should give you a rough idea of what's happening.

    Some terminology:

    • Combine X and Y calibrations: The calibration involves combining two separate measurements or calibration runs for the x and y directions, ensuring that the directions are orthogonal (at right angles to each other).

    • 2x2 transformation matrix: The image_to_stage_displacement matrix maps image displacements to stage displacements. This ensures that movements in the image coordinate system are accurately translated to movements in the stage coordinate system.

    • backlash_vector: This is a vector estimating the backlash (mechanical slack or play in the system) in each direction. In this case, the estimated backlash is zero, indicating a precise calibration with no noticeable mechanical play.

    • backlash: The function is expected to return the highest element of backlash_vector as a scalar value, which would be zero in this case.

    Interpretation of the Matrix

    Calibration Matrix image_to_stage_displacement

    The entries of the calibration matrix image_to_stage_displacement can be given specific names and meanings based on their positions within the matrix. Let's denote the matrix as follows:

    \text{image\_to\_stage\_displacement} = \begin{pmatrix} 0 & -1.0 \\ -1.0 & 0 \end{pmatrix} = \begin{pmatrix} a & b \\ c & d \end{pmatrix}

    Entries and Their Names

    1. a (0):

      • Name: a
      • Meaning: Represents the scaling factor from the x-coordinate in the image to the x-coordinate in the stage. Here, it is 0, indicating no direct mapping from image x to stage x.
    2. b (-1.0):

      • Name: b
      • Meaning: Represents the scaling factor from the y-coordinate in the image to the x-coordinate in the stage. The value -1.0 indicates an inverted (sign-flipped) unit mapping from image y to stage x.
    3. c (-1.0):

      • Name: c
      • Meaning: Represents the scaling factor from the x-coordinate in the image to the y-coordinate in the stage. The value -1.0 indicates an inverted (sign-flipped) unit mapping from image x to stage y.
    4. d (0):

      • Name: d
      • Meaning: Represents the scaling factor from the y-coordinate in the image to the y-coordinate in the stage. Here, it is 0, indicating no direct mapping from image y to stage y.

    Summary of the Matrix Entries

    • a (0): No direct mapping from image x to stage x.
    • b (-1.0): Inverse mapping from image y to stage x.
    • c (-1.0): Inverse mapping from image x to stage y.
    • d (0): No direct mapping from image y to stage y.

    This calibration matrix indicates that the transformation involves a 90-degree rotation (axis swap) combined with a sign inversion between the image coordinates and the stage coordinates. The exact interpretation may depend on the specific application, but generally, it implies that movements in one direction in the image are mapped to inverted movements in the perpendicular direction on the stage.

    Interpretation of the Values

    The following simulation of the VirtualMicroscope inside ImSwitch (Config: https://github.com/openUC2/ImSwitchConfig/blob/master/imcontrol_setups/example_virtualmicroscope.json):

    The result of the stage mapping is a json file containing (under /ImSwitch/calibFile.json) the following important element:

        "camera_stage_mapping_calibration": {
    "backlash": 0.0,
    "backlash_vector": [
    0.0,
    0.0,
    0.0
    ],
    "image_to_stage_displacement": [
    [
    0.0,
    -1.0
    ],
    [
    -1.0,
    0.0
    ]
    ]
    }

    The provided matrices explains transforming image coordinates to stage coordinates and estimating backlash. Let's break down the interpretation of the entries:

    1. image_to_stage_displacement Matrix

    mData["camera_stage_mapping_calibration"]["image_to_stage_displacement"] =
    array([[ 0. , -1.00135997],
    [-1.00135997, 0. ]])

    This matrix is a 2x2 transformation matrix used to map image coordinates to stage coordinates. Each entry in this matrix has a specific meaning:

    • [0,0] = 0: There is no direct transformation of the x-coordinate in the image to the x-coordinate in the stage.
    • [0,1] = -1.00135997: The y-coordinate in the image inversely affects the x-coordinate in the stage.
    • [1,0] = -1.00135997: The x-coordinate in the image inversely affects the y-coordinate in the stage.
    • [1,1] = 0: There is no direct transformation of the y-coordinate in the image to the y-coordinate in the stage.

    The presence of -1.00135997 off-diagonal elements indicates that the transformation involves a negative and approximately unit scaling between the coordinates, implying a possible 90-degree rotation combined with a scaling factor close to -1.

    2. backlash_vector Matrix

    mData["camera_stage_mapping_calibration"]["backlash_vector"] =
    array([ 0., 0., 0.])

    This vector represents the estimated backlash in each direction (x, y, and possibly z, though z is not utilized in a 2D context). Here, all elements are zero, indicating no measurable backlash in the x and y directions.

    - + \ No newline at end of file diff --git a/docs/Investigator/XYZMicroscope/StageScanning/index.html b/docs/Investigator/XYZMicroscope/StageScanning/index.html index beef479b1..6b9789c3f 100644 --- a/docs/Investigator/XYZMicroscope/StageScanning/index.html +++ b/docs/Investigator/XYZMicroscope/StageScanning/index.html @@ -10,14 +10,14 @@ - +

    Stage Scanning and Image Stitching (ASHLAR)

    We have multiple ways to perform stage scanning and stitching using ImSwitch. Below you can find dedicated documentation for some of them:

    1. Use ImScripting to take snapshots, save them locally and perform stitching using ASHLAR offline
    2. Perform GUI-based stitching and perform stitching using ASHLAR / simple pixel assignments online (e.g. within/after the scanning process)
    3. Stitching using the Chatbot

    1. Using Imswitch for Image Retrieval and Stitching with Interactive ImScripting

    Overview

    This documentation outlines the steps to use Imswitch and Interactive ImScripting to retrieve images and stitch them together. The process involves configuring the motor settings, capturing images in a grid pattern, and stitching these images using ASHLAR.

    The following code can be executed inside the ImSwitch's ImScripting editor.

    # install a modified version of ashlar that enables loading numpy arrays directly without going through a file
    # python -m pip install https://github.com/openUC2/ashlar/archive/refs/heads/master.zip
    import numpy as np
    import time
    import threading
    import os
    import tifffile
    import re
    from ashlar.scripts import ashlar
    from ashlar.scripts.ashlar import process_images
    from pathlib import Path

    # Calculate the image size and the overlap of the images based on pixel size and resolution.
    mPixelSize = 1.0 # micron - use a calibration chart to get this right!
    input_dir = "./mScanImages"
    output_dir = "./mStitchedImage"
    input_name = "TmpTileFile.ome.tif"
    output_name = "ResultingStitchedImage.ome.tif"
    initialPosX = 0
    initialPosY = 0
    maximum_shift_microns = 1000
    Nx = 5
    Ny = 5
    # please try changing these two values to make it match!
    flip_x=True
    flip_y=False

    # create the folders and names
    Path(input_dir).mkdir(parents=True, exist_ok=True)
    Path(output_dir).mkdir(parents=True, exist_ok=True)
    ashlar_output_file = os.path.join(output_dir, output_name)
    ashlar_input_file = os.path.join(input_dir, input_name)

    mFrameShape = api.imcontrol.snapImage(True, False).shape
    xDim = mFrameShape[1] * mPixelSize
    yDim = mFrameShape[0] * mPixelSize
    mOverlap = 0.8 # step is 80% of the field of view, i.e. 20% overlap at the edges

    # Set the motor control to 0 and define the motor speeds for the X, Y, and Z axes.
    positionerName = api.imcontrol.getPositionerNames()[0]
    api.imcontrol.setPositionerSpeed(positionerName, "X", 20000)
    api.imcontrol.setPositionerSpeed(positionerName, "Y", 20000)
    api.imcontrol.setPositionerSpeed(positionerName, "Z", 2000)

    ## Capture images in an Nx x Ny grid pattern. The stage moves to the start position and captures images at each step. Each image is saved with coordinates as the filename.
    iiter = 0

    USE_OME = False
    if USE_OME:# on MAC ARM M1 it may not work..
    with tifffile.TiffWriter(input_name) as tif: ## Define the input and output directories, and the pixel size. Open a new TIFF file to write the collected tiles, read each image, extract the position from the filename, prepare metadata, and write the image with metadata into the TIFF file. Finally, use ASHLAR to stitch the images together.
    for ix in np.arange(Nx):
    for iy in np.arange(Ny):
    mPos = (ix * xDim * mOverlap + initialPosX, iy * yDim * mOverlap + initialPosY)
    api.imcontrol.movePositioner(positionerName, "XY", mPos, True, True)
    time.sleep(0.5)
    mFrame = api.imcontrol.snapImage(True, False)
    metadata = {
    'Pixels': {'PhysicalSizeX': mPixelSize, 'PhysicalSizeXUnit': 'm', 'PhysicalSizeY': mPixelSize, 'PhysicalSizeYUnit': 'm'},
    'Plane': {'PositionX': ix, 'PositionY': iy}
    }
    tif.write(mFrame, metadata=metadata)
    iiter += 1
    ashlar.main(['', ashlar_input_file, '-o', ashlar_output_file, '--pyramid', '-m%s' % maximum_shift_microns, "-flip_x", flip_x, "-flip_y", flip_y])

    else: # this is a workaround with a numpy reader instead
    mImageList = []
    position_list = []
    for ix in range(Nx):
    for iy in range(Ny):
    mPos = (ix * xDim * mOverlap + initialPosX, iy * yDim * mOverlap + initialPosY)
    api.imcontrol.movePositioner(positionerName, "XY", mPos, True, True)
    time.sleep(0.5)
    mFrame = api.imcontrol.snapImage(True, False)
    mImageList.append(mFrame)
    position_list.append(mPos)
    print(mPos)
    arrays = [np.expand_dims(np.array(mImageList),1)] # (num_images, num_channels, height, width)
    # create a 2D list of xy positions
    position_list = np.array(position_list)

    # Process numpy arrays
    process_images(filepaths=arrays,
    output='ashlar_output.tif',
    align_channel=0,
    flip_x=flip_x,
    flip_y=flip_y,
    flip_mosaic_x=False,
    flip_mosaic_y=False,
    output_channels=None,
    maximum_shift=maximum_shift_microns,
    stitch_alpha=0.01,
    maximum_error=None,
    filter_sigma=0,
    filename_format='cycle_{cycle}_channel_{channel}.tif',
    pyramid=False,
    tile_size=1024,
    ffp=None,
    dfp=None,
    barrel_correction=0,
    plates=False,
    quiet=False,
    position_list=position_list,
    pixel_size=mPixelSize)
    mImage = tifffile.imread('ashlar_output.tif')

    #display the resulting tiles
    api.imcontrol.displayImageNapari("Tiles", arrays[0], isRGB=False)

    print(position_list)
    # display the resulting image
    api.imcontrol.displayImageNapari("StitchedImage", mImage, isRGB=False)

    Additional Image Processing

    Additional commands can be used to manipulate the stitched image if needed (depends if the image looks weird or not..):

    Flip X-Axis

    ashlar.main(['', collected_tiles_file, '-o', ashlar_output_file, '--pyramid', '-m%s' % maximum_shift_microns, "--flip-x"])

    Flip Both Axes and Mirror Images in X-Direction

    ashlar.main(['', collected_tiles_file, '-o', ashlar_output_file, '--pyramid', '-m%s' % maximum_shift_microns, "--flip-mosaic-x"])

    In action

    Here we use the loading of numpy images inside ImSwitch and process them with Ashlar to directly display them in Napari. Make sure the orientation is set as in the animation below according to the Stage Calibration results.

    # keep this number low (e.g. 1-2 to check the correct direction of X/Y)
    Nx = 2
    Ny = 2
    # please try changing these two values to make it match!
    flip_x=True
    flip_y=False

    2. Perform GUI-based stitching and perform stitching using ASHLAR in the Main GUI

    Navigate to the HistoScan Menu and perform the grid-based scanning. Select ASHLAR stitching and the appropriate flipping of the axes (will be suggested by the previously performed stage mapping) and run the scanning. The stitched result will be displayed after some computational time which may vary depending on your CPU and memory availability.

    3. Stitching using the Chatbot

    A recent experimental feature is to use the BioImage.io chatbot and provide a customized extension to interact with the microscope. The extension is implemented in the HyphaController and exposes certain functions to the chatbot interface. This lets us formulate prompts that will then interact with the microscope. Below we formulate a simple query:

    Can you turn on the light of the uc2 microscope to 512 and perform a slide scan with default parameters?

    which gets interpreted on the microscope side

    Tool Call: U2MicroscopeSetIllumination
    Arguments:
    - channel: 0

    - intensity: 512

    Result: Set the illumination!
    Tool Call: U2MicroscopeSlideScan
    Arguments:
    - numberTilesX: 3

    - numberTilesY: 3

    - stepSizeX: 0

    - stepSizeY: 0

    - nTimes: 1

    - tPeriod: 1

    - illuSource: ``

    - initPosX: 0

    - initPosY: 0

    - isStitchAshlar: true

    - isStitchAshlarFlipX: true

    - isStitchAshlarFlipY: false

    Result: Started slide scanning!

    The result is a scan and a following stitching routine using ASHLAR:

    - + \ No newline at end of file diff --git a/docs/Investigator/ZMicroscope/UpackZMicroscope/index.html b/docs/Investigator/ZMicroscope/UpackZMicroscope/index.html index fa619ab6c..8379a670d 100644 --- a/docs/Investigator/ZMicroscope/UpackZMicroscope/index.html +++ b/docs/Investigator/ZMicroscope/UpackZMicroscope/index.html @@ -10,13 +10,13 @@ - +

    Unpack the openUC2 Z-Microscope

    Unpacking the microscope

    The hardcover plastic case contains all you need for the microscope:

    • USB micro cable
    • USB3 camera cable
    • 12V power-supply
    • Sweet treat
    • The actual microscope
    • The objective lens

    The actual Box looks like this:

    Please also find the treat and make sure you provide yourself with enough sugar throughout this unpacking routine :-)

    The foam holds the microscope in place (the actual colour may differ from what you may see):

    The cables are hidden behind the foam:

    Check if you find the content of the box:

    Getting started

    First of all we need to wire up the microscope. For this we will start with the 12V power supply. Unfortunately the powersocket is inside the case, hence you have to first eat some candy in order to better find the spot ;-)

    The same holds true for the USB connection to the microcontroller board. You need to hook it up like that:

    Once done, we continue with inserting the objective lens. The lens may already be inserted, in which case you just need to check that it is centered correctly

    The microscope should look like this:

    Wire up the microscope to your computer

    In order to get the microscope working, we first need to install additional drivers. For the Daheng Camera, this would be:

    For additional information and an in-depth explanation for the UC2e system, please have a look here

    Troubleshoot

    We learn from mistakes. So let's start learning. The system is fully open, meaning you can adjust and change the vast majority of the parts on your own. The entire system consists of the openUC2 frame / skeleton and the 3D printed housing to shield it from dust and light. By removing all M3 cylindrical screws, you can detach the housing from the inner structure to eventually repair or alter the system.

    A 2.5mm hex key will help you for finishing this job:

    Lift the lid and the microscope will follow (make sure all cables are detached):

    Now you can start working on the "inner bits":

    In Action

    Here you see the extended focussing of the objective lens:

    Connecting the microscope to the browser and controlling it

    We encourage you to use the UC2ified ImSwitch software to control the microscope. You can find it in this repository: https://github.com/openUC2/ImSwitch/

    However, if you want to quick-start the microscope and see if it works, you can open your browser and use the WEB-Serial interface to interact with the microscope.

    Go to https://youseetoo.github.io/ and connect to your board (most right option saying ESP32 DEV-based UC2 standalone board V2). Select the COM Port which is holding the ESP32 and hit the LOG option, once the dialog opens. The alternative option will help you updating the firmware on the device. An in-depth explanation on how the firmware works can be found here.

    In general, you need to send JSON strings in order to control the system. The strings relevant for the Z-microscope are:

    Home the Z-axis

    It's important to always home the motors in order to prevent them from getting stuck in an end position (ATTENTION!). The following string will move the motor until the endstop is hit. Afterwards it will release the switch:

    {"task":"/home_act", "home": {"steppers": [{"stepperid":3, "timeout": 2000, "speed": 15000, "direction":1, "endposrelease":3000}]}}

    Afterwards the internal position is set to 0. You can check that by entering:

    {"task": "/motor_get"}

    Move the Z-axis:

    The motor (Nema12) with 200 steps/revolution runs with 16 microsteps and offers a leadscrew with 1mm/revolution. Hence, one step corresponds to 312.5nm. Running the motor can be issued with the following command:

    {"task":"/motor_act",
    "motor":
    {
    "steppers": [
    { "stepperid": 3, "position": 1000, "speed": 15000, "isabs": 3, "isaccel":0}
    ]
    }
    }
    • stepperid: 3 corresponds to the Z-axis
    • position: steps to go (not physical units!)
    • speed: steps / minute (do not exceed 20000)
    • isabs: absolute or relative motion
    • isaccel: for now, use only non-accelerated motion!

    Safety

    • in case of shattered glass, make sure you don't cut yourself
    • Make sure you don't hurt yourself
    • The moving parts can potentially hurt your finger
    • The electronics - if used in a wrong way - can harm you
    • edges may be sharp, make sure you don't cut yourself
    - + \ No newline at end of file diff --git a/docs/PRODUCTION/INVESTIGATOR/ProductionXYZMicroscope/index.html b/docs/PRODUCTION/INVESTIGATOR/ProductionXYZMicroscope/index.html index d56a3f8ed..b524c90a6 100644 --- a/docs/PRODUCTION/INVESTIGATOR/ProductionXYZMicroscope/index.html +++ b/docs/PRODUCTION/INVESTIGATOR/ProductionXYZMicroscope/index.html @@ -10,7 +10,7 @@ - + @@ -27,7 +27,7 @@

    Prepare the Z-stage

    The documentation for the motorized 25mm Z-stage can be found here: https://openuc2.github.io/docs/PRODUCTION/PG_12_STAGE_Z_NEMA

    Once done, lock the Stage with the Puzzle piece with M5x8 worm screws:

    Prepare Electronics

    Bill of Material

    • UC2e v2 electronics
    • 3x A4988 Stepper driver
    • 12V power supply
    • USB micro cable
    • 3D printed case
    • 2x puzzle pieces
    • 8 M5x8 thread-only screws
    • 4x M3x8mm screws

    Assembly

    Attach the electronics board to the 3D printed assembly and tighten it with the M3 screws (cylindrical, Din906) Attach the puzzle pieces to the distal ends of the assembly and lock it with the M5 screws. For this the yet closed holes have to be opened by "drilling" it through.

    Prepare Triangle Structure

    Tubelens

    Bill of Material

    • Berrybase 100mm CCTV Lens
    • Daheng Vision IMX226 sensor
    • USB 3 Camera Cable
    • 2x Puzzlepieze
    • 8x M5x8 mm worm screw
    • 4x M3x18mm screw

    Assembly

    Adding the Baseplate

    Endstops and Illumination

    Skeleton

    Fully Assembled

    Fluo Extension

    Improvements

    Stage does not run smoothly

    You can release the pressure on the linear bearings by loosening the screws carefully. Make sure you don't introduce unnecessary play. The stage works with two v-grooves and balls in between.

    Additional images (have to be sorted)

    Safety

    TODO: Add additional information!

    • in case of shattered glass, make sure you don't cut yourself
    • Make sure you don't hurt yourself
    • The moving parts can potentially hurt your finger
    • The electronics - if used in a wrong way - can harm you
    • edges may be sharp, make sure you don't cut yourself
    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/APERTURES/index.html b/docs/PRODUCTION/Modules/APERTURES/index.html index 653e18223..ca83405ee 100644 --- a/docs/PRODUCTION/Modules/APERTURES/index.html +++ b/docs/PRODUCTION/Modules/APERTURES/index.html @@ -10,13 +10,13 @@ - +

    Apertures

    This page describes how to assemble the kinematic XY mount for moving a laser/ pinhole.

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device.

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds MirrorPart.stl
    #01-012BaseSpring LocksPart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-011Spring0,20€NONE
    #01-011Threaded Inserts M30,20€NONE
    #01-011Screw Ball head0,20€NONE

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/BEAMSPLITTER/index.html b/docs/PRODUCTION/Modules/BEAMSPLITTER/index.html index e738c8088..e13408c17 100644 --- a/docs/PRODUCTION/Modules/BEAMSPLITTER/index.html +++ b/docs/PRODUCTION/Modules/BEAMSPLITTER/index.html @@ -10,13 +10,13 @@ - +

    BEAMSPLITTER (45°)

    This page describes how to assemble the partially transparent mirror (45°) module. It uses a front-surface 50% mirror.

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds MirrorPart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-01150% MirrorAstromedia 40x30mm Partially Transparent Mirror8,00€Astromedia

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    caution

    Insert the beam splitter in such a way that the mirroring/coated surface points away from the printed part!

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/Camera/index.html b/docs/PRODUCTION/Modules/Camera/index.html index f0dd5a831..41497e13c 100644 --- a/docs/PRODUCTION/Modules/Camera/index.html +++ b/docs/PRODUCTION/Modules/Camera/index.html @@ -10,7 +10,7 @@ - + @@ -18,7 +18,7 @@

    USB Camera

    Duration: 1

    This page describes how to assemble the camera module. It has two different variations:

    • IMX214 (Arducam, LINK)
    • IMX179 (Waveshare, LINK)

    Bill of material

    Below you will find all components necessary to build this device

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    TypeDetailsLink
    Camera holderholds IMX179/214Part.stl

    Additional parts

    This is used in the current version of the setup

    TypeDetailsPriceLink
    USB CameraArducam USB IMX21440 €Amazon
    or---------
    USB CameraArducam USB IMX21440 €Amazon
    Mounting screwsDIN912 M3x12mm Screws0.40 €Würth

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    IMX214 (Arducam)

    STEP 1

    All parts you need to assemble the module:

    Remove the lens (M12/cellphone) from the camera board.

    STEP 2

    Use the DIN912 M3x12mm screws to mount the camera securely. ATTENTION: Use the decentered holes such that the camera is placed in the insert's center!

    STEP 3

    Add the insert to the cube, close it and store the cable safely.

    IMX179 (Waveshare)

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    Remove the lens (M12) from the camera board using a cross key.

    STEP 3

    Use the DIN912 M3x12mm screws to mount the camera securely. ATTENTION: Use the centered screws to have the camera's lens in the center of the module!

    STEP 4

    To remove the lens, take a cloth/tissue and pliers and knock it off with some force. Don't break the part!

    STEP 5

    Add the part to the cube and you're done.

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/Eyepiece/index.html b/docs/PRODUCTION/Modules/Eyepiece/index.html index 66074d7e1..2a6bd1af0 100644 --- a/docs/PRODUCTION/Modules/Eyepiece/index.html +++ b/docs/PRODUCTION/Modules/Eyepiece/index.html @@ -10,13 +10,13 @@ - +

    Eyepiece

    Duration: 1

    This page describes how to assemble the Eyepiece module. It uses a standard eyepiece with 22mm diameter.

    Bill of material

    Below you will find all components necessary to build this device

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    TypeDetailsLink
    Holderholds EyepiecePart.stl

    Additional parts

    This is used in the current version of the setup

    TypeDetailsPriceLink
    Eyepiece10x, 18mm Eyepiece10 €Aliexpress

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    Remove XX

    STEP 2

    Use Hotglue to permanently mount the eyepiece to the insert.

    STEP 3

    Add the insert to the cube, close it and store it safely.

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/KIN_MIR_45/index.html b/docs/PRODUCTION/Modules/KIN_MIR_45/index.html index 10cb57dbd..551704453 100644 --- a/docs/PRODUCTION/Modules/KIN_MIR_45/index.html +++ b/docs/PRODUCTION/Modules/KIN_MIR_45/index.html @@ -10,13 +10,13 @@ - +

    KINEMATIC MIRROR (45°)

    This page describes how to assemble the kinematic mirror (45°) module. It uses a Frontsurface mirror that can be tuned in 3 axis

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds screwsPart.stl
    #01-021Mirror Mountholds mirror and is movablePart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-011FS MirrorAstromedia 40x30mm Frontsurface Mirror5,00€Astromedia
    #01-024Screw (orings)M3x12, DIN9125,00€Astromedia
    #01-033Screw (Pushing)M3x20, NOT DECIDED!15,00€Link
    #01-042O-Ringr=8mm0,10€Link
    #01-051Cube5,00€Link

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    STEP 3

    STEP 4

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/KIN_MIR_90/index.html b/docs/PRODUCTION/Modules/KIN_MIR_90/index.html index 61ae4c5be..534b4a5c8 100644 --- a/docs/PRODUCTION/Modules/KIN_MIR_90/index.html +++ b/docs/PRODUCTION/Modules/KIN_MIR_90/index.html @@ -10,13 +10,13 @@ - +

    KINEMATIC MIRROR (90°)

    This page describes how to assemble the kinematic mirror (90) module. It uses a Frontsurface mirror that can be tuned in 3 axis

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds screwsPart.stl
    #01-021Mirror Mountholds mirror and is movablePart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-011FS MirrorAstromedia 40x30mm Frontsurface Mirror5,00€Astromedia
    #01-024Screw (orings)M3x12, DIN9125,00€Astromedia
    #01-033Screw (Pushing)M3x20, NOT DECIDED!15,00€Link
    #01-042O-Ringr=8mm0,10€Link
    #01-051Cube5,00€Link

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    STEP 3

    STEP 4

    STEP 5

    STEP 6

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/KIN_XY_LASER/index.html b/docs/PRODUCTION/Modules/KIN_XY_LASER/index.html index 87755e24d..026d99543 100644 --- a/docs/PRODUCTION/Modules/KIN_XY_LASER/index.html +++ b/docs/PRODUCTION/Modules/KIN_XY_LASER/index.html @@ -10,13 +10,13 @@ - +

    Kinematic XY Mount

    This page describes how to assemble the kinematic XY mount for moving a laser/ pinhole.

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device.

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds MirrorPart.stl
    #01-012BaseSpring LocksPart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-011Spring0,20€NONE
    #01-011Threaded Inserts M30,20€NONE
    #01-011Screw Ball head0,20€NONE

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    STEP 3

    STEP 4

    STEP 6

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/LENS/index.html b/docs/PRODUCTION/Modules/LENS/index.html index c297cd5a7..dd4362198 100644 --- a/docs/PRODUCTION/Modules/LENS/index.html +++ b/docs/PRODUCTION/Modules/LENS/index.html @@ -10,14 +10,14 @@ - +

    LENS

    Duration: 1

    This page describes how to assemble the Lens module. It uses a 40mm biconvex/biconcave glass lens with varying diameter

    Bill of material

    Below you will find all components necessary to build this device

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDTypeDetailsLink
    #01-01Holderholds LensPart.stl
    #01-02Lidfixes lens using screw mechanismPart.stl

    Additional parts

    This is used in the current version of the setup

    IDTypeDetailsPriceLink
    #01-03Lens (50mm)f'=50mm, Diameter 40mm2 €Aliexpress
    or
    #01-04Lens (100mm)f'=100mm, Diameter 40mm2 €Aliexpress
    or
    #01-05Lens (-50mm)f'=-50mm, Diameter 40mm2 €Aliexpress

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    Thread in the round piece and fix the lens. Make sure you use the right ring for the right lens!

    caution

    Clean the lenses with a cotton cloth.

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/MIR_45/index.html b/docs/PRODUCTION/Modules/MIR_45/index.html index a5901f576..e41e049b4 100644 --- a/docs/PRODUCTION/Modules/MIR_45/index.html +++ b/docs/PRODUCTION/Modules/MIR_45/index.html @@ -10,13 +10,13 @@ - +

    Mirror (45°)

    This page describes how to assemble the standard fold mirror (45°) module. It uses a Frontsurface mirror.

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds MirrorPart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-011MirrorAstromedia 40x30mm Partially Transparent Mirror8,00€Astromedia

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    STEP 3

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/POLARIZER_ROTATING/index.html b/docs/PRODUCTION/Modules/POLARIZER_ROTATING/index.html index 414c97528..634e4a034 100644 --- a/docs/PRODUCTION/Modules/POLARIZER_ROTATING/index.html +++ b/docs/PRODUCTION/Modules/POLARIZER_ROTATING/index.html @@ -10,13 +10,13 @@ - +

    Polarization Rotator

    This page describes how to assemble the kinematic XY mount for moving a laser/ pinhole.

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device.

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds MirrorPart.stl
    #01-012BaseSpring LocksPart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-011Spring0,20€NONE
    #01-011Threaded Inserts M30,20€NONE
    #01-011Screw Ball head0,20€NONE

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    STEP 3

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/SAMPLE_HOLDEr/index.html b/docs/PRODUCTION/Modules/SAMPLE_HOLDEr/index.html index 84324d74b..f758c0eb6 100644 --- a/docs/PRODUCTION/Modules/SAMPLE_HOLDEr/index.html +++ b/docs/PRODUCTION/Modules/SAMPLE_HOLDEr/index.html @@ -10,13 +10,13 @@ - +

    Sample Holder

    This page describes how to assemble the kinematic XY mount for moving a laser/ pinhole.

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device.

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds MirrorPart.stl
    #01-012BaseSpring LocksPart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-011Spring0,20€NONE
    #01-011Threaded Inserts M30,20€NONE
    #01-011Screw Ball head0,20€NONE

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    :::alert

    this has been updated with magnets! Make sure you label the magnets with prior to insertion so that polarity is correct.

    :::

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/STAGE_Z_MANUAL/index.html b/docs/PRODUCTION/Modules/STAGE_Z_MANUAL/index.html index 3bd137524..59b64cea3 100644 --- a/docs/PRODUCTION/Modules/STAGE_Z_MANUAL/index.html +++ b/docs/PRODUCTION/Modules/STAGE_Z_MANUAL/index.html @@ -10,13 +10,13 @@ - +

    Kinematic XY Mount / Laser

    This page describes how to assemble the kinematic XY mount for moving a laser/ pinhole.

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device.

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds MirrorPart.stl
    #01-012BaseSpring LocksPart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-011Spring0,20€NONE
    #01-011Threaded Inserts M30,20€NONE
    #01-011Screw Ball head0,20€NONE

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    STEP 3

    STEP 4

    STEP 5

    STEP 6

    STEP 7

    STEP 8

    STEP 9

    STEP 10

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/STAGE_Z_NEMA/index.html b/docs/PRODUCTION/Modules/STAGE_Z_NEMA/index.html index 5853fe5e0..dc9fc82a9 100644 --- a/docs/PRODUCTION/Modules/STAGE_Z_NEMA/index.html +++ b/docs/PRODUCTION/Modules/STAGE_Z_NEMA/index.html @@ -10,13 +10,13 @@ - +

    Z-Stage Motorized NEMA12 25mm

    This page describes how to assemble the kinematic XY mount for moving a laser/ pinhole.

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device.

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds MirrorPart.stl
    #01-012BaseSpring LocksPart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-011Spring0,20€NONE
    #01-011Threaded Inserts M30,20€NONE
    #01-011Screw Ball head0,20€NONE

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    STEP 3

    STEP 4

    STEP 5

    STEP 6

    STEP 7

    STEP 8

    STEP 9

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/PRODUCTION/Modules/TORCH/index.html b/docs/PRODUCTION/Modules/TORCH/index.html index b53f33d76..d49000a69 100644 --- a/docs/PRODUCTION/Modules/TORCH/index.html +++ b/docs/PRODUCTION/Modules/TORCH/index.html @@ -10,13 +10,13 @@ - +

    Torch

    This page describes how to assemble the kinematic XY mount for moving a laser/ pinhole.

    Duration: 1

    Bill of material

    Below you will find all components necessary to build this device.

    3D printing files

    All these files need to be printed.

    Printing parameters:

    InfillLayerheightSpecial ProfileMaterial
    ~20%0.25/0.3mmmini/i3PLA (black)
    IDAmountTypeDetailsLink
    #01-011BaseHolds MirrorPart.stl
    #01-012BaseSpring LocksPart.stl

    Additional parts

    This is used in the current version of the setup

    IDAmountTypeDetailsPriceLink
    #01-011Spring0,20€NONE
    #01-011Threaded Inserts M30,20€NONE
    #01-011Screw Ball head0,20€NONE

    Assembly

    Duration: 1

    Below we describe how the device can be built and assembled in order to replicate the whole system.

    STEP 1

    All parts you need to assemble the module:

    STEP 2

    Design files

    The original design files are in the INVENTOR folder.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/CHINESE/uc2miniboxCN/index.html b/docs/Toolboxes/DiscoveryCore/CHINESE/uc2miniboxCN/index.html index c708e83d6..27527f219 100644 --- a/docs/Toolboxes/DiscoveryCore/CHINESE/uc2miniboxCN/index.html +++ b/docs/Toolboxes/DiscoveryCore/CHINESE/uc2miniboxCN/index.html @@ -10,7 +10,7 @@ - + @@ -23,7 +23,7 @@ 显微镜物镜: 一种特殊的透镜系统,可以放大物体

    接下来将通过一个视频介绍一个MiniBox(2022年10月的版本)里面有什么:

    什么是UC2?

    UC2项目的核心元素是一个简单的立方体。 这个立方体由两半部分组成,通常内有一个可以滑动的插件。 插件可以容纳各种光学元件(例如透镜、反光镜),这意味着每个立方体可以通过安装不同的插件来实现不同的功能。

    立方体类型1:带插头连接注塑成型件

    底板

    底板

    立方体可以安装在底板上。底板模块可以像拼图一样组合在一起。

    自行打印UC2

    UC2立方体也可以3D打印。它看起来与注塑模型相同,但这里它由立方体盖和立方体体组成,用螺丝固定在一起。螺丝非常适合放在磁性板上。通过结合不同的立方体模块,可以轻松组装不同的光学结构。每个骰子可以增加一个新功能。你的创造力没有限制。

    立方体类型2:带磁性连接的3D打印件

    立方体

    带磁铁的底板

    在3D打印的底板中有小的球形磁铁,立方体就放在这些磁铁上。

    想要更多的立方体?那你可以自行3D打印。你可以在这里找到所有信息

    这就是立方体如何组合在一起

    持续时间:1分钟

    确保立方体正确放置在板上,并且没有倾斜。最后重要的是插件放置在正确的位置。

    如果你没有看到清晰的图像,移动插件(例如透镜),直到你看到它很清楚。图片中的绿色箭头显示了如何做。

    这里你可以找到一个小视频,解释了立方体的核心概念

    文档内容:

    符号是什么意思?

    实验 如果你看到这个方块,说明有实验可做!你可以在这个方块上放置一个UC2立方体。
    解释:如果你看到这个图标,说明有东西可以学习!
    账单:这里有东西需要计算。拿起笔和纸开始解谜。
    注意:不要用手指触摸玻璃表面!
    清洁镜头:如果你已经触摸了镜头,你可以用眼镜布来清洁它。

    透镜能做什么?

    持续时间:2分钟

    拿一个或多个内有透镜的立方体,看着这里展示的UC2符号。手持立方体,改变透镜和图像之间的距离。

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/ENGLISH/CoreLens/index.html b/docs/Toolboxes/DiscoveryCore/ENGLISH/CoreLens/index.html index b050dd6d8..28fbabf84 100644 --- a/docs/Toolboxes/DiscoveryCore/ENGLISH/CoreLens/index.html +++ b/docs/Toolboxes/DiscoveryCore/ENGLISH/CoreLens/index.html @@ -10,7 +10,7 @@ - + @@ -18,7 +18,7 @@

    Lens

    Lenses

    In ray optics, light is represented as a bundle of rays (arrows), which simplifies the physical properties of light. A ray has a direction and is therefore drawn with an arrow. A lens "refracts" the beam, changing its direction.

    The focal length of a lens corresponds to the distance from the lens to the focal plane on which the focal point lies. It is given in millimeters (f = mm).

    Converging (positive) and diverging (negative) lenses

    Converging lenses refract the rays of light traveling parallel to the optical axis at a point called the focal point.

    The diverging lenses refract the rays of light traveling parallel to the optical axis as if they originated from a point called the "virtual" focus.

    Lenses “refract” the rays of light

    You can find the focal length of the lens as a printed number on the lens holder. The MiniBOX contains a 100mm converging lens, two 40mm converging lenses and a -50mm negative lens. The numbers indicate the focal length.

    The converging lens is also called a positive or convex lens. The middle part of the lens is always thicker than the edge.

    The converging lens enlarges the image. The magnification is different for the 40mm lens and the 100mm lens. The image can be upright or inverted.

    The negative lens (diverging lens) is sometimes also called a negative or concave lens. The middle part of the lens is always thinner than the edge.

    With the negative lens (here: -50 mm lens) the image is always reduced and always upright

    We assume that our lenses are so-called "thin lenses". This means we can consider them as one plane and not care about their thickness. This makes explanations and calculations much easier.

    Did the answers raise any more questions? Then dive in to find out exactly how lenses work...

    Lens image

    Now take the lens cubes. With the right lens, try to decipher the focal length information in the cubes shown. Move the lens over the writing until it is the same size as the "UC2" text.

    Can you see the text the same size and orientation as the "UC2"? What happens when you change the distance between the lens and the image?

    What happens if you use a lens with the wrong focal length?

    Image of an object through a positive lens

    Let's take the converging lens as an example. We start with an object (green arrow) and see what happens to the rays that start from the top. There are infinitely many rays in all directions, but for drawing the figure the following three rays will suffice:

    1. The centre beam (orange) passes undisturbed through the center of the lens.
    2. The focus ray (yellow) also starts from the tip of the arrow, but goes through the object-side focus at focal length f. After the lens, it continues at the same height, but now parallel to the optical axis.
    3. The parallel beam (red) initially runs parallel to the optical axis, but is then refracted at the lens in such a way that it passes through the focal point on the image side at focal length f.

    The image is formed where all the rays intersect. The principle is used for all points or the rays of an object emanating from them. Depending on which lens is used and depending on the position of the object, the properties of the image change, such as size, orientation and position.

    Image of an object through a negative lens

    In the case of the negative lens, we use the same method to image the ray path. Unlike the case of the converging lens, the image is always reduced and virtual. Magnification depends on the position of the object in front of the lens. Unlike the converging lens, the image is created on the object side and is therefore called a virtual image. You can see it directly with your eyes but not project it onto a screen.

    The way a lens creates an image is predictable by knowing the focal length of that lens. Therefore, a certain distance must be maintained so that you can see the writing with the specified lens on the previous sheet.

    The magnification and the location where the image is formed depend on the focal length of the lens and the distance between the lens and the object.

    With the diverging lens (f = -50 mm) you always see a reduced virtual image. A virtual image can only be viewed with the eye. So far we have only seen virtual images.

    The converging lens as a magnifying glass

    Take the UC2 lens cube with focal length f=40mm and use it as a magnifying glass.

    Can you read the small letters through the converging lens? What is written there?

    A lens in action can be found here:

    That's what converging lenses do

    With the converging lenses, the image and the magnification depend on the position of the object.

    If the distance between the object and the lens is more than twice the focal length of the lens, then the image is...

    • Inverted (upside down)
    • Swapped sides
    • Reduced
    • Real

    If the distance between the object and the lens is exactly twice the focal length of the lens, then the image is...

    • Inverted (upside down)
    • Swapped sides
    • Same size
    • Real

    If the distance between the object and the lens is more than the focal length and less than twice the focal length of the lens, then the image is...

    • Inverted (upside down)
    • Swapped sides
    • Magnified
    • Real

    Object distance (g)

    The distance between the object and the lens plane is called g.

    Image distance (b)

    The distance between the lens plane and the image formed by the lens is denoted as b.

    The converging lens can produce a real image. The real image can then be seen on a screen.

    That's why the magnifying glass enlarges

    Magnifying glass effect!

    If the distance between the object and the lens is less than the focal length of the lens, then the image is...

    • upright
    • Not laterally reversed
    • Magnified
    • Virtual

    The magnifying glass is the simplest of all optical devices, since it consists only of a single converging lens with a suitable focal length. Why does the cube with the 40 mm lens enlarge the small text? If the object lies within the focal length of the lens - i.e. less than 40 mm in front of the lens - the lens creates a virtual image that appears behind the actual object. The eye perceives it as enlarged. Check out the diagram above.

    Calculate the magnification of the magnifying glass using the following formula:

    250 mm is the distance of distinct vision - i.e. the distance between the object and the eye at which most people can read well. More on this later in the “accommodation” of the eye.

    How does a cinema projector work?

    Take the UC2 lens cube with focal length f = 40 mm and place it behind the sample holder cube. The distance between the object and the lens (i.e. the object distance g) should be approx. 50 mm. If you now illuminate the object with the flashlight, you will see it sharply at a distance of approx. 200 mm on the wall. A cinema projector has a film strip instead of the object and of course a much stronger light source.

    Use a flashlight (e.g. from your cell phone) as a light source and hold it in front of the object

    Use the image or text on the microscope slide as the object

    How is the image oriented? Slide the lens back and forth in the cube and see when the image is in focus. Find the image for g = 50mm, 60mm, 65mm and measure the distance between the lens and the image.

    How does a cinema projector work?

    Where is the picture?

    When an object is imaged through a converging lens, the position and size of the image depend on the distance (g) of the object to the lens and its focal length (f). The lens equation describes the relationship between image distance (b) and object distance (g):

    How big is the picture?

    The magnification of the object on the screen can easily be calculated using the following formula:

    How the projector works

    Check if your observation agrees with the calculation

    Calculate the magnification of the projector for the different values of g and b.

    Our lens has a focal length of f= 40 mm.

    For g = 50mm → b = 200mm

    For g = 60 mm → b = 120 mm

    For g = 65 mm → b = 104 mm


    The projector always produces an enlarged, inverted (reversed) image. The position of the image and its magnification depend on the position and size of the object.

    Tutorial: Determining the Focal Distance of a Positive Lens

    Materials needed:

    • Light source (e.g., the room's illumination)
    • Positive lens
    • Screen (e.g. table, piece of paper, etc.)

    Instructions:

    1. Position the positive lens so that it faces the light source. Align a screen parallel to the focal plane of the lens.
    2. Modify the distance between the lens and the screen.
    3. Carefully observe and record the position at which the light source forms a clear image on the surface of the screen.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/ENGLISH/CoreTelescope/index.html b/docs/Toolboxes/DiscoveryCore/ENGLISH/CoreTelescope/index.html index d18428c81..ccbcd7477 100644 --- a/docs/Toolboxes/DiscoveryCore/ENGLISH/CoreTelescope/index.html +++ b/docs/Toolboxes/DiscoveryCore/ENGLISH/CoreTelescope/index.html @@ -10,7 +10,7 @@ - + @@ -28,7 +28,7 @@ Search for an object to the distance and use Galileo's telescope to look at it.

    What is a Kepler telescope?

    Set the lenses in the correct positions as shown in the diagram. Then look through the telescope into the distance.

    What does the picture look like? How is the image oriented?

    As you look through the telescope, vary the distances between the components to see such a sharp image!

    This is a Kepler telescope

    This type of telescope is often used in astronomy.

    This is how the Kepler telescope works

    What is the magnification of this Kepler telescope?

    Formula for calculating magnification

    This telescope can achieve a higher magnification than the Galilean telescope. But it produces an inverted image. However, this is not a problem for observing the stars.

    The picture is always
    • Magnified by the magnification from the formula above
    • Inverted (upside down)
    • Sides reversed

    The field of view is larger than with the Galileo telescope.


    Tutorial: Kepler's Telescope

    Materials needed:

    • Eight base plates
    • 100 mm positive lens (in cube)
    • 50 mm positive lens (in cube)
    • Two empty cubes

    Diagram (side view):

    Instructions for assembling Kepler's telescope:

    Step 1: Align the cubes

    Align the cubes such that the two lenses lay at the extremes and the two empty cubes in the middle.

    Step 2: Fix the cubes with base plates

    Fix the cubes with the base plates placing them on top and on the bottom.

    Step 3: Adjust the distance

    Adjust the distance between the lenses as shown in the image.

    Step 4: Use Kepler's telescope

    Look for an object to the distance and use Kepler's telescope to look at it.

    What is a spotting scope?

    The spotting scope is long, so the scheme is not the same size. Set the lenses in the correct positions as shown in the diagram and look into the distance through the telescope.

    which results in

    How does the image here compare to the Kepler telescope?

    As you look through the telescope, adjust the distances between the components to see a sharp image!

    This is how the spotting scope works

    The magnification is like that of the Kepler telescope. The erecting lens only changes the orientation (the image is reversed), not the magnification.

    An upright image is necessary for terrestrial observations. True terrestrial telescopes use prism systems to rotate the image and keep it compact.

    The picture is
    • Magnified at the same magnification as the Kepler telescope
    • Upright
    • mirrored

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/ENGLISH/coreMicroscope/index.html b/docs/Toolboxes/DiscoveryCore/ENGLISH/coreMicroscope/index.html index f2202ddf8..d28e7a63c 100644 --- a/docs/Toolboxes/DiscoveryCore/ENGLISH/coreMicroscope/index.html +++ b/docs/Toolboxes/DiscoveryCore/ENGLISH/coreMicroscope/index.html @@ -10,7 +10,7 @@ - + @@ -30,7 +30,7 @@

    Materials needed:

    • Microscope objective 4x
    • Microscope objective long mount with gear
    • Ramsden-Eyepiece (in cube)
    • Two non-kinematic mirrors (in cubes)
    • Sample holder (in cube)
    • Three empty cubes
    • 11 base plates
    • Smartphone base plate
    • Torch lamp
    • 50 mm lens (in cube)

    Diagram (Side view):

    Instructions for assembling the Smartphone Microscope:

    Step 1: Build a four-base plate line

    Step 2: Assemble the components

    Place the Microscope objective mount on one extreme followed by the two mirrors facing each other and one empty cube in the other extreme. Fix them with base plates.

    Step 3: Adjust the objective

    Build one cube with the microscope objective inside. Adjust the objective's height if necessary by using the gear.

    Step 4: Place the eyepiece

    Place the eyepiece next to the microscope objective and one empty cube next to it. Mind the right orientation of the eyepiece.

    Step 5: Align the smartphone base

    Place the smartphone base with the hole aligned with the eyepiece. Note: You can adjust the orientation of the smartphone base to adapt your smartphone's size.

    Step 6: Set up the sample holder

    Place the sample holder cube on top of the microscope objective. Mind the distance between them. You can adjust the coarse distance by sliding the sample holder inside the cube and the finer distance by using the gear.

    Step 7: Add the converging lens and lamp

    Place a converging lens cube on top of the sample holder cube and place the torch lamp on top. Place the smartphone aligned to the eyepiece.

    Step 8: Adjust for clarity

    Try to move the smartphone such that the whole eyepiece circle appears illuminated. Then, turn the gear to focus and get a sharp image of the specimen.

    Better with smartphone or eye?

    The smartphone camera has a lens with a very short focal length because it has to fit into the thin smartphone. The lens then creates an image on the camera sensor whose properties are similar to those of the human eye.

    The eye can see objects from both a distance and near. This property is called accommodation.

    The smartphone camera can also do this, but it is called autofocus. It describes the ability to sharply image objects at different distances on the sensor.

    The image from the eyepiece comes in parallel rays, as if coming from infinity. You observed with a relaxed eye (looking into the distance) or with a camera focused at infinity.


    Calculation results

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/ENGLISH/uc2miniboxEN/index.html b/docs/Toolboxes/DiscoveryCore/ENGLISH/uc2miniboxEN/index.html index 751475a5f..1eaaa9b70 100644 --- a/docs/Toolboxes/DiscoveryCore/ENGLISH/uc2miniboxEN/index.html +++ b/docs/Toolboxes/DiscoveryCore/ENGLISH/uc2miniboxEN/index.html @@ -10,7 +10,7 @@ - + @@ -24,7 +24,7 @@ docs/01_Toolboxes/01_DiscoveryCore/IMAGES/MINIBOX/2.png

    If you don't see a sharp image, move the inserts (e.g. lens) until you see it clearly. The green arrow in the picture shows you how to do this.

    Here you can find a small video that explains the core concept of the cube

    What do the symbols mean?

    Duration: 2

    Experiment If you see this block, there is something to experiment with! You can place a UC2 cube on this block.
    Explanations: If you see this icon, there's something to learn!
    Calculations: There is something to calculate here. Take a pen and paper and start puzzling.
    Caution: Do not touch the glass surfaces with your fingers!
    Cleaning the lenses: If you have already touched the lens, you can clean it with a glasses cloth.

    What can a lens do?

    Duration: 2

    Take one or more of the cubes that have a lens in them and look at the UC2 symbol shown here. Hold the cube in your hand and change the distance between the lens and the image.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/FRENCH/CoreLensFR/index.html b/docs/Toolboxes/DiscoveryCore/FRENCH/CoreLensFR/index.html index 7c238836a..a016cfc5f 100644 --- a/docs/Toolboxes/DiscoveryCore/FRENCH/CoreLensFR/index.html +++ b/docs/Toolboxes/DiscoveryCore/FRENCH/CoreLensFR/index.html @@ -10,13 +10,13 @@ - +

    Lentille

    Lentilles

    En optique géométrique, la lumière est représentée comme un faisceau de rayons (flèches), ce qui simplifie les propriétés physiques de la lumière. Un rayon a une direction et est donc dessiné avec une flèche. Une lentille "réfracte" le faisceau, changeant sa direction.

    La distance focale d'une lentille correspond à la distance entre la lentille et le plan focal sur lequel se trouve le point focal. Elle est donnée en millimètres (f = mm).

    Lentilles convergentes (positives) et divergentes (négatives)

    Les lentilles convergentes réfractent les rayons lumineux se déplaçant parallèlement à l'axe optique en un point appelé le point focal.

    Les lentilles divergentes réfractent les rayons lumineux se déplaçant parallèlement à l'axe optique comme s'ils provenaient d'un point appelé le foyer "virtuel".

    Les lentilles “réfractent” les rayons lumineux

    Vous pouvez trouver la distance focale de la lentille comme un numéro imprimé sur le support de lentille. La MiniBOX contient une lentille convergente de 100 mm, deux lentilles convergentes de 40 mm et une lentille négative de -50 mm. Les numéros indiquent la distance focale.

    La lentille convergente est également appelée lentille positive ou convexe. La partie centrale de la lentille est toujours plus épaisse que le bord.

    La lentille convergente agrandit l'image. Le grossissement est différent pour la lentille de 40 mm et celle de 100 mm. L'image peut être droite ou inversée.

    La lentille négative (lentille divergente) est parfois aussi appelée lentille négative ou concave. La partie centrale de la lentille est toujours plus fine que le bord.

    Avec la lentille négative (ici : lentille de -50 mm), l'image est toujours réduite et toujours droite.

    Nous supposons que nos lentilles sont des "lentilles minces". Cela signifie que nous pouvons les considérer comme un plan unique sans nous soucier de leur épaisseur. Cela rend les explications et les calculs beaucoup plus simples.

    Les réponses ont-elles suscité d'autres questions ? Alors découvrez exactement comment fonctionnent les lentilles...

    Image de la lentille

    Prenez maintenant les cubes de lentilles. Avec la bonne lentille, essayez de déchiffrer les informations sur la distance focale dans les cubes montrés. Déplacez la lentille sur l'écriture jusqu'à ce qu'elle soit de la même taille que le texte "UC2".

    Pouvez-vous voir le texte de la même taille et orientation que le "UC2" ? Que se passe-t-il lorsque vous changez la distance entre la lentille et l'image ?

    Que se passe-t-il si vous utilisez une lentille avec la mauvaise distance focale ?

    Image d'un objet à travers une lentille positive

    Prenons la lentille convergente comme exemple. Nous commençons par un objet (flèche verte) et voyons ce qui arrive aux rayons qui partent du sommet. Il y a une infinité de rayons dans toutes les directions, mais pour dessiner la figure, les trois rayons suivants suffiront :

    1. Le rayon central (orange) passe sans être perturbé par le centre de la lentille.
    2. Le rayon focal (jaune) commence également à partir de la pointe de la flèche, mais passe par le foyer côté objet à une distance focale f. Après la lentille, il continue à la même hauteur, mais maintenant parallèlement à l'axe optique.
    3. Le rayon parallèle (rouge) se déplace d'abord parallèlement à l'axe optique, mais est ensuite réfracté à la lentille de manière à passer par le point focal côté image à une distance focale f.

    L'image se forme là où tous les rayons se croisent. Le principe est utilisé pour tous les points ou les rayons d'un objet émanant d'eux. Selon la lentille utilisée et selon la position de l'objet, les propriétés de l'image changent, telles que la taille, l'orientation et la position.

    Image d'un objet à travers une lentille négative

    Dans le cas de la lentille négative, nous utilisons la même méthode pour imager le chemin des rayons. Contrairement au cas de la lentille convergente, l'image est toujours réduite et virtuelle. Le grossissement dépend de la position de l'objet devant la lentille. Contrairement à la lentille convergente, l'image est créée du côté objet et est donc appelée une image virtuelle. Vous pouvez la voir directement avec vos yeux, mais ne pas la projeter sur un écran.

    La façon dont une lentille crée une image est prévisible en connaissant la distance focale de cette lentille. Par conséquent, une certaine distance doit être maintenue afin que vous puissiez voir l'écriture avec la lentille spécifiée sur la feuille précédente.

    Le grossissement et l'emplacement où l'image est formée dépendent de la distance focale de la lentille et de la distance entre la lentille et l'objet.

    Avec la lentille divergente (f = -50 mm), vous voyez toujours une image virtuelle réduite. Une image virtuelle ne peut être vue qu'avec l'œil. Jusqu'à présent, nous n'avons vu que des images virtuelles.

    La lentille convergente comme loupe

    Prenez le cube de lentilles UC2 avec une distance focale f = 40 mm et utilisez-le comme une loupe.

    Pouvez-vous lire les petites lettres à travers la lentille convergente ? Qu'est-ce qui est écrit là ?

    Une lentille en action peut être trouvée ici :

    Ce que font les lentilles convergentes

    Avec les lentilles convergentes, l'image et le grossissement dépendent de la position de l'objet.

    Si la distance entre l'objet et la lentille est supérieure à deux fois la distance focale de la lentille, alors l'image est...

    • Inversée
    • Côté opposé
    • Réduite
    • Réelle

    Si la distance entre l'objet et la lentille est exactement deux fois la distance focale de la lentille, alors l'image est...

    • Inversée
    • Côté opposé
    • Même taille
    • Réelle

    Si la distance entre l'objet et la lentille est supérieure à la distance focale et inférieure à deux fois la distance focale de la lentille, alors l'image est...

    • Inversée
    • Côté opposé
    • Agrandie
    • Réelle

    Distance de l'objet (g)

    La distance entre l'objet et le plan de la lentille est appelée g.

    Distance de l'image (b)

    La distance entre le plan de la lentille et l'image formée par la lentille est désignée comme b.

    La lentille convergente peut produire une image réelle. L'image réelle peut ensuite être vue sur un écran.

    Pourquoi la loupe agrandit-elle ?

    Effet de loupe !

    Si la distance entre l'objet et la lentille est inférieure à la distance focale de la lentille, alors l'image est...

    • Droite
    • Côté droit
    • Agrandie
    • Virtuelle

    La loupe est le plus simple de tous les dispositifs optiques, car elle consiste uniquement en une simple lentille convergente avec une distance focale appropriée. Pourquoi le cube avec les 50 mm agrandit-il le petit texte ? Si l'objet est devant la distance focale de la lentille - c'est-à-dire à moins de 50 mm devant la lentille - la lentille crée une image virtuelle qui se trouve derrière l'objet réel. L'œil la perçoit agrandie. Consultez le schéma ci-dessus.

    Calculez le grossissement de la loupe en utilisant la formule suivante :

    250 mm est la distance de vision distincte - c'est-à-dire la distance entre l'objet et l'œil à laquelle la plupart des gens peuvent bien lire. Plus d'informations à ce sujet plus tard dans la section “accommodation” de l'œil.

    Comment

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/FRENCH/CoreTelescopeFR/index.html b/docs/Toolboxes/DiscoveryCore/FRENCH/CoreTelescopeFR/index.html index 5911f1cf2..80ac7b837 100644 --- a/docs/Toolboxes/DiscoveryCore/FRENCH/CoreTelescopeFR/index.html +++ b/docs/Toolboxes/DiscoveryCore/FRENCH/CoreTelescopeFR/index.html @@ -10,7 +10,7 @@ - + @@ -27,7 +27,7 @@ Cherchez un objet au loin et utilisez le télescope de Galilée pour le regarder.

    Qu'est-ce qu'un télescope de Kepler ?

    Placez les lentilles dans les positions correctes comme indiqué dans le schéma. Ensuite, regardez au loin à travers le télescope.

    À quoi ressemble l'image ? Comment est l'image orientée ?

    En regardant à travers le télescope, variez les distances entre les composants pour voir une image nette !

    Voici un télescope de Kepler

    Ce type de télescope est souvent utilisé en astronomie.

    Voici comment fonctionne le télescope de Kepler

    Quelle est l'augmentation de ce télescope de Kepler ?

    Formule pour calculer l'agrandissement

    Ce télescope peut atteindre un grossissement plus élevé que le télescope de Galilée. Mais il crée une image inversée. Cependant, ce n'est pas un problème pour observer les étoiles.

    L'image est toujours
    • Agrandie par le grossissement de la formule ci-dessus
    • Inversée
    • Côtés inversés

    Le champ de vision est plus grand qu'avec le télescope de Galilée.


    Tutoriel : télescope de Kepler

    Matériaux nécessaires :

    • Huit plaques de base
    • Lentille positive de 100 mm (dans un cube)
    • Lentille positive de 50 mm (dans un cube)
    • Deux cubes vides

    Schéma (vue latérale) :

    Instructions pour assembler le télescope de Kepler :

    Étape 1 : Alignez les cubes

    Alignez les cubes de sorte que les deux lentilles soient aux extrémités et les deux cubes vides au milieu.

    Étape 2 : Fixez les cubes avec des plaques de base

    Fixez les cubes avec les plaques de base en les plaçant en haut et en bas.

    Étape 3 : Ajustez la distance

    Ajustez la distance entre les lentilles comme montré dans l'image.

    Étape 4 : Utilisez le télescope de Kepler

    Cherchez un objet au loin et utilisez le télescope de Kepler pour le regarder.

    Qu'est-ce qu'une longue-vue ?

    La longue-vue est longue, donc le schéma n'est pas à la même taille. Placez les lentilles dans les positions correctes comme indiqué dans le schéma et regardez au loin à travers le télescope.

    ce qui donne

    Comment l'image ici se compare-t-elle au télescope de Kepler ?

    En regardant à travers le télescope, ajustez les distances entre les composants pour voir une image nette !

    Voici comment fonctionne la longue-vue

    Le grossissement est comme celui du télescope de Kepler. La lentille redressante ne change que l'orientation (l'image est inversée), pas le grossissement.

    Une image droite est nécessaire pour les observations terrestres. Les véritables télescopes terrestres utilisent des systèmes de prismes pour tourner l'image et la garder compacte.

    L'image est
    • Agrandie au même grossissement que le télescope de Kepler
    • Droite
    • Miroir

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/FRENCH/coreMicroscopeFR/index.html b/docs/Toolboxes/DiscoveryCore/FRENCH/coreMicroscopeFR/index.html index c29ad1c83..e959a31ff 100644 --- a/docs/Toolboxes/DiscoveryCore/FRENCH/coreMicroscopeFR/index.html +++ b/docs/Toolboxes/DiscoveryCore/FRENCH/coreMicroscopeFR/index.html @@ -10,7 +10,7 @@ - + @@ -30,7 +30,7 @@

    Matériaux nécessaires :

    • Objectif de microscope 4x
    • Support long d'objectif de microscope avec engrenage
    • Oculaire de Ramsden (dans un cube)
    • Deux miroirs non cinématiques (dans des cubes)
    • Porte-échantillon (dans un cube)
    • Trois cubes vides
    • 11 plaques de base
    • Plaque de base pour smartphone
    • Lampe torche
    • Lentille de 50 mm (dans un cube)

    Schéma (vue latérale) :

    Instructions pour assembler le microscope pour smartphone :

    Étape 1 : Construire une ligne de quatre plaques de base

    ![](../IMAGES/MINIBOXTUTORIAL/image49.jpg)

    Étape 2 : Assembler les composants

    Placez le support d'objectif de microscope à une extrémité suivi des deux miroirs se faisant face et d'un cube vide à l'autre extrémité. Fixez-les avec des plaques de base.

    Étape 3 : Ajuster l'objectif

    Construisez un cube avec l'objectif de microscope à l'intérieur. Ajustez la hauteur de l'objectif si nécessaire en utilisant l'engrenage.

    Étape 4 : Placer l'oculaire

    Placez l'oculaire à côté de l'objectif de microscope et un cube vide à côté. Respectez la bonne orientation de l'oculaire.

    Étape 5 : Aligner la base du smartphone

    Placez la base du smartphone avec le trou aligné avec l'oculaire. Remarque : Vous pouvez ajuster l'orientation de la base du smartphone pour adapter la taille de votre smartphone.

    Étape 6 : Installer le porte-échantillon

    Placez le cube porte-échantillon au-dessus de l'objectif de microscope. Respectez la distance entre eux. Vous pouvez ajuster la distance approximative en faisant glisser le porte-échantillon à l'intérieur du cube et la distance plus fine en utilisant l'engrenage.

    Étape 7 : Ajouter la lentille convergente et la lampe

    Placez un cube de lentille convergente au-dessus du cube porte-échantillon et placez la lampe torche au-dessus. Placez le smartphone aligné avec l'oculaire.

    Étape 8 : Ajuster pour la clarté

    Essayez de déplacer le smartphone de manière à ce que tout le cercle de l'oculaire apparaisse illuminé. Ensuite, tournez l'engrenage pour focaliser et obtenir une image nette de l'échantillon.

    Mieux avec le smartphone ou l'œil ?

    L'appareil photo du smartphone a une lentille avec une distance focale très courte car elle doit s'adapter à l'épaisseur du smartphone. La lentille crée alors une image sur le capteur de la caméra dont les propriétés sont similaires à celles de l'œil humain.

    L'œil peut voir des objets à la fois de loin et de près. Cette propriété est appelée accommodation.

    L'appareil photo du smartphone peut également faire cela, mais cela s'appelle autofocus. Cela décrit la capacité à imager nettement des objets à différentes distances sur le capteur.

    L'image de l'oculaire provient de rayons parallèles, comme si elle venait de l'infini. Vous avez observé avec un œil détendu (regardant au loin) ou avec une caméra mise au point à l'infini.


    Résultats des calculs

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/FRENCH/uc2miniboxFR/index.html b/docs/Toolboxes/DiscoveryCore/FRENCH/uc2miniboxFR/index.html index 2eec4cce8..6f6d2ecba 100644 --- a/docs/Toolboxes/DiscoveryCore/FRENCH/uc2miniboxFR/index.html +++ b/docs/Toolboxes/DiscoveryCore/FRENCH/uc2miniboxFR/index.html @@ -10,7 +10,7 @@ - + @@ -24,7 +24,7 @@ docs/01_Toolboxes/01_DiscoveryCore/IMAGES/MINIBOX/2.png

    Si vous ne voyez pas une image nette, déplacez les inserts (par exemple, la lentille) jusqu'à ce que vous la voyiez clairement. La flèche verte sur l'image vous montre comment faire.

    Ici, vous pouvez trouver une petite vidéo qui explique le concept de base du cube

    Que signifient les symboles ?

    Durée : 2

    Expérience : Si vous voyez ce bloc, il y a quelque chose à expérimenter ! Vous pouvez placer un cube UC2 sur ce bloc.
    Explications : Si vous voyez cette icône, il y a quelque chose à apprendre !
    Calculs : Il y a quelque chose à calculer ici. Prenez un stylo et du papier et commencez les puzzles.
    Attention : Ne touchez pas les surfaces en verre avec vos doigts !
    Nettoyage des lentilles : Si vous avez déjà touché la lentille, vous pouvez la nettoyer avec un chiffon pour lunettes.

    Que peut faire une lentille ?

    Durée : 2

    Prenez un ou plusieurs cubes qui contiennent une lentille et regardez le symbole UC2 montré ici. Tenez le cube dans votre main et changez la distance entre la lentille et l'image.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/GERMAN/CoreLens/index.html b/docs/Toolboxes/DiscoveryCore/GERMAN/CoreLens/index.html index 7dc30361e..3e21c0828 100644 --- a/docs/Toolboxes/DiscoveryCore/GERMAN/CoreLens/index.html +++ b/docs/Toolboxes/DiscoveryCore/GERMAN/CoreLens/index.html @@ -10,7 +10,7 @@ - + @@ -18,7 +18,7 @@

    Linse

    Linsen

    In der Strahlenoptik wird Licht als Bündel von Strahlen (Pfeilen) dargestellt, was die physikalischen Eigenschaften des Lichts vereinfacht. Ein Strahl hat eine Richtung und wird daher mit einem Pfeil gezeichnet. Eine Linse „bricht“ den Strahl und ändert dessen Richtung.

    Die Brennweite einer Linse entspricht der Entfernung von der Linse zur Brennebene, auf der der Brennpunkt liegt. Sie wird in Millimetern angegeben (f = mm).

    Sammellinsen (positiv) und Zerstreuungslinsen (negativ)

    Sammellinsen brechen die Lichtstrahlen, die parallel zur optischen Achse verlaufen, in einem Punkt, der Brennpunkt genannt wird.

    Die Zerstreuungslinsen brechen die Lichtstrahlen, die parallel zur optischen Achse verlaufen, so, als ob sie von einem Punkt, dem „virtuellen“ Brennpunkt, ausgingen.

    Linsen „brechen“ die Lichtstrahlen

    Sie können die Brennweite der Linse als aufgedruckte Zahl am Linsenhalter finden. Die MiniBOX enthält eine 100mm Sammellinse, zwei 40mm Sammellinsen und eine -50mm negative Linse. Die Zahlen geben die Brennweite an.

    Die Sammellinse wird auch positive oder konvexe Linse genannt. Der mittlere Teil der Linse ist immer dicker als der Rand.

    Die Sammellinse vergrößert das Bild. Die Vergrößerung unterscheidet sich zwischen der 40mm Linse und der 100mm Linse. Das Bild kann aufrecht oder invertiert sein.

    Die negative Linse (Streulinse) wird manchmal auch negative oder konkave Linse genannt. Der mittlere Teil der Linse ist immer dünner als der Rand.

    Mit der negativen Linse (hier: -50 mm Linse) wird das Bild immer verkleinert und immer aufrecht dargestellt.

    Wir gehen davon aus, dass unsere Linsen sogenannte „dünne Linsen“ sind. Das bedeutet, dass wir sie als eine Ebene betrachten können und uns nicht um ihre Dicke kümmern müssen. Das macht Erklärungen und Berechnungen viel einfacher.

    Haben die Antworten weitere Fragen aufgeworfen? Dann fahren Sie fort, um genau zu verstehen, wie Linsen funktionieren...

    Linsenbild

    Nehmen Sie jetzt die Linsenwürfel. Versuchen Sie mit der richtigen Linse, die Brennweiteninformation in den Würfeln zu entziffern. Bewegen Sie die Linse über die Schrift, bis sie dieselbe Größe wie der Text "UC2" hat.

    Können Sie den Text in derselben Größe und Ausrichtung wie das "UC2" sehen? Was passiert, wenn Sie den Abstand zwischen Linse und Bild ändern?

    Was passiert, wenn Sie eine Linse mit der falschen Brennweite verwenden?

    Bild eines Objekts durch eine positive Linse

    Nehmen wir die Sammellinse als Beispiel. Wir starten mit einem Objekt (grüner Pfeil) und sehen, was mit den Strahlen passiert, die von der Spitze ausgehen. Es gibt unendlich viele Strahlen in alle Richtungen, aber für die Zeichnung der Figur reichen die folgenden drei Strahlen aus:

    1. Der Zentralstrahl (orange) passiert ungestört das Zentrum der Linse.

    2. Der Fokusstrahl (gelb) startet ebenfalls von der Spitze des Pfeils, geht aber durch den objektseitigen Fokus bei Brennweite f. Nach der Linse geht er in gleicher Höhe, aber jetzt parallel zur optischen Achse weiter.

    3. Der Parallelstrahl (rot) verläuft zunächst parallel zur optischen Achse, wird dann aber an

      der Linse so gebrochen, dass er durch den bildseitigen Brennpunkt bei Brennweite f verläuft.

    Das Bild entsteht dort, wo alle Strahlen sich schneiden. Dieses Prinzip wird für alle Punkte oder die von ihnen ausgehenden Strahlen eines Objekts verwendet. Je nach verwendeter Linse und Position des Objekts ändern sich die Eigenschaften des Bildes, wie Größe, Orientierung und Position.

    Bild eines Objekts durch eine negative Linse

    Im Fall der negativen Linse verwenden wir dieselbe Methode, um den Strahlengang abzubilden. Anders als bei der Sammellinse ist das Bild immer verkleinert und virtuell. Die Vergrößerung hängt von der Position des Objekts vor der Linse ab. Anders als bei der Sammellinse wird das Bild auf der Objektseite erzeugt und daher als virtuelles Bild bezeichnet. Sie können es direkt mit Ihren Augen sehen, aber nicht auf einen Bildschirm projizieren.

    Die Art, wie eine Linse ein Bild erzeugt, ist vorhersehbar, wenn man die Brennweite dieser Linse kennt. Daher muss ein bestimmter Abstand eingehalten werden, damit Sie die Schrift mit der angegebenen Linse auf dem vorherigen Blatt sehen können.

    Die Vergrößerung und der Ort, an dem das Bild entsteht, hängen von der Brennweite der Linse und dem Abstand zwischen Linse und Objekt ab.

    Mit der Zerstreuungslinse (f = -50 mm) sehen Sie immer ein verkleinertes virtuelles Bild. Ein virtuelles Bild kann nur mit dem Auge betrachtet werden. Bisher haben wir nur virtuelle Bilder gesehen.

    Die Sammellinse als Lupe

    Nehmen Sie den UC2-Linsenwürfel mit einer Brennweite von f=40mm und verwenden Sie ihn als Lupe.

    Können Sie die kleinen Buchstaben durch die Sammellinse lesen? Was steht dort?

    Eine Linse in Aktion finden Sie hier:

    Das machen Sammellinsen

    Mit den Sammellinsen hängen Bild und Vergrößerung von der Position des Objekts ab.

    Wenn der Abstand zwischen Objekt und Linse mehr als das Doppelte der Brennweite der Linse beträgt, dann ist das Bild...

    • Umgekehrt
    • Seitlich getauscht
    • Verkleinert
    • Real

    Wenn der Abstand zwischen Objekt und Linse genau das Doppelte der Brennweite der Linse beträgt, dann ist das Bild...

    • Umgekehrt
    • Seitlich getauscht
    • Gleiche Größe
    • Real

    Wenn der Abstand zwischen Objekt und Linse mehr als die Brennweite und weniger als das Doppelte der Brennweite der Linse beträgt, dann ist das Bild...

    • Umgekehrt
    • Seitlich getauscht
    • Vergrößert
    • Real

    Objektabstand (g)

    Der Abstand zwischen dem Objekt und der Linsenebene wird als g bezeichnet.

    Bildweite (b)

    Der Abstand zwischen der Linsenebene und dem durch die Linse gebildeten Bild wird als b bezeichnet.

    Die Sammellinse kann ein reales Bild erzeugen. Das reale Bild kann dann auf einem Schirm gesehen werden.

    Deshalb vergrößert die Lupe

    Lupeneffekt!

    Wenn der Abstand zwischen dem Objekt und der Linse weniger als die Brennweite der Linse beträgt, dann ist das Bild...

    • Aufrecht
    • Richtig herum
    • Vergrößert
    • Virtuell

    Die Lupe ist das einfachste aller optischen

    Geräte, da sie nur aus einer einfachen Sammellinse mit geeigneter Brennweite besteht. Warum vergrößert der Würfel mit 50 𝑚𝑚 den kleinen Text? Wenn das Objekt vor der Brennweite der Linse liegt – also weniger als 50 𝑚𝑚 vor der Linse – erzeugt die Linse ein virtuelles Bild, das hinter dem eigentlichen Objekt liegt. Das Auge nimmt es vergrößert wahr. Schauen Sie sich das obenstehende Diagramm an.


    Berechnen Sie die Vergrößerung der Lupe mit der folgenden Formel:

    250 𝑚𝑚 ist der Abstand der klaren Sehweite – d. h. der Abstand zwischen dem Objekt und dem Auge, bei dem die meisten Menschen gut lesen können. Mehr dazu später bei der „Akkommodation“ des Auges.

    Wie funktioniert ein Kinoprojektor?

    Nehmen Sie den UC2-Linsenwürfel mit einer Brennweite von 𝑓 =40 𝑚𝑚 und platzieren Sie ihn hinter dem Probenhalterwürfel. Der Abstand zwischen dem Objekt und der Linse (also der Objektabstand g) sollte ca. 50 mm betragen. Wenn Sie das Objekt jetzt mit der Taschenlampe beleuchten, sehen Sie es in etwa 200 mm Entfernung scharf an der Wand. Ein Kinoprojektor hat anstelle des Objekts einen Filmstreifen und natürlich eine viel stärkere Lichtquelle.

    Verwenden Sie eine Taschenlampe (z. B. von Ihrem Handy) als Lichtquelle und halten Sie sie vor das Objekt

    Verwenden Sie das Bild oder den Text auf dem Mikroskopobjektträger als Objekt

    Wie ist das Bild ausgerichtet? Schieben Sie die Linse hin und her im Würfel und sehen Sie, wann das Bild scharf ist. Finden Sie das Bild für g = 50mm, 60mm, 65mm und messen Sie den Abstand zwischen der Linse und dem Bild.

    Wie funktioniert ein Kinoprojektor?

    Wo ist das Bild?

    Wenn ein Objekt durch eine Sammellinse abgebildet wird, hängen Position und Größe des Bildes von der Entfernung (g) des Objekts zur Linse und deren Brennweite (f) ab. Die Linsengleichung beschreibt die Beziehung zwischen Bildweite (b) und Objektabstand (g):

    Wie groß ist das Bild?

    Die Vergrößerung des Objekts auf der Leinwand kann einfach mit der folgenden Formel berechnet werden:

    Wie der Projektor funktioniert

    Überprüfen Sie, ob Ihre Beobachtung mit der Berechnung übereinstimmt

    Berechnen Sie die Vergrößerung des Projektors für die verschiedenen Werte von g und b.

    Unsere Linse hat eine Brennweite von f= 40 mm.

    Für g = 50mm → b = 200mm

    Für g = 60 mm → b = 120 mm

    Für g = 65 mm → b = 104 mm


    Der Projektor erzeugt immer ein vergrößertes, invertiertes (umgekehrtes) Bild. Die Position des Bildes und seine Vergrößerung hängen von der Position und Größe des Objekts ab.

    Tutorial: Bestimmung der Brennweite einer positiven Linse

    Benötigte Materialien:

    • Lichtquelle (z. B. Raumbeleuchtung)
    • Positive Linse
    • Schirm (z. B. Tisch, Stück Papier usw.)

    Anleitung:

    1. Positionieren Sie die positive Linse so, dass sie der Lichtquelle zugewandt ist. Richten Sie einen Schirm parallel zur Brennebene der Linse aus.
    2. Ändern Sie den Abstand zwischen Linse und Schirm.
    3. Beobachten und dokumentieren Sie sorgfältig die Position, an der die Lichtquelle ein klares Bild auf der Oberfläche des Schirms bildet.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/GERMAN/CoreTelescope/index.html b/docs/Toolboxes/DiscoveryCore/GERMAN/CoreTelescope/index.html index 2716a75a3..77eb83565 100644 --- a/docs/Toolboxes/DiscoveryCore/GERMAN/CoreTelescope/index.html +++ b/docs/Toolboxes/DiscoveryCore/GERMAN/CoreTelescope/index.html @@ -10,7 +10,7 @@ - + @@ -28,7 +28,7 @@ Suche ein Objekt in der Ferne und betrachte es durch Galileis Teleskop.

    Was ist ein Kepler-Teleskop?

    Setze die Linsen gemäß dem Diagramm in die richtigen Positionen. Dann schaue durch das Teleskop in die Ferne.

    Wie sieht das Bild aus? Wie ist die Bildausrichtung?

    Während du durch das Teleskop schaust, variiere die Abstände zwischen den Komponenten, um ein scharfes Bild zu sehen!

    Das ist ein Kepler-Teleskop

    Dieser Teleskoptyp wird oft in der Astronomie verwendet.

    So funktioniert das Kepler-Teleskop

    Welche Vergrößerung hat dieses Kepler-Teleskop?

    Formel zur Berechnung der Vergrößerung

    Dieses Teleskop kann eine höhere Vergrößerung als das Galilei-Teleskop erreichen. Es erzeugt jedoch ein umgekehrtes Bild. Das ist jedoch kein Problem bei der Beobachtung von Sternen.

    Das Bild ist immer
    • Vergrößert durch die Vergrößerung aus der obigen Formel
    • Umgekehrt
    • Seitlich vertauscht

    Das Sichtfeld ist größer als beim Galilei-Teleskop.


    Tutorial: Keplers Teleskop

    Benötigte Materialien:

    • Acht Bodenplatten
    • 100 mm positive Linse (im Würfel)
    • 50 mm positive Linse (im Würfel)
    • Zwei leere Würfel

    Diagramm (Seitenansicht):

    Anleitung zum Zusammenbau von Keplers Teleskop:

    Schritt 1: Würfel ausrichten

    Richte die Würfel so aus, dass die beiden Linsen an den Extremen liegen und die beiden leeren Würfel in der Mitte.

    Schritt 2: Würfel mit Bodenplatten fixieren

    Fixiere die Würfel mit den Bodenplatten, indem du sie oben und unten platzierst.

    Schritt 3: Abstand justieren

    Justiere den Abstand zwischen den Linsen, wie im Bild gezeigt.

    Schritt 4: Keplers Teleskop verwenden

    Suche ein Objekt in der Ferne und betrachte es durch Keplers Teleskop.

    Was ist ein Spektiv?

    Das Spektiv ist lang, daher ist das Schema nicht gleich groß. Setze die Linsen gemäß dem Diagramm in die richtigen Positionen und schaue durch das Teleskop in die Ferne.

    was zu folgendem führt

    Wie vergleicht sich das Bild hier mit dem Kepler-Teleskop?

    Während du durch das Teleskop schaust, justiere die Abstände zwischen den Komponenten, um ein scharfes Bild zu sehen!

    So funktioniert das Spektiv

    Die Vergrößerung ist wie beim Kepler-Teleskop. Die Umkehrlinse ändert nur die Orientierung (das Bild wird umgekehrt), nicht die Vergrößerung.

    Ein aufrechtes Bild ist für terrestrische Beobachtungen notwendig. Echte terrestrische Teleskope verwenden Prismensysteme, um das Bild zu drehen und kompakt zu halten.

    Das Bild ist
    • Mit der gleichen Vergrößerung wie das Kepler-Teleskop vergrößert
    • Aufrecht
    • Gespiegelt

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/GERMAN/coreMicroscope/index.html b/docs/Toolboxes/DiscoveryCore/GERMAN/coreMicroscope/index.html index 0ed01b12e..faad79db8 100644 --- a/docs/Toolboxes/DiscoveryCore/GERMAN/coreMicroscope/index.html +++ b/docs/Toolboxes/DiscoveryCore/GERMAN/coreMicroscope/index.html @@ -10,7 +10,7 @@ - + @@ -20,7 +20,7 @@ ![](../IMAGES/MINIBO

    XTUTORIAL/image33.png)

    Mikroskop mit "Unendlichkeitsoptik" und Okular

    Kannst du das mikroskopische Bild durch die Okularlinse mit deinen Augen sehen? Welchen Effekt hat der Spiegel? Richte das Mikroskop ohne den Spiegel ein. Stelle sicher, dass du immer noch zwei leere Räume zwischen der Tubuslinse und dem Okular hast. Was beobachtest du dann?

    Eine kurze Einführung in Spiegel und deren Anwendungen findest du hier:

    Wozu dient das Okular?

    Neuere Mikroskope sind mit sogenannten "Unendlichkeitsoptiken" ausgestattet. In diesem Fall erzeugt die Linse kein reales Zwischenbild. Das Licht verlässt die Linse als unendliche parallele Strahlen. Am Ende des "unendlichen" Tubus befindet sich eine Tubuslinse. Diese erzeugt ein Zwischenbild, das dann durch das Okular erneut vergrößert wird.

    Das Bild hinter dem Okular ist umgekehrt, seitlich vertauscht, vergrößert und virtuell. Das virtuelle Bild kann mit dem Auge gesehen werden.

    Diese Konfiguration ist sehr nützlich in modernen Mikroskopen, da sie das Einfügen zusätzlicher Komponenten wie Filter zwischen Objektiv und Tubuslinse erlaubt, ohne den optischen Weg zu beeinflussen.

    Ein Filter kann verwendet werden, um die Helligkeit und Farbe des Bildes zu ändern.


    Das Okular ist dafür gut

    Wie groß ist die Vergrößerung nach dem Okular?

    Gesamtvergrößerung

    Ein Okular ist eigentlich nur eine Linse, die das Zwischenbild vergrößert. Es bildet das virtuelle Bild so ab, dass du es mit deinen Augen sehen kannst.

    Mit dem Spiegel kannst du nicht nur dich selbst sehen, sondern auch das einfallende Licht in jede Richtung reflektieren. So kannst du den optischen Weg falten und die Arbeit komfortabler gestalten. Der Spiegel beeinflusst zwar nicht die Vergrößerung, dreht aber das Bild in eine Richtung.

    Tutorial: Lichtmikroskop mit Unendlichkeitsoptik und Okular

    Benötigte Materialien:

    • Keplers Teleskop
    • Taschenlampe
    • Acht Bodenplatten
    • Probenhalter (im Würfel) mit Probe
    • Spiegel (im Würfel)
    • Leerwürfel
    • Okular (im Würfel)

    Diagramm (Seitenansicht):

    Anleitung zum Zusammenbau des Lichtmikroskops mit Unendlichkeitsoptik und Okular:

    Schritt 1: Probenhalter-Würfel hinzufügen

    Füge den Probenhalter-Würfel im Keplers Teleskop neben der 50-mm-Sammellinse hinzu.

    Schritt 2: Neben der 100-mm-Linse zusammenbauen

    Montiere neben der 100-mm-Sammellinse einen Leerwürfel und daneben den Spiegelwürfel.

    Schritt 3: Das Okular platzieren

    Platziere das Okular oben auf dem Spiegelwürfel mit der richtigen Orientierung. Beleuchte die Probe aus einiger Entfernung.

    Schritt 4: Für ein scharfes Bild justieren

    Schaue durch das Okular. Justiere den Linsenabstand, bis du ein fokussiertes scharfes Bild siehst. Hinweis: Wenn du das Präparat nicht siehst, versuche vorsichtig die Position der Probe zu justieren, bis du das Präparat siehst.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/GERMAN/uc2miniboxDE/index.html b/docs/Toolboxes/DiscoveryCore/GERMAN/uc2miniboxDE/index.html index 55c3afe04..c9816d04e 100644 --- a/docs/Toolboxes/DiscoveryCore/GERMAN/uc2miniboxDE/index.html +++ b/docs/Toolboxes/DiscoveryCore/GERMAN/uc2miniboxDE/index.html @@ -10,7 +10,7 @@ - + @@ -27,7 +27,7 @@ ||Rechnungen: Hier gibt es etwas zu rechnen. Nehmen Sie einen Stift und Papier und beginnen Sie mit den Rätseln. | ||Vorsicht: Berühren Sie die Glasoberflächen nicht mit Ihren Fingern! | ||Reinigung der Linsen: Wenn Sie die Linse bereits berührt haben, können Sie sie mit einem Brillentuch reinigen. |

    Was kann eine Linse bewirken?

    Nehmen Sie einen oder mehrere der Würfel, die eine Linse enthalten, und betrachten Sie das hier gezeigte UC2-Symbol. Halten Sie den Würfel in Ihrer Hand und ändern Sie den Abstand zwischen der Linse und dem Bild.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/Opticsintro/index.html b/docs/Toolboxes/DiscoveryCore/Opticsintro/index.html index 99f694d0e..f8c7cb64c 100644 --- a/docs/Toolboxes/DiscoveryCore/Opticsintro/index.html +++ b/docs/Toolboxes/DiscoveryCore/Opticsintro/index.html @@ -10,7 +10,7 @@ - + @@ -36,7 +36,7 @@

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/SPANISH/core_intro/index.html b/docs/Toolboxes/DiscoveryCore/SPANISH/core_intro/index.html index e52afde06..5331d0c19 100644 --- a/docs/Toolboxes/DiscoveryCore/SPANISH/core_intro/index.html +++ b/docs/Toolboxes/DiscoveryCore/SPANISH/core_intro/index.html @@ -10,7 +10,7 @@ - + @@ -21,7 +21,7 @@ Objetivo del microscopio: un sistema de lentes especial que agranda un objeto

    Un recorrido rápido a través de una caja ejemplar y cómo se ven los cubos (estado de octubre de 2022) se puede encontrar aquí:

    ¿Qué es UC2?

    El elemento central del proyecto UC2 es un cubo simple. El cubo consta de dos mitades y alberga un inserto deslizable. El inserto puede contener varios componentes ópticos (por ejemplo, lentes, espejos), lo que significa que se pueden implementar diferentes funciones con cada cubo.

    Tipo de cubo 1: moldeado por inyección con conexión de enchufe

    Base

    Base

    El cubo se puede montar en una placa base. Los módulos de la placa base se pueden juntar como un rompecabezas.

    UC2 para imprimir tú mismo

    El cubo UC2 también se puede imprimir en 3D. Se ve igual que el modelo moldeado por inyección, pero aquí consiste en una tapa de cubo y el cuerpo del cubo, que se mantienen unidos con tornillos. Los tornillos son excelentes para colocarlos en la placa magnética. Al combinar diferentes módulos de cubos, se pueden ensamblar fácilmente diferentes estructuras ópticas. Una nueva función se puede agregar con cada dado. Tu creatividad no tiene límites.

    Tipo de cubo 2: impreso en 3D con conexión magnética

    Dados

    Placa base con imanes

    En la placa base impresa en 3D hay pequeños imanes esféricos en los que se colocan los cubos.

    ¿Quieres más cubos? Entonces puedes construirlos tú mismo. Puedes encontrar todo aquí

    Así es como encajan los cubos

    Duración: 1

    Asegúrate de que los cubos estén colocados correctamente en la placa y no estén inclinados. Al final es importante que los insertos estén en el lugar correcto.

    Si no ves una imagen nítida, mueve los insertos (por ejemplo, lente) hasta que la veas claramente. La flecha verde en la imagen te muestra cómo hacerlo.

    Aquí puedes encontrar un pequeño video que explica el concepto central del cubo



    ¿Qué significan los símbolos?

    Duración: 2

    Experimenta Si ves este bloque, ¡hay algo con lo que experimentar! Puedes colocar un cubo UC2 en este bloque.
    Explicaciones: Si ves este icono, ¡hay algo que aprender!
    Cálculos: Aquí hay algo que calcular. Toma un lápiz y papel y comienza a resolver rompecabezas.
    Precaución: ¡No toques las superficies de vidrio con tus dedos!
    Limpieza de las lentes: Si ya has tocado la lente, puedes limpiarla con un paño para gafas.

    ¿Qué puede hacer una lente?

    Duración: 2

    Toma uno o más de los cubos que tienen una lente y observa el símbolo UC2 mostrado aquí. Sostén el cubo en tu mano y cambia la distancia entre la lente y la imagen.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryCore/Smartphone Microscope/index.html b/docs/Toolboxes/DiscoveryCore/Smartphone Microscope/index.html index 6bab04246..10b95d6e0 100644 --- a/docs/Toolboxes/DiscoveryCore/Smartphone Microscope/index.html +++ b/docs/Toolboxes/DiscoveryCore/Smartphone Microscope/index.html @@ -10,13 +10,13 @@ - +

    openUC2 Smartphone Microscope with a finite corrected objective lens

    This video shows you how to build the UC2 smartphone microscope as also indicated in the PDF manual. It shows some tricks how to make it more stable and how to operate the Z-stage

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryDiffraction/ALIGNMENT_FinOptics/index.html b/docs/Toolboxes/DiscoveryDiffraction/ALIGNMENT_FinOptics/index.html index a071cd40a..308756a52 100644 --- a/docs/Toolboxes/DiscoveryDiffraction/ALIGNMENT_FinOptics/index.html +++ b/docs/Toolboxes/DiscoveryDiffraction/ALIGNMENT_FinOptics/index.html @@ -10,13 +10,13 @@ - +

    The Course BOX Alignment Procedure (Finite Optics)

    This is the alignment procedure of the experiments with finite-corrected optics. If you are looking for the infinity-corrected setups click here.

    First experiment: Finite-corrected Microscope with Köhler Illumination

    This experiment demonstrates the essential parts of a microscope and explains the concept of conjugate planes. The key components are: Light source, Collector lens, Field Diaphragm, Aperture Diaphragm, Condenser lens, Sample, Objective lens and Eyepiece lens.

    The Aperture set of conjugate planes: Lamp filament, Aperture diaphragm, Back Focal Plane of the objective, Exit pupil of the eye.

    The Field set of conjugate planes: Field diaphragm, Sample plane, Primary Image Plane, Retina.

    Second experiment: Abbe Diffraction Experiment

    The famous Abbe Diffraction Experiments shows how diffraction of light by a specimen (and interference with the illuminating light) creates an image and how collection of diffracted light defines the resolution of the microscope. With this setup it is possible to view both sets of conjugate planes at the same time, with one's eye or a camera.

    The Aperture set of conjugate planes: Lamp filament, Aperture diaphragm, Back Focal Plane of the objective, Mirror surface in the side arm, Retina.

    The Field set of conjugate planes: Field diaphragm, Sample plane, Primary Image Plan, Retina.

    We propose to use a diffraction grating as a sample and spatial filter in the BFP.

    This tutorial will lead you step-by-step through the alignment of the Finite-corrected Microscope, Köhler Illumination and Abbe Diffraction Experiment.

    1. Start with 1×13 baseplate and all the cubes:
    • Flashlight Cube (1)
    • Collector Lens Cube (2)
    • 2× Circular Aperture Cube (3)
    • Condenser Lens Cube (4)
    • Sample Cube (5)
    • Objective Lens Cube (6)
    • Objective in Z-Stage Cube (7)
    • Eyepiece Lens Cube (8)
    • 4× Screen Cube - 1× with white paper, 3× with lens tissue (9)

    2. Start by placing the sample - we will build the microscope around it.

    3. Place the Primary Image Plane (PIP) by definition: the distance is 200 mm from sample to PIP when using the 4× objective (finite-corrected for 160 mm, 40 mm working distance). Use the Sample cube with white paper as a screen.

    1. Place the objective lens. It is a single plano-convex lens with f' = 35 mm.

    2. Use direct illumination from the flashlight with its lens. Adjust the position of the objective lens - focus the image on PIP by moving the lens back or forth.

    • Focussing Trick: Firstly move the whole objective lens cube in one direction (away from the sample). If the image sharpness in PIP improves, slide the insert in that direction. If the image sharpness in PIP get worse, slide the insert in the opposite direction, towards the sample. Continue until you get a focussed image of your sample on the PIP.

    1. Place the eyepiece lens behind the PIP. It is a single plano-convex lens with f' = 40 mm. Exchange the PIP screen with a semitransparent screen (lens tissue). While looking through the eyepiece, focus it on the PIP. Use the Focussing Trick again. The position within the cube of the sample holder for the paper screen and for the semitransparent screen has to be identical.

    2. Take away the screen from PIP. To dim the flashlight, put a piece of lens tissue in front of it. Look through the eyepiece - you should see a sharp image of your sample.

    3. Place the Field Diaphragm (FD). The position was chosen in order to work well with the available lenses.

    1. Place the condenser lens. It is a single plano-convex lens with f' = 40 mm.

    2. Place the PIP screen back to its position.

    3. Place the flashlight on one end of the baseplate. Close the FD.

    4. Adjust the position of the condenser lens - focus the image of the FD on PIP by moving the lens back or forth (Focussing Trick). Once you see a sharp image of the closed FD on the screen in PIP, open and close the aperture and observe its effect.

    5. Remove the screen, look through the eyepiece and check whether you see a sharp image of the closed FD.

    6. Place the Aperture Diaphragm (AD) into the Front Focal Plane (FFP) of the condenser lens (40 mm).

    • Focal Plane Trick - In case you don't know where exactly the FFP is, use this:

    • Use the Laser Cube with Beam Expander Cube to produce a collimated beam. Place the condenser lens in the collimated beam and find focus.

    • Place the AD into the same plane - slide it in within the cube. Careful - push the Aperture from one side to keep it together.

    1. Remove the lens of the flashlight. The position of the flashlight remains.

    1. Place the collector lens. It is a single plano-convex lens with f' = 50 mm.

    2. Remove the FD. Close the AD. Center the flashlight with respect to the AD. Focus the image of the LED on the AD by adjusting the position of the collector lens (Focussing Trick).

    3. Place the FD back in the illumination path. Now the Köhler illumination is properly aligned.

    4. Place a semitransparent screen into the Back Focal Plane (BFP) of the objective lens. Close the AD and check that you see a focussed image of the AD in the BFP.

    5. Remove all screens. Use a lens tissue to dim the light and look through the eyepiece. Observe the effect of opening and closing the apertures.

    • Left: both apertures open. Middle: FD closed. Right: AD closed.

    1. Exchange objective lens with 4× objective in Z-Stage. Place a screen in PIP and observe the effect of opening and closing the apertures in the PIP.

    • Top: both apertures open. Middle: AD closed. Bottom: FD closed.

    1. Remove the screen. This is an aligned finite-corrected microscope with Köhler illumination.

    1. By adding 4 more cubes and another baseplate, we will now create the Abbe Diffraction Experiment. The extra cubes are:
    • Beamsplitter Cube (1)
    • Mirror 45° Cube (2)
    • Eyepiece Lens Cube (different f' than the previously used one!) (3)
    • Relay Lens Cube (4)

    24. Remove the eyepiece (40 mm) and add the 4×2 baseplate. Exchange the 4× objective with the objective lens (35 mm).

    25. Place the Beamsplitter Cube.

    1. Place the screen into the PIP, in front of the Beamsplitter. Place a new eyepiece lens behind the Beamsplitter. It is a single plano-convex lens with f' = 100 mm. While looking through the eyepiece, focus it on the PIP (Focussing Trick).

    2. In the other arm we will observe the BFP. There are two options, depending on how you place the Mirror in the next step.
    • Option 1: Place the mirror as shown in the picture.
    • Place the eyepiece lens (40 mm).

    • Option 2: Place the mirror as shown in the picture.
    • Place the baseplate connector on the mirror cube.
    • Place the eyepiece cube (40 mm) on the connector on the mirror cube.

    1. Place the relay lens after the Beamsplitter. It is a single plano-convex lens with f' = 75 mm.

    2. Place a semitransparent screen in the BFP of the objective lens. Close AD and look through the eyepiece of the side arm. You should see a sharp image of the AD on the BFP.

    3. This is the Abbe Diffraction Experiment. Through the eyepieces you can see both sets of conjugate planes at the same time.

    Participate

    If you have a cool idea, please don't hesitate to write us a line, we are happy to incorporate it in our design to make it even better.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryDiffraction/ALIGNMENT_InfOptics/index.html b/docs/Toolboxes/DiscoveryDiffraction/ALIGNMENT_InfOptics/index.html index fdd9a7826..8e2436fcc 100644 --- a/docs/Toolboxes/DiscoveryDiffraction/ALIGNMENT_InfOptics/index.html +++ b/docs/Toolboxes/DiscoveryDiffraction/ALIGNMENT_InfOptics/index.html @@ -10,7 +10,7 @@ - + @@ -52,7 +52,7 @@ We use a laser pointer as a light source and expand it using two lenses. The imaging path is the same as in the microscope in the first experiment and therefore we can observe the image of our sample in the main arm and the image of the BFP in the side arm.

    1. We use a very fine fish net as a sample here. You could try a net like this one. Another idea is to try one of these plastic tea bags. Or a diffraction grating.

    2. We provide a circular aperture and a rectangular aperture to be used in the BFP. We also suggest to use an opaque dot (a small dot made with some marker or paint on a microscope slide) and a thin line object (like this tiny hex key here). The apertures block the light from the outside while the dot and line can block the center of the light path.

    3. As mentioned earlier, we keep the imaging path in both main arm and side arm. Remove the illumination part of the microscope and also the Eyepiece.

    4. Besides the apertures that we already prepared, we will need:

    • Laser Cube with laser pointer (1)
    • 1× Lens Cube with 50 mm lens (2)

    For now, place the camera in PIP.

    1. The laser is equipped with a cap that holds a lens from the RasPi camera. Make sure to put it on, otherwise you won't be able to create an expanded parallel beam.

    2. Place the Laser cube on the baseplate as shown in the picture.

    Careful! Do not hit anybody's eyes with the laser beam. Keep the laser off if you're not using it at the moment. Always point the laser away from people. Block the light if it's leaving the table you're working on.

    1. Place the lens for beam expansion behind the Laser cube as shown in the picture. It is a single plano-convex lens with f' = 50 mm. Align the lens to illuminate your Sample with a collimated beam - the diameter of the beam should be the same just after the lens cube and also far away from it. When your beam is well-collimated, the distance between the laser+lens duo and the Sample doesn't matter.

    2. You can switch the camera between the PIP and the BFP. You could also use two cameras, one in PIP and one in BFP, if you have them.

    3. Between the objective and the Beamsplitter is the Back Focal Plane of the Objective. You can see it if you put a piece of paper there - you will see the Fourier transform of the sample. You should see the same on camera in the side arm.

    4. In the PIP, you can see an image of the sample. Here we see our fish net. Align the camera to obtain a sharp image.
      ⭐ Because of the Talbot effect you can find more than one sharp image of the sample. Therefore, partially close the Field diaphragm (FD) and find the position of the camera where you not only see a sharp image of the grating (fish net) but also of the FD.

    1. In the BFP image in the side arm, you can see the Fourier transform of the grating just as it looks in the BFP itself. Align the second Relay lens to obtain a focussed image on the camera.
      ⭐ The grating is regular in both X and Y and therefore it's a very convenient sample for this experiment, because its Fourier transform is easily predictable. With a different sample the BFP will of course also look differently.

    ⭐ Back Focal Plane

    The intensity peaks in the BFP are the diffraction orders of our sample. By placing an aperture or another object here we’ll be able to modify the information transmitted through the microscope that contributes to the image. Depending on the aperture we can observe different effects.

    • Circular aperture: The circular aperture blocks the light symmetrically from outside towards the center. Close the aperture and align the laser such that the 0th order is in the center of the aperture. You can align the laser using the four screws in its holder.

    • Rectangular aperture: The rectangular aperture closes independently from both sides in X and Y direction (horizontally and vertically). Use a hex key or a similar tool to close/open the aperture doors.

    • Dot and line: Use a sample holder cube or your (presumably steady) hand to hold these two. You can block the 0th or 0th+1st orders with the dot, depending on how big it is. You can block the X-0th or Y-0th order with the line-object.

    1. This is the setup for the second experiment: Abbe Diffraction Experiment.

    ⭐ Abbe Diffraction experiment - What do we see?

    1. With no aperture in the BFP, we see the image of the Sample in PIP and the Fourier transform of the sample in the BFP, as we just aligned it and prepared it.

    2. Firstly we use the Circular aperture. As we slowly close it and change the diameter of the transmitting area, we cut out the higher diffraction orders that carry the high frequency information, hence the fine details. In the image plane we see how these details blur and the sharp edges soften. The more orders we cut out, the blurrier the image gets.

    3. Using the Rectangular aperture, we can block the diffraction orders more selectively. When we close the aperture in the X direction to only let through the Y-0th orders, the square pattern of the image disappears, and we have only lines. This is because there is no X order that would transmit the information about the shape in the perpendicular direction.

    4. When we do the same trick in the other direction, we then see lines of the other orientation but again no square pattern.

    5. Closing the aperture in both X and Y direction, we eventually block all the higher orders that form the image of the sample. As we can see here, when only the 0th order is transmitted all image information is lost. What we see is only some background noise.

    6. On the other hand, when we block only the 0th order but keep all the others (we do this using the dot on a slide), we are still able to see that the pattern is preserved, because all the orders still have a corresponding partner to interfere with on the other side from the 0th order. But now we are in a so-called dark field imaging mode. We'll explain it in the next steps.

    7. We can even block the 0th and 1st order by simply using a bigger dot in the BFP. We are still able to recognize the square pattern but the high frequency information, the noise, is taking over the image.

    8. When using the line object instead of the dot, we can block the 0th order completely in the Y direction and see what it does to the image. We still see the square pattern but suddenly, in the X direction, it seems that we have twice as many squares. This is the dark field imaging effect but in X only. We’re seeing just the edges and because there are two edges per square in one direction, it appears that we see them twice.

    9. The same works also in the perpendicular direction - blocking the 0th order in X results in the dark field imaging mode in Y.

    10. Using the rectangular aperture again, we can find out the minimal number of orders that we need to form a reliable image. We said that they always interfere with the 0th order, so we don't need both sides. Therefore, we close the aperture and let through only one quarter of the orders. We can block the higher orders as well, as they only carry the high frequency information, and we are still able to see the basic pattern of our sample.

    ⭐ Watch the video of this experiment!

    UC2 YouSeeToo - Abbe Experiment Demonstration

    Notes to the video:

    • In this demonstration of the experiment, two Alvium cameras from Allied Vision are used, so we can show the PIP and BFP on the screen simultaneously
      • Find the cubes for the Alvium cameras here and choose the adjustable insert for easy alignment.
    • The optical path is different from the one described in this tutorial. This is because of the use of the above mentioned cameras
      • The objective and eyepiece are both lenses with f' = 100 mm. The magnification of the microscope is therefore equal to 1. The "magnified" image is just a zoom into the camera view.
      • Thanks to the use of a 10 mm lens as an objective, the diffraction orders in BFP are more separated and easily accessible.
      • In the side arm, the first lens has f' = 100 mm and the second lens f' = 50 mm. The image of the BFP is therefore demagnified twice, to fit better in the field of view of the camera.

    Bonus question: This magical image was taken by the RasPi camera in the BFP with the fish net as a sample. If you tell me what created this effect, I send you a chocolate ;-)

    Participate

    If you have a cool idea, please don't hesitate to write us a line, we are happy to incorporate it in our design to make it even better.

    References:

    1; 2; 3; Cat image source;
    4 Advanced Optical Imaging Workshop; Plymouth; Noah Russell, 2009©

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryDiffraction/MicroscopyCore/index.html b/docs/Toolboxes/DiscoveryDiffraction/MicroscopyCore/index.html index 18ac06c16..6f54bd90d 100644 --- a/docs/Toolboxes/DiscoveryDiffraction/MicroscopyCore/index.html +++ b/docs/Toolboxes/DiscoveryDiffraction/MicroscopyCore/index.html @@ -10,13 +10,13 @@ - +

    MicroscopyCore

    The first version of this BOX was designed for the "Principles of Light Microscopy" Course of the Light Microscopy Facility of MPI-CBG in Dresden. The alignment procedure was developed and optimised by Sebastian Bundschuh. It follows the lectures of Peter Evennett that can be found here.

    CourseBOX teaches the core principles of microscopy and basics of optical alignment. It is intended for microscopy courses for students that are rather on the side of users than designers. This BOX provides a hands-on experience with insight into the black box that microscope often seems to be. It comes with alignment tutorials and relies on basic components. By reusing the components and starting from the common ground, it shows that all the microscopy methods are based on only a few principles.

    It is not yet a comprehensive and optimised toolbox but rather a collection of modules and experiments that are frequently taught in microscopy courses. There is still space for improvement and we're hoping that more universities and institutions adopt the CourseBOX in their courses, which will lead to its improvement and (if successful) production.

    Build the BOX

    A list of 3D-printed parts and necessary components is found in BUILD_ME, together with assembly guidelines and some printing tips and tricks.

    Setups

    What can you build with the CourseBOX?

    Compound microscope with proper Köhler illumination (finite optics)

    A finite corrected microscope with proper Köhler illumination. All conjugate planes are accessible.

    LINK for the detailed alignment procedure with image tutorial.

    Abbe Diffraction Experiment (finite optics)

    Classical experiment for explaining Fourier transform done by a lens. The illumination stays the same as in the previous experiment, but a beamsplitter and a relay lens are added, for simultaneous observation of the Primary Image Plane and the Back Focal Plane.

    LINK for the detailed alignment procedure with image tutorial.

    Compound microscope with proper Köhler illumination (infinity optics)

    An infinity corrected microscope with proper Köhler illumination. All conjugate planes are accessible.

    LINK for the detailed alignment procedure with image tutorial.

    Abbe Diffraction Experiment (infinity optics)

    Classical experiment for explaining Fourier transform done by a lens. The setup of the previous experiment is reused and a laser pointer is added as a light source.

    LINK for the detailed alignment procedure with image tutorial.

    Coming soon:

    Laser Scanning Confocal Microscope

    Laser Scanning system is built on the detection side of the same setup. Scanning mirror can be rotated around one axis, which results in the translation of the point on the sample.

    Light Sheet Microscope

    The principle of Selective Plane illumination Microscopy is demonstrated with white light. The illumination path stays the same, only the collector lens is exchanged for a cylindrical one. The detection path is rotated by 90°.

    Participate

    If you have a cool idea, please don't hesitate to write us a line, we are happy to incorporate it in our design to make it even better.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryDiffraction/index.html b/docs/Toolboxes/DiscoveryDiffraction/index.html index 28719097d..81cc2072d 100644 --- a/docs/Toolboxes/DiscoveryDiffraction/index.html +++ b/docs/Toolboxes/DiscoveryDiffraction/index.html @@ -10,7 +10,7 @@ - + @@ -18,7 +18,7 @@

    Building The CourseBOX

    This is a guide for building the CourseBOX. If you were looking for another BOX version, click here.

    This guide currently describes how to build both the finite-corrected version and the infinity version of the CourseBOX.

    Content of each section

    1. Shopping
    2. 3D printing
    3. Tools
    4. Assembly

    Infinity Optics

    • Total price: 300 € without a Raspberry Pi (190 €) (assuming only material costs and printing everything yourself)
    • Printing time including preparation: 4 days
    • Assembly time: 1 day

    Shopping

    What to buy

    • Check out the RESOURCES for more information!
    Link - name of partAmountCommentPrice
    3D printing material~580 gChoose material that works with your 3D printer. If unsure, have a look at the guide in 3D printing section15 €
    Lens 50 mm5 piecesá 21 €
    Lens 100 mm3 piecesArtikel 2004á 6 €
    Lens for Eye Cube1 piece551.OALá 5 €
    Beamsplitter1 pieceArtikel 2137á 27 €
    Flashlight1 pieceLight source for microscope.á 7 €
    Laser Pointer1 pieceLight source for the Abbe experiment. With this very one you don't even need the flashlight. More information below.á 5 €
    Magnets64 piecesWhen using 3DP Cubes and baseplates. Ball magnets, diameter 5 mm.total 20 €
    Screws7 piecesM2×16 - 6 pieces; M2 nut - 6 pieces; M3×18 - 4 piecestotal ~2 €
    Screws112 piecesFor 3DP Cubes extra: (Art.-Nr. 00843 12) M3×12, galvanized steel - 64 pieces; (Art.-Nr. 00843 8) M3×8, galvanized steel - 64 pieces (or 128 pieces of M3×12) - BUT for this setup it isn't necessary to have screws on both sides ; (Art.-Nr. 025505 8) M5×8, galvanized steel - 96 pieces (half if one-sided)total ~4 €
    Raspberry Pi with accessoriesHave a look in our Bill-of-Materials for a complete list and links.190 €
    Chocolate1 barUse it as a reward when you're done.

    2 in 1 light source

    We propose the use of this Laser Pointer, because it also has a white LED. But we need to smartly adapt this for our experiments.

    • For beam expansion of the laser light, print a laser cap from the STL folder and put in it the RasPi lens that you removed from the camera
    • The LED here has a lens which cannot be removed. To be able to find a focussed image of your light source, take a thin permanent marker and make a cross on the surface of this lens. Now you'll be able to see a focussed image of the cross in the Field set of Aperture planes

    3D Printing:

    Parts

    To acquire the STL-files use the UC2-Configurator. The files themselves are in the RAW folder. The BOXes can be built using injection-moulded (IM) or 3D-printed (3DP) cubes.

    Note on the lens holders: If you use some other lens, you can generate a holder for it using our openSCAD design. Go to the Thingiverse page of this lens holder and use their in-built customizer to change the parameters of the insert.

    Completely new to 3D printing? Have a look into this beginner's guide!

    Our quick printing tutorial can be found here: UC2 YouSeeToo - How to print the base-cube?

    We have a good experience with this printer and settings:

    • Prusa i3/MK3S
      • Prusament PLA 1,75 mm, for one Box: 0,58 kg = 195 m = 90 hours = 15 €
      • Profile Optimal 0,15 mm, infill 20%, no support, 215/60°C

    Which tools to use

    ToolImageComment
    Electric screw driver with 2,5 mm hex bitFor putting the cubes together using M3×12 and M3×8 screws.
    2,5 mm hex keyFor fine adjustment of all the M3 screws if needed.
    Needle-nose PliersMight come handy

    Assembly

    Part - linkResultComment
    Baseplates16× baseplate puzzle
    Lens Cubes8× Lens Cube: 5× Lens Cube with 50 mm lens; 3× Lens Cube with 100 mm lens.
    Sample Cubes2× Sample Holder Cube
    Flashlight Cube1× Flashlight Cube
    Circular Aperture Cube2× Circular Aperture Cube
    Rectangular Aperture Cube1× Rectangular Aperture Cube
    Beamsplitter Cube1× Beamsplitter Cube
    RasPi Camera Cube1× Camera Cube with Raspberry Pi camera with the lens removed
    Laser Cube1× Laser Holder Cube and Laser Clamp with a cap for RasPi lens
    Eye Cube1× Eyeball Cube

    Software

    Prepare the Raspberry Pi following our tutorial in UC2-Software-GIT!

    Done! Great job!


    Finite-corrected Optics

    • Printing time including preparation: 5 days
    • Assembly time: 1 day

    Shopping

    What to buy

    • Check out the RESOURCES for more information!
    Link - name of partAmountCommentPrice per amount used
    3D printing material~620 gChoose material that works with your 3D printer. If unsure, have a look at the guide in 3D printing section20 €
    Microscope objective 4×1 piece10 €
    Lens 35 mm1 pieceWe did the alignment with lenses of these focal lengths, but other combinations are also possible. The alignment principle stays the same, but the positions of the elements will be different.22 €
    Lens 40 mm2 pieces44 €
    Lens 50 mm1 piece21 €
    Lens 75 mm1 piece20 €
    Lens 100 mm1 piece20 €
    Flashlight1 pieceLight source for the projector and microscope.7 €
    Magnets128 piecesBall magnets, diameter 5 mm.30 €
    Screws~120 piecesM3×12, galvanized steel - ~90 pieces; M3×8, galvanized steel - ~90 pieces; M3×18, galvanized steel - 2 pieces; M3×30, not magnetic - 1 piece; M3 nut~15 €
    Chocolate1 barUse it as a reward when you're done.

    3D Printing:

    Completely new to 3D printing? Have a look into this beginner's guide!

    Our quick printing tutorial can be found here: UC2 YouSeeToo - How to print the base-cube?

    We have a good experience with this printer and settings:

    • Prusa i3/MK3S
      • PLA 1,75 mm, for one Box: 0,6 kg = 235 m = 85 hours = 20 €
      • Profile Optimal 0,15 mm, infill 20%, no support, 215/60°C

    Note: The design of the mechanical Z-stage has recently been changed. The files here are not yet up-to-date. Please check the Mechanical Z-stage for the latest version. Same applies to the Lens Holder available here

    Housing

    Name of part - Link to STL fileAmount
    (01) Basic Cube 2×11 piece
    (02) Basic Lid 2×11 piece
    (03) Basic Cube 1×120 pieces
    (04) Basic Lid 1×120 pieces
    (05) Baseplate 4×14 pieces
    (06) Baseplate 4×21 piece
    (07) Baseplate 1×11 piece
    (08) Baseplate Connector 1×11 piece

    Inserts

    Name of part - Link to STL fileAmountComment
    (09) Z-Stage Focusing Insert1 pieceRotate the part in your slicer before printing. Always print it laying on the flat side.
    (10) Z-Stage Objective Mount1 pieceFor mounting the objective lens (RMS thread).
    (11a) Z-Stage Bottom Plate1 pieceThe plate holds the gear and screw in position, allowing them only to rotate but not to wobble.
    (11b) Z-Stage Top Plate1 pieceThe plate holds the gear and screw in position, allowing them only to rotate but not to wobble.
    (12) Z-Stage Gear1 pieceKindly borrowed from openflexure.
    (13) Lens Holder6 piecesDiameter fits for the listed lenses (25 mm).
    (14) Lens Holder Clamp6 piecesDiameter fits for the listed lenses (25 mm).
    (15) Cylindrical Lens Holder1 pieceDiameter fits for the listed lenses (25 mm).
    (16) Generic Sample Holder5 piecesIn the SimpleBOX, it is used to hold the object in the projector setup.
    (17) Generic Sample Holder Clamp5 piecesTo fix the sample.
    (18) Mirror Holder 45° 30×30mm²1 pieceSize fits for the listed mirrors.
    (19) Flashlight Holder2 pieces
    (20) Circular Aperture Guide2 pieces
    (21) Circular Aperture Wheel2 pieces
    (22) Circular Aperture Lid2 pieces
    (23) Circular Aperture Leaf14 pieces
    (24) Laser Holder2 pieces
    (25) Laser Clamp1 piece
    (26) Beam Expander Insert1 piece
    (27) Beam Expander Lens Adapter1 piece
    (28) Beamsplitter Insert1 piece

    Which tools to use

    ToolImageComment
    Electric screw driver with 2,5 mm hex bitFor putting the cubes together using M3×12 and M3×8 screws.
    2,5 mm hex keyFor fine adjustment of all the M3 screws if needed.
    1,5 mm hex key↑↑For mounting worm screws.
    Needle-nose PliersMight come handy

    Assembly

    Part - linkResultComment
    Baseplates1× "big" baseplate (4×2), 4× "small" baseplate (4×1), 1× "unit" baseplate (1×1), 1× "unit" baseplate connector (1×1)
    Z-Stage Cube1× mechanical Z-Stage, Sample Clamp not necessary
    Lens Cubes6× Lens Cube; Write the focal lengths of the lenses on the holders, so you can always easily find the right one when building the setups.
    Cylindrical Lens Cube1× Cylindrical Lens Cube
    Sample Cubes5× Sample Holder Cube
    Mirror Cube1× Mirror Cube
    Flashlight Cube1× Flashlight Cube
    Circular Aperture Cube2× Circular Aperture Cube
    Laser Cube1× Laser Holder Cube and Laser Clamp
    Beam Expander Cube1× Beam Expander Cube
    Beamsplitter Cube1× Beamsplitter Cube

    Done! Great job!

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryElectronics/04_1_seeedmicroscope/index.html b/docs/Toolboxes/DiscoveryElectronics/04_1_seeedmicroscope/index.html index 93af1c205..28fb76568 100644 --- a/docs/Toolboxes/DiscoveryElectronics/04_1_seeedmicroscope/index.html +++ b/docs/Toolboxes/DiscoveryElectronics/04_1_seeedmicroscope/index.html @@ -10,7 +10,7 @@ - + @@ -27,7 +27,7 @@ Close cube and attach it to the focus knob

  • Observe the motorized focus in action: Motorized Focus in Action

  • Adding Stage Scanning

    Stage scanning allows the automatic movement of the sample to enlarge the visible field. We use a simple servo motor to move a rubber-band-fitted sled, carrying the sample along an axis. This method provides a reliable movement path using steel rods salvaged from old CD/DVD drives.

    Simple rendering of an even simpler device

    Printing Parts

    Print the following parts using a 3D printer with PETG filament at 20% infill and 0.2mm layer height:

    Additional Components

    • Servo Motor 9G
    • M2 screws
    • UC2 ESP32-capable board (available at UC2-Shop)
    • Rubber band
    • 2x 3mm diameter, >50mm length steel rods (chrome), e.g., from an old DVD/CD drive or from online retailers.

    Assembly

    1. Assemble the base and slide components as illustrated: Assemble the Base

    2. Ensure the servo motor moves the slide smoothly to scan the sample area.

    For more information and community support, visit our GitHub Repository and Forum.

    Assembly

    Coming Soon

    Results

    Scanning of red blood cells contaminated with malaria in the field

    Stage in Action in continuous mode

    Stage in Action in continuous mode

    Add motorized flow-stop microscopy

    More coming soon

    Experiment 3: Timelapse of Yeast Cells

    • Capture the growth of yeast cells over time to study their behavior and development.
    • This can be set up in the Web GUI
    • Insert a FAT32 formatted SD card into the Xiao and verify it gets mounted correctly (monitor the USB serial)

    HeLa Cells

    Community and Support

    Join our community on Discord in the #tinyml channel for support and collaboration. Explore the extensive library of UC2-compatible parts to expand the capabilities of your microscope.

    For detailed tutorials and further information, visit openUC2 GitHub Repository.

    By combining the best components from the MatchboxScope and UC2 modular systems, the openUC2 XIAO Microscope offers a powerful and flexible tool for a wide range of applications, making advanced microscopy accessible to a broader audience.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryElectronics/Automation_intro/index.html b/docs/Toolboxes/DiscoveryElectronics/Automation_intro/index.html index d2c9b5936..bd0541d7e 100644 --- a/docs/Toolboxes/DiscoveryElectronics/Automation_intro/index.html +++ b/docs/Toolboxes/DiscoveryElectronics/Automation_intro/index.html @@ -10,7 +10,7 @@ - + @@ -34,7 +34,7 @@

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryElectronics/Camera Setup/index.html b/docs/Toolboxes/DiscoveryElectronics/Camera Setup/index.html index b6efc1003..7091f06da 100644 --- a/docs/Toolboxes/DiscoveryElectronics/Camera Setup/index.html +++ b/docs/Toolboxes/DiscoveryElectronics/Camera Setup/index.html @@ -10,13 +10,13 @@ - +

    openUC2 Camera Setup

    The openUC2 Camera Setup provides guidance on configuring and using webcams and Daheng cameras for imaging purposes within the openUC2 ecosystem. This setup allows users to seamlessly integrate cameras into their experimental setups and utilize them for imaging and data acquisition. Below are detailed instructions for setting up cameras on different platforms:

    Webcam

    On Windows

    • Users can utilize the built-in webcam functionality provided by Windows. They need to open the webcam using the Windows internal software and start streaming.

    On Mac

    • For Mac users, the Photobooth application can be used to access the webcam. Simply open the Photobooth application and select the camera to start capturing images or videos.

    Alternative Method

    • Users can also use the openUC2 Web Serial interface available at https://youseetoo.github.io/indexWebSerialTest.html to open the camera stream.

    Daheng Cameras

    On Windows

    • To use Daheng cameras on Windows, users should visit https://www.get-cameras.com/customerdownloads?submissionGuid=93704570-544a-43e8-83d6-f5f3cf0b97fb.
    • From the provided options, select the "Windows SDK USB2+USB3+GigE (including Directshow + Python) Galaxy V1.23.2305.9161" package.
    • Install the software and drivers from the downloaded package.
    • Once installed, users can start the "Galaxy Viewer" application to begin capturing images using the Daheng camera.

    On Android Phones

    • To use Daheng cameras on Android phones, users should first visit https://www.get-cameras.com/customerdownloads?submissionGuid=93704570-544a-43e8-83d6-f5f3cf0b97fb.
    • From the provided options, select the "Android USB3 SDK v1.2.2112.9201" package and download it.
    • After downloading, unzip the package and install the "GxViewer_GetRawImage.apk" on the Android phone (users may need to allow installation of apps from unknown sources or 3rd party apps).
    • Connect the Daheng camera to the Android phone using a USB-C to Daheng cable (adapter).
    • Open the installed app ("GxViewer_GetRawImage") and grant access to the USB connection when prompted.
    • Users can adjust camera settings by swiping left in the app and then proceed to capture images.

    Video Tutorial

    A video tutorial demonstrating the camera setup is available at https://youtu.be/PtdU5qE6BSc.

    The openUC2 Camera Setup provides users with easy-to-follow instructions for configuring and utilizing webcams and Daheng cameras on different platforms, enabling seamless integration into various imaging applications and experiments.

    XIAO Sense Camera

    Coming Soon.

    You can have a glimpse here https://github.com/openUC2/openUC2-SEEED-XIAO-Camera/

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryElectronics/XYZ_stage_mico/index.html b/docs/Toolboxes/DiscoveryElectronics/XYZ_stage_mico/index.html index b0f764f1f..d93ad119c 100644 --- a/docs/Toolboxes/DiscoveryElectronics/XYZ_stage_mico/index.html +++ b/docs/Toolboxes/DiscoveryElectronics/XYZ_stage_mico/index.html @@ -10,7 +10,7 @@ - + @@ -20,7 +20,7 @@ Fully assembled XYZ stage with high precision stepper motors, designed for seamless automation in microscopy setups.

    Key Features:

    • Mounting Flexibility: The XYZ stage can be easily mounted on top of a cube or suspended at the side, offering flexibility in integrating it into various experimental setups.

    • Interferometer and Microscopy Applications: This stage finds application in interferometers and light-sheet/fluorescence microscopes, where it plays a crucial role in precisely manipulating the sample in all directions.

    • Durable Construction: Constructed entirely from metal, the XYZ stage ensures robustness and stability during delicate experiments.

    • High Precision Stepper Motors: The stage is equipped with non-captive stepper motors, delivering exceptional precision during positioning operations.

    XYZ Stage in an Interferometer Setup Image showing two XYZ stages (one motorized and one manual stage) employed in an OCT / Michelson type interferometer.

    Setup and Integration: To assist users in setting up and integrating the XYZ stage into their experimental configurations, a comprehensive video guide is available. This instructional video can be viewed at https://www.youtube.com/embed/E_hhclFqx5g.

    For further information or inquiries regarding the openUC2 XYZ Micrometer Stage, interested parties can refer to the official openOCT project page at https://github.com/openUC2/openUC2-Hackathon-openOCTRemote. The project page contains additional details, resources, and support for utilizing the XYZ stage effectively in diverse research settings.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryElectronics/seeedmicroscope/index.html b/docs/Toolboxes/DiscoveryElectronics/seeedmicroscope/index.html index 78a3618de..51d01afaf 100644 --- a/docs/Toolboxes/DiscoveryElectronics/seeedmicroscope/index.html +++ b/docs/Toolboxes/DiscoveryElectronics/seeedmicroscope/index.html @@ -10,7 +10,7 @@ - + @@ -55,7 +55,7 @@ Seeedmicroscope_40

    Focus sample with manual focusing stage Seeedmicroscope_41

    Using an Android APP

    For Android users, please have a look here: https://matchboxscope.github.io/docs/APP

    This app will help you connect and capture images using this microscope.

    Conclusion

    Congratulations! You have successfully assembled your modular microscope. With this microscope, you can now observe various samples and capture images using the camera connected to your smartphone or computer. This modular design allows for easy customization and experimentation, making it a versatile tool for exploring the microscopic world.

    Remember, the performance of the microscope might be affected by the modifications made to the objective lens, so adjust your expectations accordingly. Enjoy exploring and discovering the hidden wonders of the microcosmos!

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryElectronics/spectrometer/index.html b/docs/Toolboxes/DiscoveryElectronics/spectrometer/index.html index 72b1a7684..95fee739c 100644 --- a/docs/Toolboxes/DiscoveryElectronics/spectrometer/index.html +++ b/docs/Toolboxes/DiscoveryElectronics/spectrometer/index.html @@ -10,7 +10,7 @@ - + @@ -31,7 +31,7 @@ 8. Example Plot of a spectrum (white light)

    Operating the Spectrometer

    • Calibration: Essential for accurate measurements.
    • Usage: Can be used to analyze spectra from various light sources.

    10. Example Plot of a spectrum (red LED)

    10. Example Plot of a spectrum (green LED)

    Further Reading and Resources

    For more in-depth information on spectroscopy and related subjects, refer to resources provided by Public Lab and other scientific publications.

    Public Lab: https://publiclab.org/wiki/spectrometry

    Gaudi Lab: https://www.gaudi.ch/GaudiLabs/?page_id=825

    ESPectrometer: https://matchboxscope.github.io/docs/Variants/ESPectrometer

    Youtube: https://www.youtube.com/watch?app=desktop&v=T_goVwwxKE4&ab_channel=Les%27Lab

    Software: https://github.com/leswright1977/PySpectrometer

    Contributing and Collaboration

    This open-source project welcomes contributions from everyone. Whether you're experienced in CAD design or programming,

    or just starting out, there are many ways to contribute. Check out our CONTRIBUTING guide for more details.

    Licensing and Collaboration Notes

    This project is licensed under the CERN open hardware license. We encourage users to share their modifications and improvements. All design files are available for free, but we appreciate feedback and collaboration.

    For details on the licensing, please visit License.md.

    Note: Design files were created using Autodesk Inventor 2019 (EDUCATION).

    Stay Connected

    If you find this project beneficial, please star this repository, follow us on Twitter, and cite our webpage in your work!

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryFluorescence/index.html b/docs/Toolboxes/DiscoveryFluorescence/index.html index 8ef4203a4..cd0c135cc 100644 --- a/docs/Toolboxes/DiscoveryFluorescence/index.html +++ b/docs/Toolboxes/DiscoveryFluorescence/index.html @@ -10,13 +10,13 @@ - + - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryInterferometer/InlineHolography/index.html b/docs/Toolboxes/DiscoveryInterferometer/InlineHolography/index.html index 20f1159f1..57a5bbbfe 100644 --- a/docs/Toolboxes/DiscoveryInterferometer/InlineHolography/index.html +++ b/docs/Toolboxes/DiscoveryInterferometer/InlineHolography/index.html @@ -10,7 +10,7 @@ - + @@ -26,7 +26,7 @@ Here you will finde a guide how to setup the ImSwitch Software:

    • Download the Software package from Dropbox
    • Install Anaconda (Important: When you're asked to add Anaconda to the PATH environment, say YES!)
    • Install Arduino + all drivers
    • Install the CH340 driver
    • Extract ImSwitch.zip to /User/$USER$/Documents/ImSwitch (clone or download from GitHub)
    • Extract ImSwitchConfig.zip to /User/$USER$/Documents/ImSwitchConfig (clone or download from GitHub)
    • Optional: Install Visual Studio Code + the Python plugin => setup the Visual studio code IDE for Python

    Install ImSwitch for UC2

    • Open the anaconda command (Windows + R => "CMD" => enter)
    • Type: conda create -n imswitch
    • Wait until environment is created
    • Type: conda activate imswitch
    • Type: cd /User/$USER$/Documents/ImSwitch
    • Type: pip install -r requirements.txt
    • Type: pip install -e ./
    • Type: imswitch

    Reconstruction

    This video will show you how to reconstruct holographic data using UC2 and ImSwitch.

    https://youtu.be/CWXx0Dw-Jro

    Things to explore:

    • Get Familiar with ImSwitch
    • Get a sparse sample, e.g. plankton on a coverslip would be best, or just dust/sand/cheek cells, and try to acquire some holograms

    Refocusing using ImSwitch

    Using the In-line Holography plug-in widget in ImSwitch we can refocus the sample by using a propagator in reverse from the recorded hologram in real-time.

    The In-line holography experiment can also be produced with a laser source. In this version of the In-line holography setup, we use white light as source and we use filters to have quasi-monochromatic light illuminating the sample.

    ADDITIONAL Speech-to-text

    The first experiment will be the inline holographic microscope. This is a relatively simple experiment where we can show both the temporal and especially the spatial coherence. We will create a lensless microscope where we use an LED that is filtered by a color filter and a pinhole to create a quasi-monochromatic coherent light source. This then illuminates the transparent, sparse sample before the scattered wave hits the camera sensor. This is relatively simple to build with the UC2 system; for this, we only need the LED holder, a gel color filter (as used in theaters), aluminum foil in which we'll pierce a hole in order to create a small pinhole, some space between this created light source and the sample, and then the sample, which is ultimately glued very closely onto the sensor, so that the pinhole virtually scales in size as the ratio between the distance of the light source to the sample and the sample to the sensor. In order to build the system, we will place the light source created here on the far left; then an empty cube follows right next to it; then another empty cube follows on the right-hand side; and then we combine the sample mount and the camera into one cube so that the distance between the sample and the camera is minimized. All these cubes should be mounted on puzzle pieces on the lower end and the upper bar so that the whole system becomes stable. We will turn on the camera and also turn on the light source. Then we go to the web app after connecting to the camera through Wi-Fi, and then we will try to see any variation in the contrast of the camera. If the contrast is not high enough because of scattering background light, we have to cover the system with a box or some enclosure so that there is no stray light hitting the sensor, as this would give a very bad result in the reconstruction. When you're lucky, you can see the sample as a kind of shadow on the sensor already.
The core idea now is to reconstruct this digital hologram, where we have to carefully maximize the quality of the raw image. Compression artifacts from the ESP32 camera are unavoidable and will eventually degrade the final image results. What we are going to do now is to take an image and then back-propagate it by the distance from the sensor to the sample plane using a numerical transformation. What this really means is that we take the image and back-propagate every pixel by a certain distance numerically. This is done using a fast Fourier transform, where we first Fourier-transform the image so that it is in frequency space; then we multiply it with a parabolic phase factor, and then we inverse-Fourier-transform the result to end up in real space again. This amounts to a convolution with the Fresnel kernel, which essentially propagates every pixel by a certain distance depending on the wavelength and sampling rate. We can conveniently do that in Python with the script that is provided in the Jupyter notebook. For this, we go to the website of the ESP32, hit the capture button, and download the image onto the computer. Then we start the Jupyter notebook server by opening the command line in Windows or Linux and entering "jupyter notebook". Then we go to the browser and open the example Jupyter notebook that will reconstruct our hologram. We enter the path of our downloaded image file and then reconstruct the result. There are several problems which we can describe but not solve at the moment. Inline holography, as the name already says, has the problem that the light source and the scattered wave interfere in line. That means the point source will create spherical waves that propagate in free space and become almost a plane wave by the time they hit the sample. 
Here, some parts of the wave are scattered, which means that the plane wave is altered in its phase depending on the phase of the microscopic sample, and some portion of the wave is unaltered. After the sample, the unscattered and scattered waves propagate to the sensor, where the two amplitudes superpose — they add up. Since our camera detector cannot record amplitudes (the optical frequency is very, very high), we average over time. That means that we record intensity values in the end. This also means that the information about the phase is lost. When we reconstruct the hologram, the computer cannot differentiate whether the sample is in front of or behind the sensor since the phase information is not there anymore. This means that in the reconstruction, the so-called twin image always overlays the real image. This causes unavoidable ringing artifacts in the reconstruction. There are some ways to remove them, for example by estimating the phase using iterative algorithms or model-based approaches, where we take the full image-acquisition process into account. Alternatively, there are also machine-learning algorithms, where an algorithm estimates the background and removes these artifacts. However, here we won't use these algorithms, as we just want to learn how we can reconstruct the sample.

    Some notes on the transform that we have just used here. Briefly, it is a transformation from spatial to frequency coordinates. This sounds very abstract, but, for example, our ear does this all the time. When we talk, our voice generates a vibration of the air. That means different frequencies oscillate and add up to something like noise. Our ear, in turn, has the cochlea, where many nerve cells oscillate depending on the resonance frequency of each cell. In a way, they unmix the noise and demodulate the different frequencies. That means that if you sing an "A", there is the fundamental frequency and several higher and lower harmonics. A lens does something very similar, but in two dimensions. You can have optical frequencies where, for example, a grating has stripes that represent on and off and on and off

    at a certain distance, representing a periodic structure. A lens, when you place something in its focal plane, will then Fourier-transform this into the demodulated frequency components. When you, for example, have a periodic structure like a grating, it will produce two peaks in its Fourier transform, i.e. in the focal plane on the image side. The fast Fourier transform is its equivalent in computational science. You can take an image and represent it in its frequency components; that means it estimates the sum of all the different frequency components that make up the image. We use this fast Fourier transform in our code to bring the image from real space to frequency space and back again. But since we start with an intensity image without the phase, we lack part of the information.

    This property creates additional artifacts: since we lose the phase information when we record intensity values on our camera, we are also limited to sparse samples, like dust particles captured on a cover slip [transcription unclear in original]. The optical resolution of our microscope is bound to the pixel size and the opening angle — the numerical aperture — that is created by the illumination and the sensor size that we use to detect the image. However, it is a very nice way of demonstrating how holography works and how we can detect images without a lens. Many different groups have used it, for example, to detect malaria in blood, since the field of view is very large.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryInterferometer/Interferometer_intro/index.html b/docs/Toolboxes/DiscoveryInterferometer/Interferometer_intro/index.html index ae9f11525..c5aa02182 100644 --- a/docs/Toolboxes/DiscoveryInterferometer/Interferometer_intro/index.html +++ b/docs/Toolboxes/DiscoveryInterferometer/Interferometer_intro/index.html @@ -10,13 +10,13 @@ - +

    openUC2 Interferometer Introduction

    This is a collection of different mini-tutorials to assemble the different optical systems using UC2. First, we will introduce the setup with a brief text. Afterwards, a little video will help you assemble the device. If you have any questions, please feel free to post a question in the Forum or in the GitHub Issue section.

    What will you learn?

    • What's inside the box?
    • How can we start different experiments?

    What's inside the box?

    Duration: 3

    Inside the box you will find a number of different cubes, all coming with different functionalities. Below you will find a list of all modules inside the discovery kit.

    Lasers and Beamexpanders

    Duration: 3

    Lasers and Interferometers

    Duration: 3

    Microscopes

    Duration: 3

    Polarization

    Duration: 3

    Microscope with Webcam

    Duration: 3

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryInterferometer/MachZenderInterferometer/index.html b/docs/Toolboxes/DiscoveryInterferometer/MachZenderInterferometer/index.html index ff26868ca..442c0531c 100644 --- a/docs/Toolboxes/DiscoveryInterferometer/MachZenderInterferometer/index.html +++ b/docs/Toolboxes/DiscoveryInterferometer/MachZenderInterferometer/index.html @@ -10,7 +10,7 @@ - + @@ -20,7 +20,7 @@

    Step 6: Setup and alignment

    Place the camera on the sample arm as shown. Put the screen on the other arm exit. Place the sample holder using one half of the cube at a time to not collide with the microscope objective.

    Turn the laser on and use the screen to align both beams using the screws on the reference mirror.

    Step 7: Connect and adjust in the MVS app

    Connect the camera to the computer and open the MVS app. Block the reference beam. Move the coverslide such that your sample enters the FoV (Field of View). Unblock the reference beam. Zoom into the image to distinguish the fringe pattern in the MVS camera display. Adjust the angles of the reference mirror using the screws to change the fringe pattern as shown.

    Step 7: Data processing

    Process the data. Phase unwrapping possible.

    First Tests with Modifications to the Original Setup

    Using Lei code, the need of a linear stage for the sample was identified. Adjusting the objective and tube lens enhances the interference, making it crucial to use the ImSwitch interface to see the FFT in real time and optimize. The final goal is to move the position of the first order interference to use Lei algorithm (or some Phase unwrapping algorithm) to retrieve the Phase. To achieve this, two images need to be acquired: a sample image and a background image (without a cover slide or a slide region with no specimen).

    Result of Phase Unwrapping

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryInterferometer/MichelsonInterferometer/index.html b/docs/Toolboxes/DiscoveryInterferometer/MichelsonInterferometer/index.html index bfcfc04ff..43178a05a 100644 --- a/docs/Toolboxes/DiscoveryInterferometer/MichelsonInterferometer/index.html +++ b/docs/Toolboxes/DiscoveryInterferometer/MichelsonInterferometer/index.html @@ -10,7 +10,7 @@ - + @@ -35,7 +35,7 @@

    Step 12: Adjust the camera exposure

    Adjust the exposure time of the camera. You should see a fringe pattern. Try to adjust the reference mirror screws finely to bring the center of the interference pattern to the center of the camera.

    Experimental Data

    This is the fully assembled UC2 interferometer with a green laser diode, a camera representing a screen and digitizing the interference, a beamsplitter, a kinematic mirror and a mirror that can be translated along Z.

    If you bring the two beams on top of each other, you will be able to observe the interference pattern, which in case of one beam exactly overlaying the other will be a ring pattern. These rings are also called Newton rings and come from the fact that we interfere two divergent beams, leading to a super position of two spherical caps/waves.

    Using the ESP32 camera, we can quantify the motion of the beams and e.g. measure distances or angles.

    Conclusion

    Congratulations! You have successfully built a Michelson Interferometer using the UC2 modular microscope toolbox. This device allows you to explore the interference properties of light and perform fascinating experiments. As you move one of the arms, you will observe constructive and destructive interference patterns on the camera, demonstrating the wave-like nature of light. Have fun experimenting with different setups and learning more about the wave-particle duality of light!

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryInterferometer/SoftwareTutorial/index.html b/docs/Toolboxes/DiscoveryInterferometer/SoftwareTutorial/index.html index 8ddf1445c..9534077ec 100644 --- a/docs/Toolboxes/DiscoveryInterferometer/SoftwareTutorial/index.html +++ b/docs/Toolboxes/DiscoveryInterferometer/SoftwareTutorial/index.html @@ -10,14 +10,14 @@ - +

    HIK-Camera Software Installation

    Install MVS App for Camera Utilization

    Camera model: MV-CE060-10UC. Visit the HIKROBOTICS website and download the MVS software suitable for your computer. Below are steps exemplifying the software installation for Mac.

    Install the downloaded file.

    Open the MVS Software.

    You should see the following window.

    Connect the camera. Refresh the USB line to detect the camera.

    Select the make-link button on the detected camera.

    The following window should be displayed.

    Click on the play button in the actions bar of the camera.

    If properly connected, you should see a real-time image. Adjust the exposure if the image is overexposed.

    To adjust the exposure time, go to the Feature tree, select the Acquisition Control Category, and change the Exposure Auto option to Continuous.

    Now, a clear image with good contrast should be visible.

    To stop recording, click on the stop button in the camera's actions bar.

    To disconnect the camera, click on the break-link button next to the detected camera in the USB devices list.

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPhaseMicroscopy/DPCmicroscopy/index.html b/docs/Toolboxes/DiscoveryPhaseMicroscopy/DPCmicroscopy/index.html index 2899f2af8..9eca27967 100644 --- a/docs/Toolboxes/DiscoveryPhaseMicroscopy/DPCmicroscopy/index.html +++ b/docs/Toolboxes/DiscoveryPhaseMicroscopy/DPCmicroscopy/index.html @@ -10,7 +10,7 @@ - + @@ -19,7 +19,7 @@

    Step 3: Build the DPC setup

    Substep 1

    Build the camera module as shown. It comprises a tube lens and a Hikrobot Camera. Adjust the screw which binds the camera to the camera base plate to get the right distance between the camera and the tube lens.

    Substep 2

    Insert the non-kinematic mirror, the microscope objective in the fixed mount and the XYZ stage accordingly.

    Substep 3

    Build the illumination module which comprises of the LED array and the condenser lens as shown.

    Substep 4

    Finally, on top of the module built in substep 2, add the illumination module.

    Step 4: Adjust the Source-sample distance

    First, adjust the distance between the LED array and the condenser lens by placing them a focal distance (f = 50 mm) apart. This assures the plane wave illumination. Then, adjust the XYZ to the central positions. Adjust the Microscope objective position so that it matches roughly the working distance.

    Step 5: Focus on the sample

    Use Imswitch to turn one of the central LEDs, place a test sample to focus on it by coarse moving the microscope objective and finely tuning the height using the XYZ stage. Once it is in focus, adjust the distance from the condenser to the sample to be the focal length (f = 50 mm). In this geometry the LED array dimensions are near the match illumination condition. Hence, some LEDs illuminate at the objective NA (NAi = NAobj).

    Note: If your sample is transparent be careful not to crash the sample with the microscope objective! For more information about this experimental setup look at: 3D differential phase-contrast microscopy with computational illumination using an LED array.

    Example of illuminating sample with one half circle illumination. We should be able to see the phase gradient using oblique illumination. In the figure we can compare a defocused and focused image of a cheek cells sample.

    Step 6: Run the ImDPC experiment!

    Once you have focused on the sample, adjust the desired FoV. Now you are set. Click Start on the DPC widget!

    Congrats! You have created a DPC microscope with OpenUC2!

    DPC Images

    Using the reconstruction algorithm we can retrieve the phase of the sample.

    First test with the OpenUC2-DPC setup:

    In the animation you can compare the contrast that we can get with brightfield illumination and the DPC reconstruction generated by the four images taken with the half circle illumination.

    Taking a series of DPC images at different focal planes. Cropped DPC image of Unknown cells (top) and Cheek cells (bottom) captured with 0.25 NA microscope objective with 10x magnification.

    Left:Cropped DPC image captured with 0.17 NA microscope objective with 4x magnification.

    Reconstruction algorithm (Waller-Lab)

    The reconstruction algorithm works with the development of the Weak Object Transfer Function (WOTF). Using the code implemented by Waller (Waller-Lab/DPC), we are able to reconstruct the absorption and phase of the samples. Here we explain each step and implementation of the code using Imswitch.

    We are going to revise each part of the code and understand it.

    Acquisition

    We need four images corresponding to each half-circle illumination pattern. With a good exposure time for the camera to reduce noise. In the figure we can see an example of the four captured DPC images.

    We can correct the images using flatfield correction. Flatfield correction consists of taking an image without the sample; the image to be corrected is then divided by this flatfield image. This enables us to get rid of artifacts like dust on the camera, for instance.

    The code

    The code consists of a Jupyter notebook and one Python script.

    Python script: dpc_algorithm.py

    This script contains the core algorithm to solve the DPC problem and from the four acquired images retrieve the phase.

    import numpy as np
    from scipy.ndimage import uniform_filter
    # Shorthand used throughout the DPC code.
    pi = np.pi          # circle constant
    naxis = np.newaxis  # axis-expansion helper for broadcasting


    def F(x):
        """2D forward FFT: real-space image -> frequency space."""
        return np.fft.fft2(x)


    def IF(x):
        """2D inverse FFT: frequency space -> real-space image."""
        return np.fft.ifft2(x)

    def pupilGen(fxlin, fylin, wavelength, na, na_in=0.0):
        """Binary pupil mask on the (fx, fy) frequency grid.

        Frequencies with |f| <= na/wavelength pass; when na_in > 0 the
        central disc |f| < na_in/wavelength is additionally blocked
        (annular illumination).
        """
        radial_sq = fxlin[naxis, :]**2 + fylin[:, naxis]**2
        pupil = np.array(radial_sq <= (na / wavelength)**2)
        if na_in != 0.0:
            pupil[radial_sq < (na_in / wavelength)**2] = 0.0
        return pupil

    def _genGrid(size, dx):
    xlin = np.arange(size, dtype='complex128')
    return (xlin-size//2)*dx

    class DPCSolver:
        """Weak-object-transfer-function (WOTF) DPC solver (after Waller Lab).

        Takes a stack of half-circle-illumination intensity images and
        recovers absorption (real part) and phase (imaginary part) per frame
        by a closed-form, Tikhonov-regularized least-squares inversion in
        frequency space.
        """

        def __init__(self, dpc_imgs, wavelength, na, na_in, pixel_size, rotation, dpc_num=4):
            """
            dpc_imgs   : (num_frames*dpc_num, ny, nx) raw DPC image stack
            wavelength : illumination wavelength (same length unit as pixel_size)
            na         : objective numerical aperture
            na_in      : inner source NA (0.0 for plain half circles)
            pixel_size : effective object-space pixel size
            rotation   : source half-circle orientations in degrees
            dpc_num    : number of DPC images per absorption/phase frame
            """
            self.wavelength = wavelength
            self.na = na
            self.na_in = na_in
            self.pixel_size = pixel_size
            # BUGFIX: honour the caller-supplied dpc_num (was hard-coded to 4).
            self.dpc_num = dpc_num
            self.rotation = rotation
            # Unshifted frequency axes matching np.fft conventions.
            self.fxlin = np.fft.ifftshift(_genGrid(dpc_imgs.shape[-1], 1.0/dpc_imgs.shape[-1]/self.pixel_size))
            self.fylin = np.fft.ifftshift(_genGrid(dpc_imgs.shape[-2], 1.0/dpc_imgs.shape[-2]/self.pixel_size))
            self.dpc_imgs = dpc_imgs.astype('float64')
            self.normalization()
            self.pupil = pupilGen(self.fxlin, self.fylin, self.wavelength, self.na)
            self.sourceGen()
            self.WOTFGen()
            # ROBUSTNESS: default regularization so solve() works even if the
            # caller never calls setTikhonovRegularization() explicitly.
            self.setTikhonovRegularization()

        def setTikhonovRegularization(self, reg_u=1e-6, reg_p=1e-6):
            """Set Tikhonov weights for absorption (reg_u) and phase (reg_p)."""
            self.reg_u = reg_u
            self.reg_p = reg_p

        def normalization(self):
            """Flatten background and remove the DC term of every image in place."""
            for img in self.dpc_imgs:
                img /= uniform_filter(img, size=img.shape[0]//2)
                meanIntensity = img.mean()
                img /= meanIntensity    # normalize intensity with DC term
                img -= 1.0              # subtract the DC term

        def sourceGen(self):
            """Build one half-circle source pattern per rotation angle."""
            self.source = []
            pupil = pupilGen(self.fxlin, self.fylin, self.wavelength, self.na, na_in=self.na_in)
            for rotIdx in range(self.dpc_num):
                self.source.append(np.zeros((self.dpc_imgs.shape[-2:])))
                rotdegree = self.rotation[rotIdx]
                if rotdegree < 180:
                    self.source[-1][self.fylin[:, naxis]*np.cos(np.deg2rad(rotdegree))+1e-15 >=
                                    self.fxlin[naxis, :]*np.sin(np.deg2rad(rotdegree))] = 1.0
                    self.source[-1] *= pupil
                else:
                    # Opposite half circle expressed as (pupil - other half).
                    self.source[-1][self.fylin[:, naxis]*np.cos(np.deg2rad(rotdegree))+1e-15 <
                                    self.fxlin[naxis, :]*np.sin(np.deg2rad(rotdegree))] = -1.0
                    self.source[-1] *= pupil
                    self.source[-1] += pupil
            self.source = np.asarray(self.source)

        def WOTFGen(self):
            """Compute absorption (Hu) and phase (Hp) transfer functions."""
            self.Hu = []
            self.Hp = []
            for rotIdx in range(self.source.shape[0]):
                FSP_cFP = F(self.source[rotIdx]*self.pupil)*F(self.pupil).conj()
                I0 = (self.source[rotIdx]*self.pupil*self.pupil.conj()).sum()
                self.Hu.append(2.0*IF(FSP_cFP.real)/I0)
                self.Hp.append(2.0j*IF(1j*FSP_cFP.imag)/I0)
            self.Hu = np.asarray(self.Hu)
            self.Hp = np.asarray(self.Hp)

        def solve(self, xini=None, plot_verbose=False, **kwargs):
            """Invert the 2x2 regularized normal equations for each frame.

            Returns a complex array per frame: real part = absorption,
            imaginary part = phase. (xini/plot_verbose are accepted for
            interface compatibility and currently unused.)
            """
            dpc_result = []
            AHA = [(self.Hu.conj()*self.Hu).sum(axis=0)+self.reg_u, (self.Hu.conj()*self.Hp).sum(axis=0),
                   (self.Hp.conj()*self.Hu).sum(axis=0), (self.Hp.conj()*self.Hp).sum(axis=0)+self.reg_p]
            determinant = AHA[0]*AHA[3]-AHA[1]*AHA[2]
            for frame_index in range(self.dpc_imgs.shape[0]//self.dpc_num):
                fIntensity = np.asarray([F(self.dpc_imgs[frame_index*self.dpc_num+image_index]) for image_index in range(self.dpc_num)])
                AHy = np.asarray([(self.Hu.conj()*fIntensity).sum(axis=0), (self.Hp.conj()*fIntensity).sum(axis=0)])
                absorption = IF((AHA[3]*AHy[0]-AHA[1]*AHy[1])/determinant).real
                phase = IF((AHA[0]*AHy[1]-AHA[2]*AHy[0])/determinant).real
                dpc_result.append(absorption+1.0j*phase)

            return np.asarray(dpc_result)

    Jupyter notebook: main_dpc.ipynb

    With this Jupyter notebook you can test the DPC reconstruction algorithm using your own images!

    Import Modules

    %load_ext autoreload
    %autoreload 2
    %matplotlib notebook
    import numpy as np
    import matplotlib.pyplot as plt
    from os import listdir
    from skimage import io
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    from dpc_algorithm import DPCSolver

    Load DPC Measurements

    # Load the measured DPC frames (all .tif files, name-sorted) and
    # preview the first set of four.
    data_path  = "../sample_data/" #INSERT YOUR DATA PATH HERE
    image_list = sorted(fname for fname in listdir(data_path) if fname.endswith(".tif"))
    dpc_images = np.array([io.imread(data_path + fname) for fname in image_list])

    f, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(6, 6))
    for plot_index in range(4):
        row, col = divmod(plot_index, 2)
        panel = ax[row, col]
        panel.imshow(dpc_images[plot_index], cmap="gray",
                     extent=[0, dpc_images[0].shape[-1], 0, dpc_images[0].shape[-2]])
        panel.axis("off")
        panel.set_title("DPC {:02d}".format(plot_index))
    plt.show()

    Output (example):

    Set System Parameters

    # Optical system parameters (lengths in microns).
    wavelength = 0.514                  # illumination wavelength
    mag = 40.0                          # objective magnification
    na = 0.40                           # objective numerical aperture
    na_in = 0.0                         # inner source NA (0 = plain half circles)
    pixel_size_cam = 6.5                # camera pixel size
    dpc_num = 4                         # DPC images per absorption/phase frame
    pixel_size = pixel_size_cam/mag     # effective object-space pixel size
    rotation = [0, 180, 90, 270]        # source half-circle orientations, degrees

    DPC Absorption and Phase Retrieval

    Initialize DPC Solver

    # Build the DPC solver from the measured stack and the system parameters
    # above (precomputes frequency grids, pupil, source patterns, and WOTFs).
    dpc_solver_obj = DPCSolver(dpc_images, wavelength, na, na_in, pixel_size, rotation, dpc_num=dpc_num)

    Visualize Source Patterns

    # Plot the four source patterns in NA-normalized frequency coordinates.
    max_na_x = max(dpc_solver_obj.fxlin.real*dpc_solver_obj.wavelength/dpc_solver_obj.na)
    min_na_x = min(dpc_solver_obj.fxlin.real*dpc_solver_obj.wavelength/dpc_solver_obj.na)
    max_na_y = max(dpc_solver_obj.fylin.real*dpc_solver_obj.wavelength/dpc_solver_obj.na)
    min_na_y = min(dpc_solver_obj.fylin.real*dpc_solver_obj.wavelength/dpc_solver_obj.na)
    f, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(6, 6))
    for plot_index, src in enumerate(dpc_solver_obj.source):
        row, col = divmod(plot_index, 2)
        panel = ax[row, col]
        panel.imshow(np.fft.fftshift(src),
                     cmap='gray', clim=(0, 1), extent=[min_na_x, max_na_x, min_na_y, max_na_y])
        panel.axis("off")
        panel.set_title("DPC Source {:02d}".format(plot_index))
        panel.set_xlim(-1.2, 1.2)
        panel.set_ylim(-1.2, 1.2)
        panel.set_aspect(1)

    Output (example):

    Visualize Weak Object Transfer Functions

    # Plot the weak-object transfer functions: top row = absorption (Hu.real),
    # bottom row = phase (Hp.imag), one column per source orientation.
    f, ax = plt.subplots(2, 4, sharex=True, sharey=True, figsize=(10, 4))
    for plot_index in range(ax.size):
        row, col = divmod(plot_index, 4)
        panel = ax[row, col]
        divider = make_axes_locatable(panel)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        if row == 0:
            plot = panel.imshow(np.fft.fftshift(dpc_solver_obj.Hu[col].real), cmap='jet',
                                extent=[min_na_x, max_na_x, min_na_y, max_na_y], clim=[-2., 2.])
            panel.set_title("Absorption WOTF {:02d}".format(col))
            plt.colorbar(plot, cax=cax, ticks=[-2., 0, 2.])
        else:
            plot = panel.imshow(np.fft.fftshift(dpc_solver_obj.Hp[col].imag), cmap='jet',
                                extent=[min_na_x, max_na_x, min_na_y, max_na_y], clim=[-.8, .8])
            panel.set_title("Phase WOTF {:02d}".format(col))
            plt.colorbar(plot, cax=cax, ticks=[-.8, 0, .8])
        panel.set_xlim(-2.2, 2.2)
        panel.set_ylim(-2.2, 2.2)
        panel.axis("off")
        panel.set_aspect(1)

    Output (example):

    Solve DPC Least Squares Problem

    # Tikhonov regularization weights [absorption, phase]; tune based on SNR.
    dpc_solver_obj.setTikhonovRegularization(reg_u = 1e-1, reg_p = 5e-3)
    dpc_result = dpc_solver_obj.solve()
    _, axes  = plt.subplots(1, 2, figsize=(10, 6), sharex=True, sharey=True)

    # Left panel: absorption (real part of the result).
    divider = make_axes_locatable(axes[0])
    cax_1 = divider.append_axes("right", size="5%", pad=0.05)
    plot = axes[0].imshow(dpc_result[0].real, clim=[-0.15, 0.02], cmap="gray",
                          extent=[0, dpc_result[0].shape[-1], 0, dpc_result[0].shape[-2]])
    axes[0].axis("off")
    plt.colorbar(plot, cax=cax_1, ticks=[-0.15, 0.02])
    axes[0].set_title("Absorption")

    # Right panel: phase (imaginary part of the result).
    divider = make_axes_locatable(axes[1])
    cax_2 = divider.append_axes("right", size="5%", pad=0.05)
    plot = axes[1].imshow(dpc_result[0].imag, clim=[-1.0, 3.0], cmap="gray",
                          extent=[0, dpc_result[0].shape[-1], 0, dpc_result[0].shape[-2]])
    axes[1].axis("off")
    plt.colorbar(plot, cax=cax_2, ticks=[-1.0, 3.0])
    axes[1].set_title("Phase")

    Output (example):

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPhaseMicroscopy/index.html b/docs/Toolboxes/DiscoveryPhaseMicroscopy/index.html index 8dfd6ceac..c596d0486 100644 --- a/docs/Toolboxes/DiscoveryPhaseMicroscopy/index.html +++ b/docs/Toolboxes/DiscoveryPhaseMicroscopy/index.html @@ -10,13 +10,13 @@ - + - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Brewster_Angle_Experiment/index.html b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Brewster_Angle_Experiment/index.html index 385d22dc8..7bb048504 100644 --- a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Brewster_Angle_Experiment/index.html +++ b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Brewster_Angle_Experiment/index.html @@ -10,7 +10,7 @@ - + @@ -20,7 +20,7 @@ The amount of reflected light is specified by the optical properties of the reflecting surface, such as plastic sheets, glass, or highways.
    The incident angle of the incoming electromagnetic lightwave and refractive indices of media in which light travels through them have an essential role in the polarization degree of the reflected and refracted polarized light beams.

    You can see the reflection and transmission of unpolarized light with most of the incident angle (𝜃) values below.

    What is the Brewster angle?

    When the incident ray travels from a less dense medium (n1) to a higher dense medium (n2) with a critical angle (𝜃_B), the reflected ray is perfectly s-polarized in which the orientation of the electric field vectors are perpendicular to the plane of incidence. Otherwise, the refracted beam has a 90-degree polarization angle, partially p-polarized. This critical angle is called a Brewster angle or polarization angle and is represented by 𝜃_B in the scheme below.

    Brewster angle can be easily calculated using refractive indices of traveling media of light. In our experiment, we used air (n1 = 1) as the first medium in which light comes first and reflects in this part and microscope slide glass (n2 = 1.5) as the second medium, and the light transmits through. When we calculate the Brewster angle for our experiment, it equals approximately 57 degrees, and we can find the equation below.

    Parts

    Modules for this setup

    NamePropertiesPriceLink#
    4×4 BaseplateSkeleton of the System21.79 €Base-plate1
    MODULE: Polarizer CubeIt holds the linearly polarizing filter4.31 €Linear Polarizer1
    MODULE: Kinematic Microscope Slide HolderIt is used to insert the microscope slide with Brewster angle (53 degrees)3.7 €Microscope Slide Holder1
    MODULE: Laser CubeLASER source holder17.68 €Light Source 1
    EXTRA MODULE: Screen Holder CubeIt holds the Display Screen (Not Used in Practice)1 €Screen1

    Parts to print

    Additional components

    • Check out the RESOURCES for more information!
    • 1 × Linear Polarizing Sheet 🢂
    • 67 × 5 mm Ball magnets 🢂
    • 28 x Screws DIN912 ISO 4762 - M3×12 mm 🢂
    • 3 x Screws DIN912 ISO 4762 M2×16 mm 🢂
    • Microscope Rectangular Coverslips 🢂
    • 1 × 5 mW Blue UV Laser Pointer 🢂

    Assembly

    All necessary parts to assemble a Microscope Slide Glass Holder Module are gathered in the image below:

    Results

    Brewster's angle experiment setup can be prepared easily. After printing and assembling the module parts, we try to find a critical angle or Brewster's angle as much as we can.

    In this setup, we use LASER as a light source because it is easier to observe the polarization degree changes by reflection. As a first step, LASER light beams reflect from microscope slide glass and pass through a linear polarizer. Then a piece of paper is inserted on the sample holder comb. Finally, we can observe the totally polarized reflected light.

    We should make a good alignment to find the Brewster angle as the incidence angle of the incoming light. In practice, it is hard because of using screws, we could not find the exact incidence angle of a microscope slide. Nevertheless, I could take results that are almost perfectly polarized light after reflection in almost critical angle.
    You can see the reflected laser light beam without alignment at a random incident angle above.

    In the image below, the incoming beam is reflected with almost Brewster angle, 57 degrees for microscope slide glass:

    Let's look at our almost perfectly polarized light by reflection experiment video records. The orientation direction of the linear polarizer only changes in 1st and 2nd videos, and we see that reflected light is totally polarized almost. The light is blocked, and we can see almost no light after the polarizer when the orientation of the linear polarizer is perpendicular to the polarization orientation of the reflected beam.

    New Ideas

    We are open to new ideas. Please contribute to the project freely, this place is a free country which is built by codes and machines :robot:

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Circular_Polarizer/index.html b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Circular_Polarizer/index.html index 24e42f116..3dc45d77a 100644 --- a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Circular_Polarizer/index.html +++ b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Circular_Polarizer/index.html @@ -10,7 +10,7 @@ - + @@ -21,7 +21,7 @@ The incoming light passes through a linear polarizer (grey UC2 cube). In the next step, new demonstrated linearly polarized light travels to the circular polarizer module (black UC2 cube). When an observer looks from the circular polarizer side, she/he can see the circularly polarized light.

    The circular polarizer filter was taken out of 3D cinema glasses, and it was inserted into a sample holder insert. This new circular polarizer filter insert was assembled with a UC2 unit block. Ta-da! The circular polarizer cube is ready for flight.

    The effect of the propagation direction of the linear polarizer can be seen in the video below. The polarization direction of light before the circular polarizer changes with turning the wheel of the linear polarizer and changing its orientation.

    New Ideas

    Dear Visitor,
    you have an opportunity to view our experiments. If you have a new idea, just open a new issue and shine our eyes with your light.
    Greetings from UC2 Team

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Crossed_Polarizers/index.html b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Crossed_Polarizers/index.html index 17fb03b1c..45985eba4 100644 --- a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Crossed_Polarizers/index.html +++ b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Crossed_Polarizers/index.html @@ -10,7 +10,7 @@ - + @@ -18,7 +18,7 @@

    Crossed Polarizers

    Crossed Polarizers are used to analyze the polarization of light. We use two linear polarizers, which are located perpendicular to each other. [1]

    In the experiment, the polarizing direction of the first polarizer is oriented vertically to the incoming beam, and it will allow passing only vertical electric field vectors. After the first polarizer, we have an s-polarized light wave. [2]

    The second polarizer is located horizontally to the electric field vector. It blocks the wave which passes through the first polarizer. These two polarizers should be oriented at the right angle with respect to each other. You can see the orientation of the linear filters and light polarization change during the experiment in the figure below.

    Time to build a Crossed Polarizers setup!

    Parts

    Modules for this setup

    NamePropertiesPriceLink#
    4×1 BaseplateSkeleton of the System5.47 €Base-plate1
    MODULE: Polarizer CubeIt holds the linearly polarizing filter8.62 €Linear Polarizer2
    EXTRA MODULE: Sample Holder CubeIt holds the Sample (Not Used in Practice)1.3 €Sample Holder1
    EXTRA MODULE: Screen Holder CubeIt holds the Display Screen (Not Used in Practice)1 €Screen1
    EXTRA MODULE: Flashlight Lamp CubeLight Source7.2 €Flashlight1

    Parts to print

    Additional components

    • Check out the RESOURCES for more information!
    • 1 × Linear Polarizing Sheet 🢂
    • 16 × 5 mm Ball magnets 🢂
    • 16 x Screws DIN912 ISO 4762 - M3×12 mm 🢂
    • 6 x Screws DIN912 ISO 4762 M2×16 mm 🢂
    • NOT USED 1 × flashlight 🢂

    Assembly

    If all written modules are used in the experiment, the setup will look like:

    Results

    We printed and assembled two Linear Polarizer module parts. Then, we bought the necessary components and inserted them into cubes.
    You will find the basic version of Crossed Polarization experiment without a specific sample and additional light source below. We demonstrated the experiment with a room light.

    We can observe the direct effect of the angle between two linear polarizers in the video below. The intensity of passing light through crossed polarizers changes when the direction angle of the polarization filter changes 360 degrees.

    New Ideas

    We are open to new ideas. Please contribute to the project freely, this place is a free country which is built by codes and machines :robot:

    References

    [1] Introduction to Polarized Light. (n.d.). Nikon’s MicroscopyU. Retrieved February 15, 2021, from https://www.microscopyu.com/techniques/polarized-light/introduction-to-polarized-light
    [2] Logiurato, F. (2018). Teaching Light Polarization by Putting Art and Physics Together. The Physics Teachers, 1–5. https://arxiv.org/ftp/arxiv/papers/1803/1803.09645.pdf

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Many_Microscope_Slides_Experiment/index.html b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Many_Microscope_Slides_Experiment/index.html index 676ac7d74..eaaa0d4b9 100644 --- a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Many_Microscope_Slides_Experiment/index.html +++ b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Many_Microscope_Slides_Experiment/index.html @@ -10,7 +10,7 @@ - + @@ -19,7 +19,7 @@ The printed and assembled Sample Holder Comb module with nine microscope glasses:

    You will find the basic version of the experiment without a Screen and Linear Polarizer module below.

    Images of the resulting experimental setup;
    side view (top) , top view (bottom)

    We can observe the direct effect of the rotational angle of the linear polarizer in the video below. Laser light travels through microscope slides and air gap media several times and gets close to fully polarized light. We can see this effect by adding the Linear Polarizer cube.
    The intensity of passing light through linear polarizer changes when the direction angle of the polarization filter changes 360 degrees.

    We can see the change in the two videos below. They demonstrate the experiment from two different views.

    New Ideas

    Rat-Tat! We are here to hear new ideas. Please don't be shy and have a heart-to-heart talk with us. 💝

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Newtons_Rings_Experiment/index.html b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Newtons_Rings_Experiment/index.html index 800d10604..6fe3e3f40 100644 --- a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Newtons_Rings_Experiment/index.html +++ b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Newtons_Rings_Experiment/index.html @@ -10,13 +10,13 @@ - +

    Newton's Rings Experiment

    Soap bubbles, oil slicks, or oxidized metal surfaces can create interference patterns under white light illumination. In Newton's fringes, light interferes constructively or destructively when the light waves reflect from two surfaces. These combinations can be created using glass-air or air-glass contacts. These two interferences generate a concentric ring pattern of rainbow colours under white light illumination. In the same way, monochromatic light creates dark-light rings.

    The simplest example can be made using two well-cleaned microscope slides as interfaces. An air film is enclosed between the two slides inconsistently, and irregular-coloured fringes are generated in daylight. When the pressure on the microscope slides changes, the fringes move and change.

    In the Newton's Rings Experiment, we used a convex lens whose surface is placed on an optical plane glass, a microscope glass, from its long focal length side. These two pieces are held together with a non-uniform thin air film. After light illumination through these surfaces, the air gap and random pressures on the microscope slide and plano-convex lens cause the generation of irregular coloured or single-colour fringes; Newton's Rings.

    The details of Experiment Modules

    Linear Polarizer is used in this experiment to visualize the polarization change of reflected lights from two media on the interference pattern.

    Additional module design was made for combining the microscope glass slide and plano-convex lens inside one cube insert. You can see rendered image of the Newton's Rings Lens-Slide Holder Module from Inventor.

    s

    We used a laser as a light source in the setup. During the experiment, we extended the beam size of the pen laser from 2 mm to 6 mm using a regular Beam Expander Module.

    Parts

    Modules for this setup

    NamePropertiesPriceLink#
    4×4 BaseplateSkeleton of the System21.79 €Base-plate1
    MODULE: Beam Expander CubeIt expands the laser beam size13.55 €Beam Expander1
    MODULE: Beam Splitter CubeIt splits the incoming beam and recombine them29.17 €Beam Splitter Holder1
    MODULE: Newton's Rings Slide-Lens Holder CubeIt creates Newton's Rings7.54 €Lens - Slide Holder1
    MODULE: Polarizer CubeIt holds the linearly polarizing filter4.31 €Linear Polarizer1
    MODULE: Laser CubeLASER source holder17.68 €Light Source1
    EXTRA MODULE: Screen Holder CubeIt holds the Display Screen (Not Used in Practice)1 €Screen1

    Parts to print

    Additional components

    • Check out the RESOURCES for more information!
    • 1 × Linear Polarizing Sheet 🢂
    • 64 × 5 mm Ball magnets 🢂
    • 44 x Screws DIN912 ISO 4762 - M3×12 mm 🢂
    • 3 x Screws DIN912 ISO 4762 M2×16 mm 🢂
    • 1 x Beamsplitter Cube (Art. 2137) 🢂
    • 1 x Microscope Rectangular Coverslips 🢂
    • 1 x Plano-Convex Lens 🢂
    • 1 × 5 mW Blue UV Laser Pointer 🢂
    • 1 x iPhone 5 Lens f'=3mm (separated from an iPhone camera spare part) 🢂
    • 1 x Achromat Lens f' = 26,5 mm 🢂

    Assembly

    Results

    We started with building of UC2 Modules: design, print, assemble and be ready for testing. You can see our Beam Expander Cube on the 4x1 Baseplate below.

    Lens - Slide Holder Module is the key element for the generation of Newton's Rings. Necessary Module parts are shown in the image below.

    Assembled and Ready-To-Use module should look like ...

    The experimental procedure begins with

    • installation of the Laser and Beam Expander Modules on the 4x4 Baseplate.

    • After checking the expansion of the laser beam width, Beam Splitter Cube is added to the setup.

    • One of the divided incoming beams is directed to the Newton's Rings Lens & Microscope Holder cube, and light reflects from the convex lens-plate glass combination through the beam splitter cube, then on observation screen.

    Demonstrating the experiment is much easier with a laser light source and results in visible fringes. The Newton's Fringes will vary in colour from inner to outer circles if a white light source is replaced with a laser source.

    Let's zoom in Newton's Fringes with more experiment images!

    The effect of the polarization angle change of the Linear Polarizer Filter can be seen in the video.

    New Ideas

    We are open to new ideas. Please contribute to the project freely, this place is a free country which is built by codes and machines :robot:

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Polarization_using_optically_active_solution/index.html b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Polarization_using_optically_active_solution/index.html index bc2141b01..e3bf7652f 100644 --- a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Polarization_using_optically_active_solution/index.html +++ b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Polarization_using_optically_active_solution/index.html @@ -10,14 +10,14 @@ - +

    Polarization Experiment using Optically Active Solution

    The optical activity was discovered by Arago in the quartz in 1811. In 1847, molecular chirality was observed by scientist Louis Pasteur. He found that natural tartaric acid is optically active, and its external crystals have isomerism and chiral morphology [1].

    Two molecules with the same chemical formula, the mirror image of the other, describe molecular chirality. These come in two varieties: dextrorotatory (rotate plane-polarized light clockwise) and levorotatory (counterclockwise).

    Sucrose is a disaccharide made of glucose and fructose and dextrorotatory, which rotates the plane-polarized light to the right. A well-known example of sucrose is table sugar produced naturally in plants. Fructose is a simple ketonic sugar and levorotatory which rotates the plane-polarized light to the left. Glucose is a simple sugar that belongs to the carbohydrate family and is dextrorotatory. The molecules of fructose and glucose are mirror images of each other. Corn syrup is one of the most commonly used sugar solutions [2].

    Two simple sugar-water solutions were prepared and used in the experiment. The first solution was produced with one cup of table sugar and one cup of water. Table sugar is sucrose and dextrorotatory, turning clockwise to the right plane-polarized light. Grape sugar is dextrorotatory and glucose, and the second solution mixes grape sugar (Traubenzucker) and water components in the same amount. It rotates the incoming light polarization state to the right, clockwise direction. However, two solutions have different polarization states at the same time because of their molecular structure and demonstrate different colors inside the crossed polarizers.

    s

    In the image, Table sugar-water solution is shown in left-side, Grape sugar-water solution is in the right glass.

    Parts

    Modules for this setup

    NamePropertiesPriceLink#
    4×1 BaseplateSkeleton of the System5.47 €Base-plate1
    MODULE: Polarizer CubeIt holds the linearly polarizing filter8.62 €Linear Polarizer2
    MODULE: Active Solution ChamberIt contains sugar-water solutions.7.32 €Active Solution Chambers1
    EXTRA MODULE: Flashlight Lamp CubeLight Source7.2 €Flashlight1

    Parts to print

    Additional components

    • Check out the RESOURCES for more information!
    • 1 × Linear Polarizing Sheet 🢂
    • 24 × 5 mm Ball magnets 🢂
    • 20 x Screws DIN912 ISO 4762 - M3×12 mm 🢂
    • 6 x Screws DIN912 ISO 4762 M2×16 mm 🢂
    • 2 x metal plates
    • Microscope Rectangular Coverslips 🢂
    • NOT USED 1 × flashlight 🢂

    Assembly

    Results

    We printed and assembled two Linear Polarizer and Active Solution Chamber module parts. Then, we bought the necessary components and inserted them into cubes.
    You can see the details of the Active Solution Chamber designs.

    Two different chamber designs are shown in the image below. The left-side chamber has a container for only 1 active solution. In the other one, two different mixtures can be observed at the same time.

    You can find the basic version of the Polarization Using Optically Active Solution experiment with an additional flashlight source below. Depending on the conditions at the experiment location, you can add an extra light source.

    The chamber module was inserted between two linear polarizers, the Crossed Polarizers. We can observe the direct effect of the angle between two linear polarizers in the video below. The intensity of passing light through crossed polarizers changes when the direction angle of the polarization filter changes 360 degrees.

    Experimental result of two optically active solutions is shown in the video:

    • Left is the Grape Sugar-water Solution (Glucose),
    • Right is the Table Sugar-water Solution (Sucrose).

    References

    [1] Gal, J. (2017). Pasteur and the art of chirality. Nature Chemistry, 9(7), 604–605. https://doi.org/10.1038/nchem.2790

    [2] Logiurato, F. (2018). Teaching Light Polarization by Putting Art and Physics Together. The Physics Teachers, 1–5. https://arxiv.org/ftp/arxiv/papers/1803/1803.09645.pdf

    New Ideas

    We are open to new ideas. Please contribute to the project freely, this place is a free country which is built by codes and machines :robot:

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Stress_Birefringence/index.html b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Stress_Birefringence/index.html index 96e62fbd9..29a638040 100644 --- a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Stress_Birefringence/index.html +++ b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Stress_Birefringence/index.html @@ -10,7 +10,7 @@ - + @@ -19,7 +19,7 @@ Birefringence is the optical property of a material that has a dependent refractive index to polarization and propagation direction of light. These optically anisotropic materials are said to be birefringent.

    In general, birefringence is the double refraction of light. Each incoming light through the birefringent material such as calcite crystal is split by polarization into two rays; an ordinary and an extraordinary ray. These rays have different paths and polarizations.

    Common birefringent materials are;

    * best characterized birefringent materials are crystals
    * many plastics under mechanical stress such as cellophane or plastic boxes
    * many biological materials such as collagen, found in cartilage, tendon, bone, corneas, and several other areas in the body or some proteins.
    Polarized light microscopy is commonly used in biological tissue.

    Birefringence is used in many optical and medical devices. In medical applications, it can be used for the measurement of the optical nerve thickness or the diagnosis of glaucoma.

    Well then, what is the connection with polarization?

    Let's think. You ordered a new T-shirt from Amazon. You tried it and liked it. How beautiful! But wait. You can use a plastic cargo package for a polarization experiment and demonstrate the stress birefringence of a plastic sheet easily. Yesss, you can make science using 'garbage' too.
    Let's look at that more closely!

    Polarizers are frequently used to detect stress in plastics; birefringence. In this experiment, we use basic materials from our daily life as samples and see birefringence with our naked eyes. Let's collect simple objects such as plastic boxes, plastic cutlery (Image 1) or plastic packages. Even we can prepare our own birefringent object (Image 2) using a plastic punched pocket and sticky tape.

    Image 1 :

    Image 2:

    Stress Birefringence

    Stress birefringence results from stress or deformation of isotropic materials; applying stress causes a loss of physical isotropy and generates birefringence.

    How can stress be applied? It can be applied externally, or another method can be used: a birefringent plastic object is manufactured using injection molding and is then cooled.

    Parts

    Modules for this setup

    NamePropertiesPriceLink#
    4×1 BaseplateSkeleton of the System5.47 €Base-plate1
    MODULE: Polarizer CubeIt holds the linearly polarizing filter8.62 €Linear Polarizer2
    MODULE: Sample Holder CubeIt holds the Birefringent Samples3.47 €Sample Holder1
    EXTRA MODULE: Screen Holder CubeIt holds the Display Screen (Not Used in Practice)1 €Screen1
    EXTRA MODULE: Flashlight Lamp CubeLight Source7.2 €Flashlight1

    Parts to print

    Additional components

    • Check out the RESOURCES for more information!
    • 1 × Linear Polarizing Sheet 🢂
    • 16 × 5 mm Ball magnets 🢂
    • 24 x Screws DIN912 ISO 4762 - M3×12 mm 🢂
    • 6 x Screws DIN912 ISO 4762 M2×16 mm 🢂
    • Birefringent Materials or Samples
    • NOT USED 1 × flashlight 🢂

    Assembly

    If all written modules are used in the experiment, the setup will look like:

    Results

    You will find the basic version of the Stress Birefringence experiment without an extra light source and sample holder below.
    A sample is placed into the Crossed Polarizers in the setup, and color patterns can be observed clearly. The polarization of a light ray is rotated after passing through a birefringent material and the amount of rotation is dependent on wavelength.

    The printed cube parts were assembled, and the resulting images of the experiments were taken for 3 different birefringent materials.

    In the 1st Experiment, we prepared our sample using a plastic punched pocket and randomly applied sticky tape on it.

    In the 2nd Experiment, we used a plastic piece as a sample for the setup.

    In the 3rd Experiment, plastic cutlery was used.

    New Ideas

    Rat-Tat! We are here to hear new ideas. Please don't be shy and have a heart-to-heart talk with us. 💝

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Three_Polarizers/index.html b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Three_Polarizers/index.html index 8a6629e25..cd70ac29b 100644 --- a/docs/Toolboxes/DiscoveryPolarization/APP_POL_Three_Polarizers/index.html +++ b/docs/Toolboxes/DiscoveryPolarization/APP_POL_Three_Polarizers/index.html @@ -10,7 +10,7 @@ - + @@ -20,7 +20,7 @@ Some light will pass through three polarizers if we add a third polarizer between these two crossed polarizers.

    In the Three Linear Polarizers setup, the amount of light passing through the polarizers can be calculated by the Law of Malus, cosine-squared law.

    I : the intensity of passing light through polarizers (total amount of passing light into three polarizers setup)
    I(0) : the intensity of incoming light
    θ : the angle between the transmission axes of two polarizers

    The polarization direction of the first polarizer is oriented vertically to the incoming beam at 0 degrees. Incoming unpolarized light passes through the first polarizer (linear s-polarized). After the first polarizer, the vertically polarized light travels to the second linear polarizer, which is rotated by 45 degrees to the first polarizer. Then the traveling light passes through the third polarizer (linear p-polarized), oriented at 90 degrees tilted for the first polarizer. Due to the orientation angle of each linear polarizer, transmitted light intensity changes based on the Law of Malus.

    Three linear polarizers are used in the experiment, and each of them has different angles concerning the transmission axis.

    Parts

    Modules for this setup

    NamePropertiesPriceLink#
    4×1 BaseplateSkeleton of the System5.47 €Base-plate1
    MODULE: Polarizer CubeIt holds the linearly polarizing filter13.43 €Linear Polarizer3
    EXTRA MODULE: Sample Holder CubeIt holds the Sample (Not Used in Practice)1.3 €Sample Holder1
    EXTRA MODULE: Screen Holder CubeIt holds the Display Screen (Not Used in Practice)1 €Screen1
    EXTRA MODULE: Flashlight Lamp CubeLight Source7.2 €Flashlight1

    Parts to print

    Additional components

    • Check out the RESOURCES for more information!
    • 1 × Linear Polarizing Sheet 🢂
    • 16 × 5 mm Ball magnets 🢂
    • 24 x Screws DIN912 ISO 4762 - M3×12 mm 🢂
    • 9 x Screws DIN912 ISO 4762 M2×16 mm 🢂
    • NOT USED 1 × flashlight 🢂

    If all written modules are used in the experiment, the setup will look like:

    Assembly

    Results

    The basic version of the Three Polarizers experiment without a specific sample and extra light source below is demonstrated. You can see the experiment images below.

    The effect of the angle between two linear polarizers can be seen in the video below. The intensity of the light reaching the observer's eye through the polarizers changes when the wheel insert of the polarization filter is rotated by 45 degrees.

    New Ideas

    We are open to new idea source (dad joke about the open-source project 😐 ). Just open a new issue and spread your idea!

    - + \ No newline at end of file diff --git a/docs/Toolboxes/DiscoveryPolarization/index.html b/docs/Toolboxes/DiscoveryPolarization/index.html index 6d4fbbd5b..7889fca42 100644 --- a/docs/Toolboxes/DiscoveryPolarization/index.html +++ b/docs/Toolboxes/DiscoveryPolarization/index.html @@ -10,13 +10,13 @@ - + - + \ No newline at end of file diff --git a/docs/Toolboxes/index.html b/docs/Toolboxes/index.html index efcab9d09..06e5cddf6 100644 --- a/docs/Toolboxes/index.html +++ b/docs/Toolboxes/index.html @@ -10,13 +10,13 @@ - +

    Educational Kits

    CoreBox: Entry-Level Education Box

     - Features and Specifications
    - Assembling the CoreBox
    - Core Lens, Telescope, and Microscope

    Discovery Kit: Extension of CoreBox

     - Adding Modules to the Discovery Kit
    - Enhanced Functionality

    Interferometer Kit

     - You can build a Michelson Interferometer
     - Try enhancing it to become a Mach-Zehnder Microscope
    - Ultimately test the microscope extension and reconstruct images using holography
    -
    - + \ No newline at end of file diff --git a/docs/WORKSHOPS/Workshop Nigeria/index.html b/docs/WORKSHOPS/Workshop Nigeria/index.html index 11604a034..7902a71f1 100644 --- a/docs/WORKSHOPS/Workshop Nigeria/index.html +++ b/docs/WORKSHOPS/Workshop Nigeria/index.html @@ -10,13 +10,13 @@ - +

    UC2 Microscopy Building Workshop at BioRTC Yobe University, Nigeria

    Welcome to the UC2 Microscopy Workshop! 📷🔬

    If you've ever been curious about the fascinating world of microscopy, you're in the right place! In this workshop, we will take you on a journey through the core concepts of microscopy, starting with lenses and interferometry, where you'll learn how different waves superpose to create powerful imaging techniques.

    Our approach centers around the open-source modular toolbox, UC2. This revolutionary system is built on the idea that every optical, mechanical, or electrical component can be mounted inside a compact 50mm cube. With a wide variety of components already available in our extensive library, you'll have the flexibility to design and build your own optical setups, limited only by your creativity.

    We'll kick off the workshop with the fully lensless microscope, utilizing just an LED, spatial filter, sample, and camera sensor. As we progress, you'll upgrade to a finite corrected objective lens, improving the resolution and focusing capabilities on the camera chip. We'll explore different microscopy techniques, including directional microscopy and light sheet microscopy, where the alignment of light enhances optical resolution along the axis.

    The heart of our workshop is the UC2-produced microscope, aptly named "sub." Although basic, it is the perfect tool to grasp the fundamental concepts of microscopy. From there, the possibilities are limitless as you delve into designing and printing specific inserts to adapt the system for your experiments.

    The UC2 system was born out of a quest for a small, affordable microscope for live-cell microscopy imaging inside an incubator. As it evolved, we expanded its modularity, adding different contrast mechanisms and extensions like fluorescence and more. The success of this open-source initiative has been demonstrated through various publications, showcasing its applications in structured illumination microscopy, confocal microscopy, and beyond.

    Our mission is to bridge the gap between education and real-world applications, providing a platform where anyone, regardless of experience, can get creative with optics. We strive to make microscopy accessible and affordable for all, and we are excited to announce the birth of our company, now headquartered in Jena, as we embark on a journey to revolutionize microscopy.

    So, if you're ready to dive into the world of microscopy, join us in this workshop as we build and enhance simple microscopes, bring them to life with software and image processing, and unlock the incredible potential of UC2 and open-source hardware.

    Let's embark on this adventure together! Happy exploring! 🚀✨

    Inline Holographic Microscope:

    Simple SEEED ESP32S3 Xiao Sense-based microscope:

    Michelson Interferometer:

    Light-sheet microscope:

    - + \ No newline at end of file diff --git a/docs/WORKSHOPS/index.html b/docs/WORKSHOPS/index.html index 8ac5a1ac7..1a7df2a19 100644 --- a/docs/WORKSHOPS/index.html +++ b/docs/WORKSHOPS/index.html @@ -10,13 +10,13 @@ - +

    openUC2 Workshops

    From time to time we try to share our knowledge in various formats. If you want to learn more about how you can have an openUC2 workshop near you, please contact us! We would be happy to introduce you to the world of open optics.

    - + \ No newline at end of file diff --git a/docs/intro/index.html b/docs/intro/index.html index 8a977e3c2..a46f23fea 100644 --- a/docs/intro/index.html +++ b/docs/intro/index.html @@ -10,13 +10,13 @@ - +

    openUC2 Documentation

    Here you can find all information to enhance, repair, improve, use, communicate,.... our optical toolbox openUC2. Did not find what you were looking for? No problem. Send us a mail or write an issue in our github repository https://github.com/openUC2/UC2-GIT/issues.

    Looking for...

    The CompanyFlash FirmwareImSwitch Web GUI
    TopicDescription
    Geometrical Optics [EN, DE]
    Wave Optics [EN]
    Microscopy [EN, DE]
    Telescope [EN, DE]
    Electronics (BOX) [EN]
    Electronics (PCB) [EN]
    ImSwitch [EN]
    Light-Sheet Microscopy [EN]
    STORM Microscopy [EN]
    openUC2 FiveD [EN]
    Workshops [EN]
    Production [EN]
    - + \ No newline at end of file diff --git a/index.html b/index.html index e01b19f2c..42bedf3b9 100644 --- a/index.html +++ b/index.html @@ -10,13 +10,13 @@ - +

    openUC2 Documentation

    Seeing is believing. But better with the docs!

    Learning Kits (Explorer/Discovery)

    Learning Kits (Explorer/Discovery)

    Step by step guides to learn everything about optics.

    Cutting the Edge! (Investigator)

    Cutting the Edge! (Investigator)

    Get the most of your ready-to-use microscopes.

    Anything else.

    Anything else.

    Anything that is yet missing.

    - + \ No newline at end of file diff --git a/markdown-page/index.html b/markdown-page/index.html index 8733db53b..add2f1527 100644 --- a/markdown-page/index.html +++ b/markdown-page/index.html @@ -10,13 +10,13 @@ - +

    Markdown page example

    You don't need React to write simple standalone pages.

    - + \ No newline at end of file diff --git a/search/index.html b/search/index.html index 5253cfca2..0c8a2b6e3 100644 --- a/search/index.html +++ b/search/index.html @@ -10,13 +10,13 @@ - +

    Search the documentation

    - + \ No newline at end of file