vault backup: 2024-10-14 10:05:08
This commit is contained in:
commit
e0c10ab711
14 changed files with 471 additions and 265 deletions
110
.obsidian/workspace.json
vendored
110
.obsidian/workspace.json
vendored
|
@ -1,19 +1,19 @@
|
||||||
{
|
{
|
||||||
"main": {
|
"main": {
|
||||||
"id": "457b1626aad25933",
|
"id": "9c5b007ab74924bc",
|
||||||
"type": "split",
|
"type": "split",
|
||||||
"children": [
|
"children": [
|
||||||
{
|
{
|
||||||
"id": "1f52c0279712e3d2",
|
"id": "ee1680277f6a0d97",
|
||||||
"type": "tabs",
|
"type": "tabs",
|
||||||
"children": [
|
"children": [
|
||||||
{
|
{
|
||||||
"id": "e23f9dfc04a75f51",
|
"id": "8d8de4cd4c80f0f8",
|
||||||
"type": "leaf",
|
"type": "leaf",
|
||||||
"state": {
|
"state": {
|
||||||
"type": "markdown",
|
"type": "markdown",
|
||||||
"state": {
|
"state": {
|
||||||
"file": "Autonomous Networking/notes/4 WSN pt. 2.md",
|
"file": "Foundation of data science/notes/1 CV Basics.md",
|
||||||
"mode": "source",
|
"mode": "source",
|
||||||
"source": false
|
"source": false
|
||||||
}
|
}
|
||||||
|
@ -25,15 +25,15 @@
|
||||||
"direction": "vertical"
|
"direction": "vertical"
|
||||||
},
|
},
|
||||||
"left": {
|
"left": {
|
||||||
"id": "6a3cb9001ef6ba4d",
|
"id": "e2078ffa3de56c07",
|
||||||
"type": "split",
|
"type": "split",
|
||||||
"children": [
|
"children": [
|
||||||
{
|
{
|
||||||
"id": "c0a60ed96ba06609",
|
"id": "d86cb8d8115f9e4b",
|
||||||
"type": "tabs",
|
"type": "tabs",
|
||||||
"children": [
|
"children": [
|
||||||
{
|
{
|
||||||
"id": "5d5551c2fd0314c8",
|
"id": "2b2245f56092006e",
|
||||||
"type": "leaf",
|
"type": "leaf",
|
||||||
"state": {
|
"state": {
|
||||||
"type": "file-explorer",
|
"type": "file-explorer",
|
||||||
|
@ -43,7 +43,7 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "a4bcc1c786569338",
|
"id": "954699747dc12b5e",
|
||||||
"type": "leaf",
|
"type": "leaf",
|
||||||
"state": {
|
"state": {
|
||||||
"type": "search",
|
"type": "search",
|
||||||
|
@ -58,7 +58,7 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "2dfc44e60fc51bbe",
|
"id": "71e92c2ed6f6f21c",
|
||||||
"type": "leaf",
|
"type": "leaf",
|
||||||
"state": {
|
"state": {
|
||||||
"type": "bookmarks",
|
"type": "bookmarks",
|
||||||
|
@ -72,20 +72,20 @@
|
||||||
"width": 300
|
"width": 300
|
||||||
},
|
},
|
||||||
"right": {
|
"right": {
|
||||||
"id": "11560c155f3d8f6e",
|
"id": "bc4b945ded1926e3",
|
||||||
"type": "split",
|
"type": "split",
|
||||||
"children": [
|
"children": [
|
||||||
{
|
{
|
||||||
"id": "95208597e1d680ae",
|
"id": "00a3201508c9b6f7",
|
||||||
"type": "tabs",
|
"type": "tabs",
|
||||||
"children": [
|
"children": [
|
||||||
{
|
{
|
||||||
"id": "3c35a40edfa1f381",
|
"id": "34cc5dc90419b254",
|
||||||
"type": "leaf",
|
"type": "leaf",
|
||||||
"state": {
|
"state": {
|
||||||
"type": "backlink",
|
"type": "backlink",
|
||||||
"state": {
|
"state": {
|
||||||
"file": "Autonomous Networking/notes/4 WSN pt. 2.md",
|
"file": "Foundation of data science/notes/1 CV Basics.md",
|
||||||
"collapseAll": false,
|
"collapseAll": false,
|
||||||
"extraContext": false,
|
"extraContext": false,
|
||||||
"sortOrder": "alphabetical",
|
"sortOrder": "alphabetical",
|
||||||
|
@ -97,19 +97,19 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "926b8cdb0ccf5242",
|
"id": "f4a0915b879a43cd",
|
||||||
"type": "leaf",
|
"type": "leaf",
|
||||||
"state": {
|
"state": {
|
||||||
"type": "outgoing-link",
|
"type": "outgoing-link",
|
||||||
"state": {
|
"state": {
|
||||||
"file": "Autonomous Networking/notes/4 WSN pt. 2.md",
|
"file": "Foundation of data science/notes/1 CV Basics.md",
|
||||||
"linksCollapsed": false,
|
"linksCollapsed": false,
|
||||||
"unlinkedCollapsed": true
|
"unlinkedCollapsed": true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "9fbde0b1f6b76a5d",
|
"id": "c12ba700d0604b95",
|
||||||
"type": "leaf",
|
"type": "leaf",
|
||||||
"state": {
|
"state": {
|
||||||
"type": "tag",
|
"type": "tag",
|
||||||
|
@ -120,17 +120,17 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "7745bbb89c3a5344",
|
"id": "77997770a5699d72",
|
||||||
"type": "leaf",
|
"type": "leaf",
|
||||||
"state": {
|
"state": {
|
||||||
"type": "outline",
|
"type": "outline",
|
||||||
"state": {
|
"state": {
|
||||||
"file": "Autonomous Networking/notes/4 WSN pt. 2.md"
|
"file": "Foundation of data science/notes/1 CV Basics.md"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "b5d8a3515919e28a",
|
"id": "0d5325c0f9289cea",
|
||||||
"type": "leaf",
|
"type": "leaf",
|
||||||
"state": {
|
"state": {
|
||||||
"type": "git-view",
|
"type": "git-view",
|
||||||
|
@ -142,60 +142,62 @@
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"direction": "horizontal",
|
"direction": "horizontal",
|
||||||
"width": 300
|
"width": 233.5
|
||||||
},
|
},
|
||||||
"left-ribbon": {
|
"left-ribbon": {
|
||||||
"hiddenItems": {
|
"hiddenItems": {
|
||||||
|
"switcher:Apri selezione rapida": false,
|
||||||
|
"graph:Apri vista grafo": false,
|
||||||
|
"canvas:Crea nuova lavagna": false,
|
||||||
|
"daily-notes:Apri nota del giorno": false,
|
||||||
|
"templates:Inserisci modello": false,
|
||||||
|
"command-palette:Apri riquadro comandi": false,
|
||||||
"obsidian-ocr:Search OCR": false,
|
"obsidian-ocr:Search OCR": false,
|
||||||
"switcher:Open quick switcher": false,
|
|
||||||
"graph:Open graph view": false,
|
|
||||||
"canvas:Create new canvas": false,
|
|
||||||
"daily-notes:Open today's daily note": false,
|
|
||||||
"templates:Insert template": false,
|
|
||||||
"command-palette:Open command palette": false,
|
|
||||||
"pdf-plus:PDF++: Toggle auto-copy": false,
|
"pdf-plus:PDF++: Toggle auto-copy": false,
|
||||||
"pdf-plus:PDF++: Toggle auto-focus": false,
|
"pdf-plus:PDF++: Toggle auto-focus": false,
|
||||||
"pdf-plus:PDF++: Toggle auto-paste": false,
|
"pdf-plus:PDF++: Toggle auto-paste": false,
|
||||||
"obsidian-git:Open Git source control": false
|
"obsidian-git:Open Git source control": false
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
<<<<<<< HEAD
|
||||||
"active": "e23f9dfc04a75f51",
|
"active": "e23f9dfc04a75f51",
|
||||||
|
=======
|
||||||
|
"active": "0d5325c0f9289cea",
|
||||||
|
>>>>>>> origin/main
|
||||||
"lastOpenFiles": [
|
"lastOpenFiles": [
|
||||||
|
"Foundation of data science/slides/IP CV Basics.pdf",
|
||||||
|
"Foundation of data science/slides/notes 2.md",
|
||||||
|
"Foundation of data science/slides/Untitled.md",
|
||||||
|
"Foundation of data science/notes/1 CV Basics.md",
|
||||||
|
"Biometric Systems/notes/2. Performance indexes.md",
|
||||||
|
"Foundation of data science/notes",
|
||||||
|
"Biometric Systems/slides/LEZIONE2_Indici_di_prestazione.pdf",
|
||||||
|
"Biometric Systems/notes/1. Introduction.md",
|
||||||
|
"Autonomous Networking/notes/4 WSN Routing.md",
|
||||||
"Autonomous Networking/slides/4 WSN2.pdf",
|
"Autonomous Networking/slides/4 WSN2.pdf",
|
||||||
"Untitled.md",
|
"Autonomous Networking/images/Pasted image 20241012174130.png",
|
||||||
"Autonomous Networking/notes/4 WSN pt. 2.md",
|
"Autonomous Networking/notes/3 WSN MAC.md",
|
||||||
|
"Autonomous Networking/images/Pasted image 20241012182403.png",
|
||||||
|
"Autonomous Networking/images/Pasted image 20241012175224.png",
|
||||||
|
"Autonomous Networking/slides/3 WSN.pdf",
|
||||||
|
"conflict-files-obsidian-git.md",
|
||||||
|
"Autonomous Networking/slides/2 RFID.pdf",
|
||||||
|
"Foundation of data science/slides/FDS_intro_new.pdf",
|
||||||
|
"Foundation of data science/slides",
|
||||||
|
"Foundation of data science",
|
||||||
|
"Biometric Systems/images/Pasted image 20241002181936.png",
|
||||||
|
"Biometric Systems/images/Pasted image 20241002181932.png",
|
||||||
|
"Biometric Systems/images/Pasted image 20241002135922.png",
|
||||||
|
"BUCA/Queues.md",
|
||||||
|
"BUCA",
|
||||||
"Autonomous Networking/images/Pasted image 20241011191033.png",
|
"Autonomous Networking/images/Pasted image 20241011191033.png",
|
||||||
"Autonomous Networking/images/Pasted image 20241011182343.png",
|
"Autonomous Networking/images/Pasted image 20241011182343.png",
|
||||||
"Autonomous Networking/images/Pasted image 20241011182036.png",
|
"Autonomous Networking/images/Pasted image 20241011182036.png",
|
||||||
"Autonomous Networking/images/Pasted image 20241011182031.png",
|
"Autonomous Networking/images/Pasted image 20241011182031.png",
|
||||||
"Autonomous Networking/images/Pasted image 20241011180413.png",
|
|
||||||
"Autonomous Networking/images/Pasted image 20241011175026.png",
|
|
||||||
"Autonomous Networking/notes/3 WSN.md",
|
|
||||||
"Autonomous Networking/slides/3 WSN.pdf",
|
|
||||||
"Autonomous Networking/notes/2 RFID.md",
|
"Autonomous Networking/notes/2 RFID.md",
|
||||||
"BUCA/Queues.md",
|
|
||||||
"BUCA",
|
|
||||||
"Biometric Systems/slides/LEZIONE2_Indici_di_prestazione.pdf",
|
|
||||||
"Biometric Systems/final notes/2. Performance indexes.md",
|
|
||||||
"Autonomous Networking/images/Pasted image 20241002235837.png",
|
|
||||||
"().md",
|
|
||||||
"a.md",
|
|
||||||
"[[[LEZIONE2_Indici_di_prestazione.pdf.md",
|
|
||||||
"[LEZIONE2_Indici_di_prestazione.pdf.md",
|
|
||||||
"Autonomous Networking/images/Pasted image 20241002235555.png",
|
|
||||||
"Autonomous Networking/images/Pasted image 20241002235259.png",
|
|
||||||
"Autonomous Networking/images/Pasted image 20241002235016.png",
|
|
||||||
"Biometric Systems/final notes/1. Introduction.md",
|
|
||||||
"Foundation of data science/slides/Untitled.md",
|
|
||||||
"conflict-files-obsidian-git.md",
|
|
||||||
"Autonomous Networking/slides/2 RFID.pdf",
|
|
||||||
"Autonomous Networking/slides",
|
|
||||||
"Autonomous Networking/notes",
|
|
||||||
"Autonomous Networking/images",
|
"Autonomous Networking/images",
|
||||||
"Autonomous Networking",
|
|
||||||
"Foundation of data science/slides/notes 2.md",
|
|
||||||
"Foundation of data science/slides/FDS_intro_new.pdf",
|
|
||||||
"Biometric Systems/slides/lezione1 notes.md",
|
"Biometric Systems/slides/lezione1 notes.md",
|
||||||
"Untitled.canvas"
|
"prova per obsidian.md",
|
||||||
|
"Senza nome.canvas"
|
||||||
]
|
]
|
||||||
}
|
}
|
BIN
Autonomous Networking/images/Pasted image 20241012174130.png
Normal file
BIN
Autonomous Networking/images/Pasted image 20241012174130.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 30 KiB |
BIN
Autonomous Networking/images/Pasted image 20241012175224.png
Normal file
BIN
Autonomous Networking/images/Pasted image 20241012175224.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 71 KiB |
BIN
Autonomous Networking/images/Pasted image 20241012182403.png
Normal file
BIN
Autonomous Networking/images/Pasted image 20241012182403.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 39 KiB |
|
@ -115,12 +115,12 @@ To address these issues, we use MAC protocols. We need a protocol suitable for w
|
||||||
- increases energy efficiency, aviding idle listening
|
- increases energy efficiency, aviding idle listening
|
||||||
- allows scalability, lower latency, fairness and better throughput
|
- allows scalability, lower latency, fairness and better throughput
|
||||||
|
|
||||||
##### Reasons of energy waste:
|
### Reasons of energy waste
|
||||||
- collisions
|
- Collision: they need to be discarded and retransmitted
|
||||||
- overhearing: a node receive packets destined to others
|
- Overhearing: node receiving packet destined to other nodes
|
||||||
- overhead caused by control-packets
|
- Overhead caused by control-packets
|
||||||
- idle listening
|
- Idle listening
|
||||||
- overemitting: transmitting before the destination node is ready
|
- Overemitting: destination not ready
|
||||||
|
|
||||||
#### Techniques for WSN MAC
|
#### Techniques for WSN MAC
|
||||||
- Contention based
|
- Contention based
|
||||||
|
@ -191,3 +191,64 @@ However, collisions are still possible as RTS packets can collide. But RTS packe
|
||||||
- all the stations receiving the CTS need to adjust their NAV
|
- all the stations receiving the CTS need to adjust their NAV
|
||||||
- sender sends data after SIFS
|
- sender sends data after SIFS
|
||||||
- receiver sends ACK after SIFS
|
- receiver sends ACK after SIFS
|
||||||
|
|
||||||
|
### Communication patterns
|
||||||
|
- **Broadcast**
|
||||||
|
- a broadcast pattern is generally used by a base station (sink) to transmit information to all sensor nodes of the network
|
||||||
|
- **Convergecast or data gathering (all/many to 1)**
|
||||||
|
- all or a group of sensors comunicate to the sink
|
||||||
|
- typically used to collect sensed data
|
||||||
|
|
||||||
|
## S-MAC protocol
|
||||||
|
S-MAC: Sleep MAC
|
||||||
|
|
||||||
|
As idle listening consumes significant energy (50-100% of the energy required for receiving), the solution is to periodic listen and sleep, with a listen duty cycle of about 10% (e.g. listen 200ms and sleep 2s).
|
||||||
|
|
||||||
|
![[Pasted image 20241011175026.png]]
|
||||||
|
|
||||||
|
- Duration of sleep and listen cycles are the same for all nodes
|
||||||
|
- All nodes are free to choose their own listen/sleep schedules
|
||||||
|
- to reduce control overhead, **neighbor nodes are syncronized together**
|
||||||
|
- neighboring nodes form **virtual clusters** so as to set up a common sleep schedule
|
||||||
|
- each node maintaines a table with neighbors’ schedule
|
||||||
|
- table entries are filled when the node receives sync packets
|
||||||
|
- SYNC packets are exchanged periodically to maintain schedule synchronization
|
||||||
|
- they are sent every SINCHRONYZATION PERIOD
|
||||||
|
- Receivers will adjust their timer counters immediately after they receive the SYNC packet
|
||||||
|
|
||||||
|
- If there are no neighbors, the node will chose a random schedule. These nodes will be called **synchronizers**, nodes who receive a schedule are called **followers**.
|
||||||
|
|
||||||
|
- In a large network we cannot guarantee that all nodes follow the same schedule
|
||||||
|
- node on the border will follow both schedules
|
||||||
|
- they need to broadcast packet twice, for schedule 1 and 2
|
||||||
|
![[Pasted image 20241011180413.png]]
|
||||||
|
|
||||||
|
|
||||||
|
#### Collision avoidance
|
||||||
|
- RTS/CTS with duration is used (so NAV is used for backoff)
|
||||||
|
- carrier sense before initiating a transmission
|
||||||
|
- neighbor nodes of both sender and receiver sleeps during transmission to save power
|
||||||
|
|
||||||
|
- listen time is divided into minislots
|
||||||
|
- sender selects a minislot to end carrier sense
|
||||||
|
- if channel is free it transmits SYNC in the next minislot
|
||||||
|
|
||||||
|
### S-MAC Performance evaluation
|
||||||
|
- Topology: Two-hop network with two sources and two sinks
|
||||||
|
- Sources periodically generate a sensing message which is divided into fragments
|
||||||
|
- Traffic load is changed by varying the inter-arrival period of the messages: (for inter-arrival period of 5s, a message is generated every 5s by each source. Here it varies between 1-10s)
|
||||||
|
![[Pasted image 20241011182036.png]]
|
||||||
|
|
||||||
|
- In each test, there are 10 messages generated on each source node
|
||||||
|
- Each message has 10 fragments, and each fragment has 40 bytes (200 data packets to be passed from sources to sinks)
|
||||||
|
- The total energy consumption of each node is measured for sending this fixed amount of data
|
||||||
|
![[Pasted image 20241011182343.png]]
|
||||||
|
|
||||||
|
- S-MAC consumes much less energy than 802.11-like protocol without sleeping
|
||||||
|
- At heavy load, idle listening rarely happens, energy savings from sleeping is very limited
|
||||||
|
- At light load, periodic sleeping plays a key role
|
||||||
|
|
||||||
|
Conclusions:
|
||||||
|
- A mainly static network is assumed
|
||||||
|
- Trades off latency for reduced energy consumption
|
||||||
|
- Redundant data is still sent with increased latency
|
129
Autonomous Networking/notes/4 WSN Routing.md
Normal file
129
Autonomous Networking/notes/4 WSN Routing.md
Normal file
|
@ -0,0 +1,129 @@
|
||||||
|
|
||||||
|
- routing technique is needed to establish multi-hop communication
|
||||||
|
- the routing strategy should ensure
|
||||||
|
- mminimun energy consumption
|
||||||
|
- maximization of the network lifetime
|
||||||
|
|
||||||
|
#### Ad Hoc Routing Protocols – Classification
|
||||||
|
- **network topology**
|
||||||
|
- flat
|
||||||
|
- hierarchical
|
||||||
|
- **which data is used to identify nodes**
|
||||||
|
- arbitrary identifier
|
||||||
|
- the position of a node
|
||||||
|
- can be used to assist in geographical routing problems to decide next hop
|
||||||
|
- scalable and suitable for sensor networks
|
||||||
|
|
||||||
|
##### Flat routing protocols
|
||||||
|
Three main categories
|
||||||
|
|
||||||
|
- Proactive protocols (table driven)
|
||||||
|
- always tries to keep routing data up-to-date
|
||||||
|
- active before tables are actually needed
|
||||||
|
- routes are always already known
|
||||||
|
- more bandwidth and energy usage
|
||||||
|
- Reactive protocols
|
||||||
|
- route determined only when needed
|
||||||
|
- operates on demand
|
||||||
|
- when a route is needed, a kind of global search is started
|
||||||
|
- causes delays if routes are not already cached
|
||||||
|
- Hybrid protocols
|
||||||
|
- combination of these behaviors
|
||||||
|
|
||||||
|
### Destination Sequence Distance Vector (DSDV)
|
||||||
|
- based on bellman-ford algorithm
|
||||||
|
- proactive protocol
|
||||||
|
- add aging information to avoid routing loops
|
||||||
|
- on topology change, send incremental route updates
|
||||||
|
- unstable route updates are delayed
|
||||||
|
|
||||||
|
![[Pasted image 20241011191033.png]]
|
||||||
|
|
||||||
|
- to avoid loops, DSDV adds a **sequence number** to each routing table entry which is periodically updated. Routes with higher sequence number are preferred
|
||||||
|
|
||||||
|
##### Reactive protocols
|
||||||
|
|
||||||
|
### Flooding
|
||||||
|
- copies of incoming packets are sent by every link except the one by which the packet is arrived
|
||||||
|
- generates a lot of superfluous traffic
|
||||||
|
- flooding is a reactive technique, and does not require costly topology maintenance and complex route discovery algorithms
|
||||||
|
|
||||||
|
Characteristics:
|
||||||
|
- derivery is guaranteed (e grazie al cazzo)
|
||||||
|
- one copy will arrive by the quickest possible route (wow)
|
||||||
|
|
||||||
|
Drawbacks:
|
||||||
|
- implosion: duplicated messages are broadcasted to the same node
|
||||||
|
- overlap: if two nodes share the same under observation region, both of them may sense the same stimuli at the same time. As a result, neighbor nodes receive duplicated messages
|
||||||
|
- resource blindness (no knowledge about the available resources)
|
||||||
|
- does not take into consideration all the available energy resources
|
||||||
|
- consumes a lot of energy
|
||||||
|
|
||||||
|
### Gossiping
|
||||||
|
- nodes send the incoming packages to a randomly selected neighbor
|
||||||
|
- avoids implosion, but it takes long to propagate the message
|
||||||
|
|
||||||
|
### Dynamic Source Routing (DSR)
|
||||||
|
- Source routing: Each data packet sent carries in its header the complete, ordered list of nodes through which the packet will pass
|
||||||
|
- The sender can select and control the routes used for its own packets and supports the use of multiple routes to any destination
|
||||||
|
- Including the route in the header of each packet, helps other nodes forwarding the packet to cache the routing information for future use
|
||||||
|
|
||||||
|
**DSR is composed by two main mechanism:**
|
||||||
|
- **Route Discovery**
|
||||||
|
- mechanism by which a node S obtains the route to a destination node D
|
||||||
|
- used only if S doesn't already know the route
|
||||||
|
- every request contains an unique ID and a route record
|
||||||
|
- S sends a broadcast Route Request packet
|
||||||
|
- every node broadcasts the packet (with the same ID) appending their own address to the route record
|
||||||
|
- when D receives the request, it sends a Route Reply back to the initiator S, with a copy of the route record
|
||||||
|
- more than a route can be returned, making the protocol more resistent to changes in network topology
|
||||||
|
- **Route Maintenance**
|
||||||
|
- mechanism by which a node S can detect (while sending a packet to D) if the network topology has changed and the route can't be used anymore
|
||||||
|
|
||||||
|
![[Pasted image 20241012174130.png]]
|
||||||
|
|
||||||
|
### Ad-hoc On Demand Distance Vector routing (AODV)
|
||||||
|
- a mix between DSR and DSDV
|
||||||
|
- nodes maintain routing tables
|
||||||
|
- sequence numbers added to handle stale caches (when routing info is too old)
|
||||||
|
- nodes remember from where a packet came and populate routing tables
|
||||||
|
|
||||||
|
We can see it as an improved DSDV, as it minimizes the number of required broadcast by creating routes on a demand basis.
|
||||||
|
- if the source node S does not have the route to D, S initiates a path discovery process to locate D
|
||||||
|
- the route request is broadcasted to the neighbors
|
||||||
|
- when a node receives a broadcast RREQ, it records in their table the address of the neighbor who sent the request
|
||||||
|
- when the destination or an intermediate node that can reach the destiation is reached, it replies with a route reply (RREP) packet back to the neighbor from which it first received the RREQ
|
||||||
|
- the RREQ is routed back along the reverse path
|
||||||
|
- nodes along the path updates their table
|
||||||
|
|
||||||
|
![[Pasted image 20241012175224.png]]
|
||||||
|
|
||||||
|
## Geographical routing
|
||||||
|
- routing tables contain information to which next hop a packet should be forwarded
|
||||||
|
- this can be explicitly constructed, or implicitly inferred from physical placement of nodes
|
||||||
|
- by knowing the position of nodes, we can send the packet to a neighbor in the right direction
|
||||||
|
- we can also do *geocasting*: sending to any node in a given area
|
||||||
|
- to map a node ID to the node position we might need a location service
|
||||||
|
|
||||||
|
Strategies
|
||||||
|
- **Most forward within range r**
|
||||||
|
- send to that neighbor that realizes the most forward progress towards destination, but staying in a range r (quello che stando nel range r si avvicina di più geograficamente parlando)
|
||||||
|
- **Nearest node with (any) forward progress**
|
||||||
|
- the opposite as the previous strategy
|
||||||
|
- minimizes transmission power
|
||||||
|
- **Directional routing**
|
||||||
|
- choose next hop that is angularly closest to destination (closest to the connecting line to destination)
|
||||||
|
- come se tracciassi una linea tra S e D e prendessi gli hop più vicini alla linea
|
||||||
|
- problem: might result in loops!
|
||||||
|
|
||||||
|
#### Dead ends problem
|
||||||
|
![[Pasted image 20241012182403.png]]
|
||||||
|
|
||||||
|
## Conclusion
|
||||||
|
- there are many other protocols
|
||||||
|
- the best solution depends on network characteristics
|
||||||
|
- mobility
|
||||||
|
- node capabilities
|
||||||
|
- geographic approach allows to save more energy
|
||||||
|
- proactive approach is fast, but involves overhead
|
||||||
|
- reactive approach generate much less overhead, but it is slower
|
|
@ -1,144 +0,0 @@
|
||||||
### Communication patterns
|
|
||||||
- **Broadcast**
|
|
||||||
- a broadcast pattern is generally used by a base station (sink) to transmit information to all sensor nodes of the network
|
|
||||||
- **Convergecast or data gathering (all/many to 1)**
|
|
||||||
- all or a group of sensors comunicate to the sink
|
|
||||||
- typically used to collect sensed data
|
|
||||||
|
|
||||||
We need to define a good MAC protocol for wireless sensor networks, the following attrivutes must be considered:
|
|
||||||
- energy efficiency
|
|
||||||
- scatability and adaptability to changes
|
|
||||||
Latency, throughput and bandwidth utilization may be secondary, but desirable.
|
|
||||||
|
|
||||||
### Reasons of energy waste
|
|
||||||
- Collision: they need to be discarded and retransmitted
|
|
||||||
- Overhearing: node receiving packet destined to other nodes
|
|
||||||
- Control-packet overhead
|
|
||||||
- Idle listening
|
|
||||||
- Overemitting: destination not ready
|
|
||||||
|
|
||||||
## S-MAC protocol
|
|
||||||
S-MAC: Sleep MAC
|
|
||||||
|
|
||||||
As idle listening consumes significant energy (50-100% of the energy required for receiving), the solution is to periodic listen and sleep, with a listen duty cycle of about 10% (e.g. listen 200ms and sleep 2s).
|
|
||||||
|
|
||||||
![[Pasted image 20241011175026.png]]
|
|
||||||
|
|
||||||
- Duration of sleep and listen cycles are the same for all nodes
|
|
||||||
- All nodes are free to choose their own listen/sleep schedules
|
|
||||||
- to reduce control overhead, **neighbor nodes are syncronized together**
|
|
||||||
- neighboring nodes form **virtual clusters** so as to set up a common sleep schedule
|
|
||||||
- each node maintaines a table with neighbors’ schedule
|
|
||||||
- table entries are filled when the node receives sync packets
|
|
||||||
- SYNC packets are exchanged periodically to maintain schedule synchronization
|
|
||||||
- they are sent every SINCHRONYZATION PERIOD
|
|
||||||
- Receivers will adjust their timer counters immediately after they receive the SYNC packet
|
|
||||||
|
|
||||||
- If there are no neighbors, the node will chose a random schedule. These nodes will be called **synchronizers**, nodes who receive a schedule are called **followers**.
|
|
||||||
|
|
||||||
- In a large network we cannot guarantee that all nodes follow the same schedule
|
|
||||||
- node on the border will follow both schedules
|
|
||||||
- they need to broadcast packet twice, for schedule 1 and 2
|
|
||||||
![[Pasted image 20241011180413.png]]
|
|
||||||
|
|
||||||
|
|
||||||
#### Collision avoidance
|
|
||||||
- RTS/CTS with duration is used (so NAV is used for backoff)
|
|
||||||
- carrier sense before initiating a transmission
|
|
||||||
- neighbor nodes of both sender and receiver sleeps during transmission to save power
|
|
||||||
|
|
||||||
- listen time is divided into minislots
|
|
||||||
- sender selects a minislot to end carrier sense
|
|
||||||
- if channel is free it transmits SYNC in the next minislot
|
|
||||||
|
|
||||||
### S-MAC Performance evaluation
|
|
||||||
- Topology: Two-hop network with two sources and two sinks
|
|
||||||
- Sources periodically generate a sensing message which is divided into fragments
|
|
||||||
- Traffic load is changed by varying the inter-arrival period of the messages: (for inter-arrival period of 5s, a message is generated every 5s by each source. Here it varies between 1-10s)
|
|
||||||
![[Pasted image 20241011182036.png]]
|
|
||||||
|
|
||||||
- In each test, there are 10 messages generated on each source node
|
|
||||||
- Each message has 10 fragments, and each fragment has 40 bytes (200 data packets to be passed from sources to sinks)
|
|
||||||
- The total energy consumption of each node is measured for sending this fixed amount of data
|
|
||||||
![[Pasted image 20241011182343.png]]
|
|
||||||
|
|
||||||
- S-MAC consumes much less energy than 802.11-like protocol without sleeping
|
|
||||||
- At heavy load, idle listening rarely happens, energy savings from sleeping is very limited
|
|
||||||
- At light load, periodic sleeping plays a key role
|
|
||||||
|
|
||||||
Conclusions:
|
|
||||||
- A mainly static network is assumed
|
|
||||||
- Trades off latency for reduced energy consumption
|
|
||||||
- Redundant data is still sent with increased latency
|
|
||||||
|
|
||||||
### Routing
|
|
||||||
- routing technique is needed to establish multi-hop communication
|
|
||||||
- the routing strategy should ensure
|
|
||||||
- mminimun energy consumption
|
|
||||||
- maximization of the network lifetime
|
|
||||||
|
|
||||||
#### Ad Hoc Routing Protocols – Classification
|
|
||||||
- **network topology**
|
|
||||||
- flat
|
|
||||||
- hierarchical
|
|
||||||
- **which data is used to identify nodes**
|
|
||||||
- arbitrary identifier
|
|
||||||
- the position of a node
|
|
||||||
- can be used to assist in geographical routing problems to decide next hop
|
|
||||||
- scalable and suitable for sensor networks
|
|
||||||
|
|
||||||
##### Flat routing protocols
|
|
||||||
Three main categories
|
|
||||||
|
|
||||||
- Proactive protocols (table driven)
|
|
||||||
- always tries to keep routing data up-to-date
|
|
||||||
- active before tables are actually needed
|
|
||||||
- routes are always already known
|
|
||||||
- more bandwidth and energy usage
|
|
||||||
- Reactive protocols
|
|
||||||
- route determined only when needed
|
|
||||||
- operates on demand
|
|
||||||
- when a route is needed, a kind of global search is started
|
|
||||||
- causes delays if routes are not already cached
|
|
||||||
- Hybrid protocols
|
|
||||||
- combination of these behaviors
|
|
||||||
|
|
||||||
### Destination Sequence Distance Vector (DSDV)
|
|
||||||
- based on bellman-ford algorithm
|
|
||||||
- proactive protocol
|
|
||||||
- add aging information to avoid routing loops
|
|
||||||
- on topology change, send incremental route updates
|
|
||||||
- unstable route updates are delayed
|
|
||||||
|
|
||||||
![[Pasted image 20241011191033.png]]
|
|
||||||
|
|
||||||
- to avoid loops, DSDV adds a **sequence number** to each routing table entry which is periodically updated. Routes with higher sequence number are preferred
|
|
||||||
|
|
||||||
##### Reactive protocols
|
|
||||||
|
|
||||||
### Flooding
|
|
||||||
- copies of incoming packets are sent by every link except the one by which the packet is arrived
|
|
||||||
- generates a lot of superfluous traffic
|
|
||||||
- flooding is a reactive technique, and does not require costly topology maintenance and complex route discovery algorithms
|
|
||||||
|
|
||||||
Characteristics:
|
|
||||||
- derivery is guaranteed (e grazie al cazzo)
|
|
||||||
- one copy will arrive by the quickest possible route (wow)
|
|
||||||
|
|
||||||
Drawbacks:
|
|
||||||
- implosion: duplicated messages are broadcasted to the same node
|
|
||||||
- overlap: if two nodes share the same under observation region, both of them may sense the same stimuli at the same time. As a result, neighbor nodes receive duplicated messages
|
|
||||||
- resource blindness (no knowledge about the available resources)
|
|
||||||
- does not take into consideration all the available energy resources
|
|
||||||
- consumes a lot of energy
|
|
||||||
|
|
||||||
### Gossiping
|
|
||||||
- nodes send the incoming packages to a randomly selected neighbor
|
|
||||||
- avoids implosion, but it takes long to propagate the message
|
|
||||||
|
|
||||||
### Dynamic Source Routing (DSR)
|
|
||||||
- Source routing: Each data packet sent carries in its header the complete, ordered list of nodes through which the packet will pass
|
|
||||||
- The sender can select and control the routes used for its own packets and supports the use of multiple routes to any destination
|
|
||||||
|
|
||||||
|
|
||||||
not finished yet
|
|
|
@ -134,3 +134,167 @@ We define
|
||||||
#### Closed set
|
#### Closed set
|
||||||
We don't have thresholds!
|
We don't have thresholds!
|
||||||
The only possible error is that the correct identity does not appear at rank 1.
|
The only possible error is that the correct identity does not appear at rank 1.
|
||||||
|
|
||||||
|
L’identificazione closed-set è un caso speciale in cui si assume che il soggetto
|
||||||
|
associato a ogni probe ha sicuramente una corrispondenza nella gallery. Questo
|
||||||
|
caso, seppur applicabile in pochi contesti, va comunque tenuto in considerazione.
|
||||||
|
Possiamo utilizzare le seguenti metriche:
|
||||||
|
- Cumulative Match Score a rango k ( **CMS(k)** ): probabilità di identificare un soggetto entro il rango k
|
||||||
|
- Cumulative Match Characteristic (**CMC**): curva che mostra per ogni rango k il valore $CMS(k)$
|
||||||
|
- Recognition Rate (**RR**): $CMS(1)$
|
||||||
|
|
||||||
|
## Organizzazione del dataset
|
||||||
|
Una cosa da tenere bene a mente è che in fase di sviluppo e test del modello noi abbiamo sempre a disposizione il “ground truth” (la verità) di ogni template, nel momento in cui invece un modello viene utilizzato poi in situazioni reali, questa informazione è ovviamente non disponibile.
|
||||||
|
|
||||||
|
Per una buona valutazione di un sistema è molto importante avere una buona suddivisione del dataset. Questa avviene su diversi livelli.
|
||||||
|
|
||||||
|
##### Suddivisione Training Set / Testing Set
|
||||||
|
L'obiettivo è quello di consentire una fase di training che possa garantire la generalità, deve quindi comprendere esempi molto differenziati tra loro.
|
||||||
|
Non tutti i sistemi richiedono necessariamente training in senso lato (ML), ma il training set può essere usato in ogni caso per impostare parametri da usare poi in fase di testing.
|
||||||
|
Si possono inserire nel TS soggetti non presenti nel TR per testare meglio la generalità del sistema.
|
||||||
|
|
||||||
|
##### Suddivisione probe vs gallery
|
||||||
|
Suddivisione effettuata per sample, condizione fondamentale $P ∩ G = ∅$
|
||||||
|
Nella gallery è buona norma inserire template catturati in condizioni controllate, in quanto nella realtà è la condizione più frequente.
|
||||||
|
|
||||||
|
##### Suddivisione probe set
|
||||||
|
Si sceglie quanti probe enrolled e quanti probe non enrolled selezionare
|
||||||
|
- $P = P_{G}\cup P_{N}$ potrebbe condizionare i risultati di valutazione nel caso di identificazione open-set
|
||||||
|
- in identificazione closed-set, l'unica scelta possibile è chiaramente $P = P_{G}$
|
||||||
|
- in fase di verifica la scelta non condiziona i risultati
|
||||||
|
|
||||||
|
In generale, per evitare il più possibile di avere bias, bisognerebbe partizionare il dataset in diversi modi, ripetere l'evaluation e considerare le prestazioni medie.
|
||||||
|
## Probe vs All gallery
|
||||||
|
Possiamo costruire una matrice di distanze (es. distanza euclidea) tra coppie di template probe-gallery
|
||||||
|
- Per ogni coppia probe/gallery è possibile calcolare in anticipo una matrice di distanza probe-vs-all gallery, memorizzando le distanze tra coppie di template (probe template vs gallery template)
|
||||||
|
- La matrice può essere usata come strumento di valutazione per diversi tipi di applicazioni
|
||||||
|
- Ogni riga corrisponde a un'operazione di riconoscimento su un probe in input (con o senza claim di identità)
|
||||||
|
- le colonne i gallery template
|
||||||
|
- le righe rappresentano i probe template
|
||||||
|
- Si possono avere più di un sample per soggetto, e sarebbero rappresentati in colonne diverse della matrice
|
||||||
|
- avere più sample diminuisce il FRR ma aumenta il FAR!
|
||||||
|
|
||||||
|
##### Verifica
|
||||||
|
- in caso di verifica, l'identità reale viene associata a quella claimed
|
||||||
|
- è importante considerare la claimed identity piuttosto che quella reale
|
||||||
|
- per una buona valutazione bisogna fare test con suddivisioni probe/gallery diverse
|
||||||
|
- per una buona valutazione bisogna fare test con diverse distribuzioni di probe genuine e probe di impostori
|
||||||
|
|
||||||
|
##### Identificazione open set
|
||||||
|
come prima, ma non abbiamo identity claim. Inoltre possono esistere probe non appartenenti a nessun'identità.
|
||||||
|
- per una buona valutazione bisogna fare test con suddivisioni probe/gallery diverse
|
||||||
|
- per una buona valutazione bisogna fare test con diverse distribuzioni di probe genuine e impostori
|
||||||
|
|
||||||
|
##### Identificazione closed set
|
||||||
|
come prima, ma non abbiamo impostori né threshold.
|
||||||
|
- per una buona valutazione bisogna fare test con suddivisioni probe/gallery diverse
|
||||||
|
|
||||||
|
## All-against-All
|
||||||
|
Metodo alternativo al precedente. Consiste nel calcolare una matrice di distanze tra tutti i possibili template, ognuno dei quali giocherà entrambi i ruoli di "probe" e "gallery template".
|
||||||
|
|
||||||
|
Pro:
|
||||||
|
- facile da programmare
|
||||||
|
- mette sotto stress il sistema (ci saranno più tentativi da impostore rispetto a quelli da utente genuino)
|
||||||
|
|
||||||
|
Contro:
|
||||||
|
- calcolo della matrice dispendioso
|
||||||
|
- impossibilità di calcolare diverse distribuzioni tra probe e gallery
|
||||||
|
|
||||||
|
Tutti gli algoritmi riportati di seguito hanno in comune i seguenti elementi:
|
||||||
|
- M = matrice delle distanze
|
||||||
|
- N = numero di soggetti
|
||||||
|
- S = numero di template per soggetto
|
||||||
|
- G = numero totale di sample = numero di righe/colonne di M ($|G|=S\times N$)
|
||||||
|
- i = indice righe
|
||||||
|
- j = indice colonne
|
||||||
|
- $label(x)$ = vera identità di x
|
||||||
|
|
||||||
|
Ovviamente ad ogni riga escludo sempre un'operazione, perché non testo un template con se stesso.
|
||||||
|
##### Verifica, template singolo
|
||||||
|
ogni riga è un set di |G| − 1 operazioni, con S − 1 test genuini e (N − 1) × S test da impostore.
|
||||||
|
```js
|
||||||
|
for each threshold t
|
||||||
|
for each cell Mi,j with i != j
|
||||||
|
if Mi,j <= t then
|
||||||
|
if label(i) = label(j) then GA++
|
||||||
|
else FA++
|
||||||
|
else if label(i) = label(j) then FR++
|
||||||
|
else GR++
|
||||||
|
GAR(t)=GA/TG //TG = |G| * (S-1) total genuine attempts
|
||||||
|
FAR(t)=FA/TI //TI = |G| * (N-1) * S total impostor attempts
|
||||||
|
FRR(t)=FR/TG
|
||||||
|
GRR(t)=GR/TI
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Verifica, template multipli
|
||||||
|
invece di testare ogni coppia di template, testo un template nelle righe con tutto il gruppo di template che hanno la stessa identità nelle colonne, considerando però solo quello con distanza minore.
|
||||||
|
Per cui, ogni riga è un set di N operazioni (una per utente), con 1 test genuino e N − 1 test da impostore.
|
||||||
|
|
||||||
|
```js
|
||||||
|
for each threshold t
|
||||||
|
for each row i
|
||||||
|
for each group Mlabel of cells Mi,j with same label(j) excluding Mi,i
|
||||||
|
diff = min(Mlabel)
|
||||||
|
if diff <= t then
|
||||||
|
if label(i) = label(Mlabel) then GA++
|
||||||
|
else FA++
|
||||||
|
else if label(i) = label(Mlabel) then FR++
|
||||||
|
else GR++
|
||||||
|
GAR(t)=GA/TG
|
||||||
|
FAR(t)=FA/TI
|
||||||
|
FRR(t)=FR/TG
|
||||||
|
GRR(t)=GR/TI
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Identificazione open-set, template multipli
|
||||||
|
ogni riga è un set di due operazioni, una da impostore e una da utente genuino.
|
||||||
|
```js
|
||||||
|
for each threshold t
|
||||||
|
for each row i
|
||||||
|
{Li,m| m=1 … |G|-1} =
|
||||||
|
{Mi,j |j=1, … |G|} \ {Mi,i} ordered by increasing value
|
||||||
|
// the identical element is excluded
|
||||||
|
if Li,1 <= t then // potential accept
|
||||||
|
if label(i) = label(Li,j) then
|
||||||
|
DI(t, 1)++ // genuine case detected+identified
|
||||||
|
|
||||||
|
// parallel impostor case:
|
||||||
|
// jump the templates belonging to label(i) since i not in G
|
||||||
|
find first Li,k such that label(Li,k) != label(i) and Li,k <= t
|
||||||
|
if this k exists, then FA++
|
||||||
|
// the first template != label(i) has a distance <= t
|
||||||
|
else GR++ // impostor is correctly not detected
|
||||||
|
else find first Li,k such that label(i) = label(Li,k) and Li,k <= t
|
||||||
|
//if genuine yet not the first, look for higher ranks
|
||||||
|
if this k exists, then
|
||||||
|
DI(t, k)++ //end of genuine
|
||||||
|
FA++ //impostor in parallel, distance below t but different label. No need to jump since the first label is not the «impostor»
|
||||||
|
else GR++ // impostor case counted directly, FR computed through DIR
|
||||||
|
|
||||||
|
DIR(t, 1) = DI(t, 1)/TG
|
||||||
|
FRR(t) = 1 - DIR(t,1)
|
||||||
|
FAR(t) = FA/TI
|
||||||
|
GRR(t)=GR/TI
|
||||||
|
k=2 //higher ranks
|
||||||
|
while DI(t, k) != 0
|
||||||
|
DIR(t, k) = (DI(t, k) / TG) + DIR(t, k-1) //we have to compute rates
|
||||||
|
k++
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Identificazione closed-set, template multipli
|
||||||
|
ogni riga è un'operazione da utente genuino, non ci sono impostori e non c'è threshold.
|
||||||
|
TA è il numero totale di attempts = |G|
|
||||||
|
```js
|
||||||
|
for each row i
|
||||||
|
{Li,m| m=1 … |G|} = {Mi,j |j=1, …|G|} ordered by increasing value
|
||||||
|
find the first Li,k such that label(i)=label(Li,k)
|
||||||
|
CMS(k)++
|
||||||
|
|
||||||
|
CMS(1) = CMS(1) / TA
|
||||||
|
RR = CMS(1)
|
||||||
|
|
||||||
|
k=2
|
||||||
|
while k < |G| - 1
|
||||||
|
CMS(k) = CMS(k) / TA + CMS(k - 1)
|
||||||
|
k++
|
||||||
|
```
|
53
Foundation of data science/notes/1 CV Basics.md
Normal file
53
Foundation of data science/notes/1 CV Basics.md
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
|
||||||
|
## Filtering
|
||||||
|
### Linear Filtering
|
||||||
|
- Linear filtering involves replacing each pixel in an image with a linear combination of its neighbours.
|
||||||
|
- This technique can be used to reduce noise, fill in missing information, and extract image features like edges and corners.
|
||||||
|
- The simplest case of linear filtering is convolution, which uses a filter kernel to multiply and sum values from a local image patch.
|
||||||
|
- Convolution is a common linear filtering operation that can be expressed as matrix multiplication. It involves sliding the filter kernel across the image, multiplying the kernel values with the corresponding pixel values, and summing the results to get the new pixel value.
|
||||||
|
##### Applications of linear filtering
|
||||||
|
- Noise reduction
|
||||||
|
- Filling in missing values/information
|
||||||
|
- Extracting image features (edges, corners...)
|
||||||
|
### Gaussian Filtering
|
||||||
|
- Gaussian filtering is a type of linear filtering that uses a Gaussian kernel, which weights nearby pixels more than distant ones.
|
||||||
|
- This approach reflects the idea of probabilistic inference and leads to effective smoothing of the image.
|
||||||
|
- Gaussian filters are separable, meaning they can be implemented efficiently by convolving each row and column with a 1D Gaussian filter.
|
||||||
|
|
||||||
|
### Multi-Scale Image Representation: Gaussian Pyramid
|
||||||
|
- A Gaussian pyramid is a multi-scale representation of an image created by repeatedly applying Gaussian smoothing and subsampling.
|
||||||
|
- This pyramid allows for the analysis of images at different levels of detail, enabling tasks like searching for objects across scales.
|
||||||
|
- Subsampling without proper pre-smoothing can introduce aliasing artefacts, where high-frequency details are misrepresented. Using a Gaussian filter before downsampling helps prevent this by attenuating high frequencies.
|
||||||
|
|
||||||
|
### Edge Detection
|
||||||
|
- Edges represent significant changes in image intensity and can be detected using derivatives.
|
||||||
|
- Image derivatives can be computed using linear filters that approximate the first and second derivatives of the image.
|
||||||
|
- The first derivative is useful for detecting edges as points of rapid intensity change
|
||||||
|
- The second derivative helps locate edges as zero-crossings.
|
||||||
|
- The Canny edge detector is a popular algorithm that combines Gaussian smoothing, gradient computation, non-maximum suppression, and hysteresis thresholding for robust edge detection.
|
||||||
|
1. Gaussian smoothing: Reduces noise before calculating derivatives
|
||||||
|
2. Gradient computation: Calculate the magnitude and direction of the gradient using derivative filters
|
||||||
|
3. Non-maximum suppression: Thin edges by keeping only the local maxima of the gradient magnitude along the gradient direction
|
||||||
|
4. Hysteresis thresholding: Use two thresholds to connect strong edges and extend them to weaker ones
|
||||||
|
- The Laplacian of Gaussian operator, often approximated using the Difference of Gaussians, can also be used for edge detection by identifying zero-crossings in the second derivative.
|
||||||
|
- The choice of scale for smoothing filters (e.g., the sigma value for Gaussian) influences the types of edges detected. Smaller scales capture fine details, while larger scales focus on more prominent edges.
|
||||||
|
|
||||||
|
## Recognition
|
||||||
|
### Recognition using Line Drawings
|
||||||
|
- Line drawings, often generated from edge detection results, can serve as a simplified representation for object recognition and localisation.
|
||||||
|
- By matching model lines to extracted image lines, the 3D pose of an object can be estimated.
|
||||||
|
- Method
|
||||||
|
1. Extract edges from the image using an edge detector
|
||||||
|
2. Fit lines to the detected edges, representing object boundaries
|
||||||
|
3. Match model lines (from a known object) to the extracted image lines
|
||||||
|
4. By finding the best-fitting alignment, the 3D pose (position and orientation) of the object can be estimated.
|
||||||
|
### Object Instance Identification using Color Histograms
|
||||||
|
- Object instance identification aims to recognize specific instances of objects, distinguishing them from other instances of the same class.
|
||||||
|
- Color histograms provide a statistical representation of the color distribution in an image, enabling object identification by comparing histogram similarity.
|
||||||
|
- They are robust to translation, rotation, and partial occlusion.
|
||||||
|
- However, color histograms are sensitive to illumination changes and might not be discriminative enough for all object types.
|
||||||
|
|
||||||
|
##### Comparison Measures for Histograms:
|
||||||
|
- **Intersection:** Measures the overlap between two histograms, reflecting shared colour characteristics. Ranges from 0 (no overlap) to 1 (identical).
|
||||||
|
- **Euclidean distance:** Quantifies the overall difference between histograms. Larger distances indicate less similarity.
|
||||||
|
- **Chi-square:** A statistically motivated measure sensitive to differences in bin proportions. More discriminative than Euclidean distance but can be affected by outliers.
|
BIN
Foundation of data science/slides/IP CV Basics.pdf
Normal file
BIN
Foundation of data science/slides/IP CV Basics.pdf
Normal file
Binary file not shown.
|
@ -1,12 +0,0 @@
|
||||||
$$f[[m,n]+[m^{\prime},n^{\prime}]]=f\left\lbrack m+m^{\prime},n+n^{\prime}\right\rbrack=f\left\lbrack m,n\right\rbrack+f\left\lbrack m^{\prime},n^{\prime}\right\rbrack
|
|
||||||
|
|
||||||
|
|
||||||
$$
|
|
||||||
$$\sum_{k,l}{I[(m+m')-k,(n+n')-l]g[k,l]}=\sum_{k,l}{I[m-k,n-l]g[k,l]}+\sum_{k,l}{I[m'-k,n'-l]g[k,l]}$$
|
|
||||||
|
|
||||||
$$\sum_{k,l}{I[(m+m')-k,(n+n')-l]g[k,l]}=\sum_{k,l}{I[m-k,n-l]g[k,l] + I[m'-k,n'-l]g[k,l]}$$
|
|
||||||
|
|
||||||
$$\sum_{k,l}{I[(m+m')-k,(n+n')-l]g[k,l]}=\sum_{k,l}{(I[m-k,n-l] + I[m'-k,n'-l])g[k,l]}$$
|
|
||||||
|
|
||||||
$$\sum_{k,l}{I[(m+m')-k,(n+n')-l]g[k,l]}=\sum_{k,l}{I[(m+m')-k,(n+n')-l]g[k,l]}$$
|
|
||||||
|
|
|
@ -1,47 +0,0 @@
|
||||||
#### Object recognition
|
|
||||||
Different types of recognition
|
|
||||||
- object identification
|
|
||||||
- object classification
|
|
||||||
|
|
||||||
##### Which level is right for Object Classes?
|
|
||||||
- Basic-Level Categories
|
|
||||||
|
|
||||||
###### Challenges
|
|
||||||
- multi-view: different view points
|
|
||||||
- multi-class: different types of the same object (different car models)
|
|
||||||
- varying illumination
|
|
||||||
- ecc
|
|
||||||
|
|
||||||
### Filtering basics
|
|
||||||
- Linear filtering
|
|
||||||
- Gaussian filtering
|
|
||||||
- Multi scale image representation
|
|
||||||
- gaussian pyramid
|
|
||||||
- edge detection
|
|
||||||
- recognition using line drawings
|
|
||||||
- image derivatives (1st and 2nd order)
|
|
||||||
- object instance identification using color histograms
|
|
||||||
- performing evaluation
|
|
||||||
|
|
||||||
probabilità dadi
|
|
||||||
$Px(5) = 1/6$
|
|
||||||
$Py(5) = 1/6$
|
|
||||||
$Px+y(5) = ?$
|
|
||||||
|
|
||||||
We can count the possible cases
|
|
||||||
total cases: $6*6=36$
|
|
||||||
|
|
||||||
|
|
||||||
| 6 | 7 | 8 | 9 | 10 | 11 | 12 |
|
|
||||||
| --- | --- | --- | --- | --- | --- | --- |
|
|
||||||
| 5 | 6 | 7 | 8 | 9 | 10 | 11 |
|
|
||||||
| 4 | 5 | 6 | 7 | 8 | 9 | 10 |
|
|
||||||
| 3 | 4 | 5 | 6 | 7 | 8 | 9 |
|
|
||||||
| 2 | 3 | 4 | 5 | 6 | 7 | 8 |
|
|
||||||
| 1 | 2 | 3 | 4 | 5 | 6 | 7 |
|
|
||||||
| | 1 | 2 | 3 | 4 | 5 | 6 |
|
|
||||||
|
|
||||||
possible cases: $P(3)P(1)+P(2)P(2)+P(1)P(3)$
|
|
||||||
|
|
||||||
|
|
||||||
$P[x*y](S) = $
|
|
0
conflict-files-obsidian-git.md
Normal file
0
conflict-files-obsidian-git.md
Normal file
Loading…
Reference in a new issue