vault backup: 2024-10-21 00:46:21
.obsidian/core-plugins.json
@@ -1,20 +1,30 @@
-[
-  "file-explorer",
-  "global-search",
-  "switcher",
-  "graph",
-  "backlink",
-  "canvas",
-  "outgoing-link",
-  "tag-pane",
-  "page-preview",
-  "daily-notes",
-  "templates",
-  "note-composer",
-  "command-palette",
-  "editor-status",
-  "bookmarks",
-  "outline",
-  "word-count",
-  "file-recovery"
-]
+{
+  "file-explorer": true,
+  "global-search": true,
+  "switcher": true,
+  "graph": true,
+  "backlink": true,
+  "canvas": true,
+  "outgoing-link": true,
+  "tag-pane": true,
+  "properties": false,
+  "page-preview": true,
+  "daily-notes": true,
+  "templates": true,
+  "note-composer": true,
+  "command-palette": true,
+  "slash-command": false,
+  "editor-status": true,
+  "bookmarks": true,
+  "markdown-importer": false,
+  "zk-prefixer": false,
+  "random-note": false,
+  "outline": true,
+  "word-count": true,
+  "slides": false,
+  "audio-recorder": false,
+  "workspaces": false,
+  "file-recovery": true,
+  "publish": false,
+  "sync": false
+}
.obsidian/workspace.json
@@ -4,31 +4,21 @@
"type": "split",
"children": [
{
"id": "4ed64efefb09317f",
"id": "381b669928317be4",
"type": "tabs",
"children": [
{
"id": "6d4343be199e576a",
"id": "6253f413423c2998",
"type": "leaf",
"state": {
"type": "image",
"type": "markdown",
"state": {
"file": "Biometric Systems/images/Pasted image 20241016174417.png"
}
}
"file": "Foundation of data science/notes/1 CV Basics.md",
"mode": "source",
"source": false
},
{
"id": "924948579db1cec9",
"type": "leaf",
"state": {
"type": "pdf",
"state": {
"file": "Biometric Systems/slides/LEZIONE3_Affidabilita_del_riconoscimento.pdf",
"page": 29,
"left": -4,
"top": 89,
"zoom": 0.8
}
"icon": "lucide-file",
"title": "1 CV Basics"
}
}
],
@@ -52,7 +42,9 @@
"type": "file-explorer",
"state": {
"sortOrder": "alphabetical"
}
},
"icon": "lucide-folder-closed",
"title": "Files"
}
},
{
@@ -67,7 +59,9 @@
"collapseAll": false,
"extraContext": false,
"sortOrder": "alphabetical"
}
},
"icon": "lucide-search",
"title": "Search"
}
},
{
@@ -75,7 +69,9 @@
"type": "leaf",
"state": {
"type": "bookmarks",
"state": {}
"state": {},
"icon": "lucide-bookmark",
"title": "Bookmarks"
}
}
]
@@ -98,7 +94,7 @@
"state": {
"type": "backlink",
"state": {
"file": "Biometric Systems/images/Pasted image 20241016174417.png",
"file": "6 Internet of Things.md",
"collapseAll": false,
"extraContext": false,
"sortOrder": "alphabetical",
@@ -106,7 +102,9 @@
"searchQuery": "",
"backlinkCollapsed": false,
"unlinkedCollapsed": true
}
},
"icon": "links-coming-in",
"title": "Backlinks for 6 Internet of Things"
}
},
{
@@ -115,10 +113,12 @@
"state": {
"type": "outgoing-link",
"state": {
"file": "Biometric Systems/images/Pasted image 20241016174417.png",
"file": "6 Internet of Things.md",
"linksCollapsed": false,
"unlinkedCollapsed": true
}
},
"icon": "links-going-out",
"title": "Outgoing links from 6 Internet of Things"
}
},
{
@@ -129,7 +129,9 @@
"state": {
"sortOrder": "frequency",
"useHierarchy": true
}
},
"icon": "lucide-tags",
"title": "Tags"
}
},
{
@@ -138,8 +140,10 @@
"state": {
"type": "outline",
"state": {
"file": "Biometric Systems/images/Pasted image 20241016174417.png"
}
"file": "6 Internet of Things.md"
},
"icon": "lucide-list",
"title": "Outline of 6 Internet of Things"
}
},
{
@@ -147,7 +151,9 @@
"type": "leaf",
"state": {
"type": "git-view",
"state": {}
"state": {},
"icon": "git-pull-request",
"title": "Source Control"
}
}
],
@@ -172,15 +178,26 @@
"obsidian-git:Open Git source control": false
}
},
"active": "5d5551c2fd0314c8",
"active": "b5d8a3515919e28a",
"lastOpenFiles": [
"Autonomous Networking/images/Pasted image 20241017161803.png",
"Autonomous Networking/images/Pasted image 20241017161747.png",
"Autonomous Networking/images/Pasted image 20241017161744.png",
"Autonomous Networking/images/Pasted image 20241017161724.png",
"Autonomous Networking/images/Pasted image 20241017154152.png",
"Biometric Systems/images/Pasted image 20241017083255.png",
"Biometric Systems/images/Pasted image 20241017083943.png",
"Biometric Systems/images/Pasted image 20241017083506.png",
"Autonomous Networking/notes/6.1 RL.md",
"Autonomous Networking/notes/6 Internet of Things.md",
"Autonomous Networking/notes/5 Drones.md",
"Autonomous Networking/slides/5 Drones.pdf",
"Biometric Systems/notes/4. Face recognition.md",
"Biometric Systems/slides/LEZIONE4_Face introduction and localization.pdf",
"Biometric Systems/notes/3. Recognition Reliability.md",
"Biometric Systems/slides/LEZIONE3_Affidabilita_del_riconoscimento.pdf",
"Biometric Systems/images/Pasted image 20241016174417.png",
"Biometric Systems/images/Pasted image 20241016174411.png",
"Biometric Systems/images/Pasted image 20241016174120.png",
"Biometric Systems/images/Pasted image 20241016143112.png",
"Biometric Systems/images/Pasted image 20241016141746.png",
"Foundation of data science/slides/IP CV Basics.pdf",
"Foundation of data science/notes/1 CV Basics.md",
"Foundation of data science/slides/Untitled.md",
@@ -193,12 +210,6 @@
"Biometric Systems/notes",
"Autonomous Networking/notes/4 WSN Routing.md",
"Autonomous Networking/notes/3 WSN MAC.md",
"Autonomous Networking/images/Pasted image 20241012182403.png",
"Autonomous Networking/images/Pasted image 20241012175224.png",
"Autonomous Networking/images/Pasted image 20241012174130.png",
"Untitled.md",
"Autonomous Networking/images/Pasted image 20241011191033.png",
"Autonomous Networking/images/Pasted image 20241011182343.png",
"Autonomous Networking/notes/3 WSN.md",
"Autonomous Networking/slides/3 WSN.pdf",
"Autonomous Networking/notes/2 RFID.md",
@@ -211,8 +222,6 @@
"[[[LEZIONE2_Indici_di_prestazione.pdf.md",
"[LEZIONE2_Indici_di_prestazione.pdf.md",
"Biometric Systems/final notes/1. Introduction.md",
"Autonomous Networking/slides/2 RFID.pdf",
"Autonomous Networking/slides",
"Foundation of data science/slides/notes 2.md",
"Biometric Systems/slides/lezione1 notes.md",
"Untitled.canvas"
BIN  Autonomous Networking/images/Pasted image 20241017154152.png (new file, 109 KiB)
BIN  Autonomous Networking/images/Pasted image 20241017161724.png (new file, 59 KiB)
BIN  Autonomous Networking/images/Pasted image 20241017161744.png (new file, 46 KiB)
BIN  Autonomous Networking/images/Pasted image 20241017161747.png (new file, 46 KiB)
BIN  Autonomous Networking/images/Pasted image 20241017161803.png (new file, 64 KiB)
Autonomous Networking/notes/5 Drones.md
@@ -0,0 +1,138 @@
#### Unmanned Aerial Vehicle (UAV)
- A UAV, commonly known as a drone, is an aircraft without a human pilot aboard (unmanned or uncrewed)
- it operates either
    - under remote control by a human
    - autonomously, by on-board computers

**Weight:** from 0.5 g to 15,000 kg
**Maximum speed:** up to 11,265 km/h
**Propellant:**
- fossil fuel
- battery

#### Uses
- timely disaster warnings
- delivery of medical supplies
- operation in dangerous situations
- traffic monitoring, wind estimation, remote sensing...

In many scenarios, UAVs need to exchange a relatively large amount of data among themselves and/or with a control station. In many cases there is no network infrastructure available. Drones can also be used to extend terrestrial communication networks.

Drones can be equipped with several standard radio modules:
- Wi-Fi
- Cellular
- LPWAN (low-power wide-area network, e.g. LoRa)

## Routing
Dronet routing may require multi-hop data connections.
#### Comparison of WSNs and dronets

| | **WSN** | **Dronet** |
| ------------------ | --------------------------------- | -------------------------------- |
| **Mobility** | none or partial | high, even 3D |
| **Topology** | random, star, ad-hoc; node failure | mesh |
| **Infrastructure** | absent | absent |
| **Energy source** | battery | battery (very limited) |
| **Typical use** | environmental monitoring | rescue, monitoring, surveillance |
**Goals of routing protocols:**
- increase delivery ratio
- loop freedom
- low overhead
- reduce delays
- low energy consumption
- scalability

### Proactive routing protocols
Are they suitable for UAV networks? Not entirely:
- slow reaction to topology changes, which causes delays
- bandwidth constraints

Protocols:
- OLSR - Optimized Link State Routing
- DSDV - Destination-Sequenced Distance Vector
- B.A.T.M.A.N. - Better Approach To Mobile Ad-hoc Networking

### Reactive protocols
- DSR - Dynamic Source Routing
- AODV - Ad hoc On-Demand Distance Vector

#### Hybrid protocols
- ZRP - Zone Routing Protocol
- TORA - Temporarily Ordered Routing Algorithm

### B.A.T.M.A.N.
A proactive, distance-vector routing protocol for Mobile Ad-hoc Networks (MANETs) and mesh networks, designed for decentralized decision-making and self-organizing networks.

**Key features:**
- decentralized routing
    - no node has global knowledge of the entire network
- next-hop based
    - nodes only know their best next-hop neighbor for reaching a destination
- link-quality driven
    - decisions are based on the quality of the links between nodes
- self-healing
    - adapts to changes automatically
**How B.A.T.M.A.N. works**
- Originator Messages (OGMs):
    - each node broadcasts OGMs to announce its presence
    - OGMs are forwarded by neighbors to propagate through the network
    - each OGM carries a sequence number to ensure the information is up to date and to avoid routing loops
- nodes evaluate how frequently they receive OGMs from their neighbors to determine link quality
- each node maintains a routing table with the best next-hop neighbor based on link quality

- fields inside an OGM:
    - originator address
    - sequence number
    - TTL (hop limit)
    - LQ (link quality between the sender and the originator)
    - hop count

Asymmetry problem:
If A can reach B well, B assumes it can also reach A well, but that may not be the case.
To overcome this issue there is the Transmit Quality (TQ) algorithm.

B transmits RQ (receive quality) packets to A; A counts them to measure the receive quality of the link.
A obtains the echo quality (EQ) by counting the rebroadcasts of its own OGMs by its neighbors.
Dividing the echo quality by the receive quality, A can calculate the transmit quality.

**Propagation**
When A originates an OGM, it sets TQ to 100%. Each neighbor multiplies its own local link quality into the received TQ value and rebroadcasts the packet (a small numeric sketch follows after the figure):
- $TQ = TQ_{incoming} \cdot TQ_{local}$

![[Pasted image 20241017154152.png]]
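A minimal sketch of this TQ bookkeeping, for intuition only (function names and the example numbers are assumptions, not the protocol's reference implementation):

```python
# Sketch of B.A.T.M.A.N.-style TQ bookkeeping (illustrative only).

def local_transmit_quality(echo_count: int, receive_count: int) -> float:
    """TQ towards a neighbor = echo quality / receive quality.

    echo_count    : how many of our own OGMs that neighbor rebroadcast (EQ)
    receive_count : how many RQ/OGM packets we received from it (RQ)
    """
    if receive_count == 0:
        return 0.0
    return min(1.0, echo_count / receive_count)

def propagate_tq(incoming_tq: float, local_tq: float) -> float:
    """TQ carried by a rebroadcast OGM: TQ = TQ_incoming * TQ_local."""
    return incoming_tq * local_tq

# Example: the originator starts at TQ = 1.0 (100%); two hops with link
# qualities 0.9 and 0.5 leave TQ = 0.45 for that route.
tq = 1.0
for link_quality in (0.9, 0.5):
    tq = propagate_tq(tq, link_quality)
print(f"route TQ after two hops: {tq:.2f}")
```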

### Geographic protocols
The geographical position information of the nodes is used for forwarding decisions.
Nodes know their own position via GPS.

Geographic routing schemes don't need information about the entire network:
- no route discovery
- no routing tables
- packets are forwarded based on local information
- less overhead and bandwidth usage, and therefore less energy consumption
- for routing decisions a drone only needs the positions of its neighbors and of the destination

Every node has the coordinates of its neighbors.

**Dead end problem**
Several techniques have been defined in sensor networks to recover from a dead end, but they are often not applicable to dronets.

Geo routing is based on three main approaches.

#### Greedy forwarding
As stated before: forward the packet to the neighbor closest to the destination.

#### Store-carry and forward
When the network is intermittently connected, forwarder nodes may have no way to find a relay node: it is not possible to forward a data packet to a predefined node that is not in range. So the current node carries the packet until it meets another node or the destination itself (see the sketch below).
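A toy sketch of these two forwarding rules, under simplifying assumptions (2-D positions, neighbors already discovered; names are illustrative): greedy forwarding picks the neighbor that makes the most progress towards the destination, and the packet is carried when no neighbor makes progress (the dead-end case).

```python
# Toy greedy / store-carry-and-forward decision rule (illustrative sketch,
# not a complete dronet routing protocol).
import math

def dist(a: tuple[float, float], b: tuple[float, float]) -> float:
    return math.hypot(a[0] - b[0], a[1] - b[1])

def next_hop(me, neighbors, destination):
    """Greedy forwarding: pick the neighbor closest to the destination;
    return None (carry the packet) when no neighbor makes progress."""
    my_distance = dist(me, destination)
    candidates = [n for n in neighbors if dist(n, destination) < my_distance]
    if not candidates:          # dead end: fall back to store-carry-and-forward
        return None
    return min(candidates, key=lambda n: dist(n, destination))

# Example: one neighbor makes progress towards the destination, so it is chosen.
me, destination = (0.0, 0.0), (10.0, 0.0)
neighbors = [(1.0, 2.0), (-3.0, 0.0)]
print(next_hop(me, neighbors, destination))   # -> (1.0, 2.0)
```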

#### Prediction
Based on geographical location, direction, and speed, prediction schemes estimate the future position of a given node and use it to predict the position of the next relay node.

### DGA algorithm
![[Pasted image 20241017161724.png]]
![[Pasted image 20241017161747.png]]

![[Pasted image 20241017161803.png]]
Autonomous Networking/notes/6 Internet of Things.md
@@ -0,0 +1,55 @@
The IoT term is used to refer to
- the resulting global network of connected smart objects
- the protocols ...
- ...

Required features:
- device heterogeneity
- scalability
- ubiquitous data exchange
- energy-optimized solutions

#### Backscattering
- allows devices to run without a battery
- only available at the research level for now
- uses radio-frequency signals as the power source
- two types
    - ambient
    - RFID

##### Ambient backscattering
- devices harvest power from signals already available in the environment
- they use existing RF signals without requiring any additional infrastructure

- Performance drawbacks
    - low data rate (about 1 kbps)
    - not suitable for real-time applications that continuously exchange data
    - availability of signals
        - the signal may not be available indoors, or not powerful enough

##### RFID backscattering

...

##### Battery-free smart home
- in a smart home there may be a lot of smart devices
- if every one of them has a battery, it's not good for the environment
- we can deploy an RFID reader with multiple antennas that covers all the different rooms

### Communication
add scheme from the slides

RFID tags run the EPC Global standard
- in a smart home we may want fewer bits dedicated to the tag ID and more dedicated to the actual data (see the sketch below):
    - 8 bits for the ID
    - 6 bits for data
    - 4 bits for CRC
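A minimal sketch of packing and unpacking such an 18-bit frame; the field widths come from the note above, while the helper names are illustrative and this is not the actual EPC frame format:

```python
# Pack/unpack an 18-bit tag frame: 8-bit ID | 6-bit data | 4-bit CRC.
# Field widths follow the note above; this is not the real EPC Gen2 layout.

def pack_frame(tag_id: int, data: int, crc: int) -> int:
    assert 0 <= tag_id < 2**8 and 0 <= data < 2**6 and 0 <= crc < 2**4
    return (tag_id << 10) | (data << 4) | crc

def unpack_frame(frame: int) -> tuple[int, int, int]:
    return (frame >> 10) & 0xFF, (frame >> 4) & 0x3F, frame & 0x0F

frame = pack_frame(tag_id=0x2A, data=0b101101, crc=0b1001)
print(unpack_frame(frame))   # -> (42, 45, 9)
```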

### Infrastructure-based wireless networks
- base stations connected to a wired backbone network
- stations choose the closest base station
- Limits
    - when no infrastructure is available
    - expensive/inconvenient to set up
    - when there is no time to set it up
Autonomous Networking/notes/6.1 RL.md
@@ -0,0 +1,122 @@
Case study: battery-free smart home
- each device produces a new data sample at a rate that depends on the environment and the user (continuously, event-based / on demand...)
- a device should only transmit when it has new data
- but in backscattering-based networks devices need to be queried by the receiver

In which order should the reader query the tags?
- assume prefixed timeslots
- TDMA with random access performs poorly
- TDMA with fixed assignment also does (wasted queries)
- we want to query devices that have new data samples and avoid
    - data loss
    - redundant queries

Goal: design a MAC protocol that adapts to all of this.
One possibility is to use Reinforcement Learning.

#### Reinforcement learning
How can an intelligent agent learn to make a good sequence of decisions?

- an agent can figure out how the world works by trying things and seeing what happens
- this is what people and animals do
- we explore a computational approach to learning from interaction
- goal-directed learning from interaction

RL is learning what to do; it presents two main characteristics:
- trial-and-error search
- delayed reward

- sensation, action and goal are the 3 main aspects of a reinforcement learning method
- a learning agent must be able to
    - sense the state of the environment
    - take actions that affect the state

Differences from other ML:
- no supervisor
- feedback may be delayed
- time matters
- agent actions affect future decisions
- ...
- online learning

Learning online:
- learning while interacting with an ever-changing world
- we expect agents to get things wrong and to refine their understanding as they go
- the world is not static; agents continuously encounter new situations

RL applications:
- self-driving cars
- engineering
- healthcare
- news recommendation
- ...

Rewards:
- a reward is a scalar feedback signal (a number)
- the reward $R_t$ indicates how well the agent is doing at step $t$
- the agent should maximize the cumulative reward

RL is based on the reward hypothesis:
all goals can be described by the maximization of expected cumulative reward

Communication in battery-free environments:
- positive reward if the queried device has new data
- negative otherwise

Challenge:
- tradeoff between exploration and exploitation
- to obtain a lot of reward, an RL agent must prefer actions that it has tried and ...
- ...

Exploration vs exploitation dilemma (see the sketch below):
- comes from incomplete information: we need to gather enough information to make the best overall decisions while keeping the risk under control
- exploitation: we take advantage of the best option we know
- exploration: test new decisions
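A common way to handle this tradeoff is an ε-greedy rule; the sketch below is illustrative (the ε value and the value-estimate table are assumptions, not from the lecture): with probability ε the agent explores a random action, otherwise it exploits the action with the best estimated value.

```python
# epsilon-greedy action selection: explore with probability eps, else exploit.
import random

def epsilon_greedy(q_values: dict[int, float], eps: float = 0.1) -> int:
    """q_values maps each action (e.g. 'query device i') to its estimated value."""
    if random.random() < eps:
        return random.choice(list(q_values))        # explore: random action
    return max(q_values, key=q_values.get)          # exploit: best-known action

# Example with three devices whose estimated query values differ.
q = {0: 0.2, 1: 0.7, 2: 0.4}
print(epsilon_greedy(q, eps=0.1))   # usually 1, occasionally a random device
```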

### A general RL framework
At each timestep the agent:
- executes an action
- receives an observation
- receives a scalar reward

The environment:
...

Agent state: the agent's view of the environment state; it is a function of the history.
- the function of the history is involved in taking the next decision
- the state representation defines what happens next
- ...

#### Inside the agent
One or more of these components:
- **Policy:** the agent's behavior function
    - defines what to do (behavior at a given time)
    - maps states to actions
    - core of the RL agent
    - the policy is altered based on the reward
    - may be
        - deterministic: a single function of the state
        - stochastic: specifying probabilities for each action
            - the reward changes the probabilities
- **Value function:**
    - specifies what's good in the long run
    - is a prediction of future reward
    - used to evaluate the goodness/badness of states
    - values are predictions of rewards
    - $v_\pi(s) = \mathbb{E}_\pi[R_{t+1} + \gamma R_{t+2} + \gamma^2 R_{t+3} + \dots \mid S_t = s]$
- **Model:**
    - predicts what the environment will do next
    - many problems are model-free

Back to the original problem:
- n devices
- each device produces new data with rate rate_i
- in which order should the reader query the tags?
- formulate it as an RL problem (see the sketch below)
    - the agent is the reader
    - one action per device (query)
    - rewards:
        - positive when querying a device with new data
        - negative if it has no data
        - what to do if the device has lost data?
    - state?
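One way to make this formulation concrete is the stateless sketch below, which treats the problem as a multi-armed bandit: each arm is "query device i", the reward is +1 when the queried device has new data and -1 otherwise, and the reader updates a per-device value estimate with the ε-greedy rule from above. The data rates, reward values and update rule are illustrative assumptions, and a full solution would also need a notion of state (e.g. time since each device was last queried), which the note leaves as an open question.

```python
# Toy bandit formulation of the reader-query problem (illustrative assumptions:
# Bernoulli data generation, +1/-1 rewards, sample-average value updates;
# state and data loss are ignored).
import random

rates = [0.8, 0.3, 0.05]        # probability each device has new data in a slot
q = [0.0] * len(rates)          # estimated value of querying each device
counts = [0] * len(rates)
eps = 0.1

for slot in range(10_000):
    # epsilon-greedy choice of which device to query in this timeslot
    if random.random() < eps:
        device = random.randrange(len(rates))
    else:
        device = max(range(len(rates)), key=lambda i: q[i])

    has_new_data = random.random() < rates[device]
    reward = 1.0 if has_new_data else -1.0   # positive reward only for useful queries

    counts[device] += 1
    q[device] += (reward - q[device]) / counts[device]   # incremental average

print([round(v, 2) for v in q])  # devices with higher data rates get higher values
```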
BIN  Autonomous Networking/slides/5 Drones.pdf (new file)
BIN  Biometric Systems/images/Pasted image 20241017083255.png (new file, 44 KiB)
BIN  Biometric Systems/images/Pasted image 20241017083506.png (new file, 44 KiB)
BIN  Biometric Systems/images/Pasted image 20241017083943.png (new file, 44 KiB)
@@ -121,3 +121,37 @@ see the slides to integrate the formulas
Another approach is through the concept of "margin", computed as follows: $M(t) = |FAR(t) - FRR(t)|$
Note that at the EER we have $M(t) = 0$ (see the sketch below).
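A small sketch of how the EER could be estimated from this margin, assuming FAR and FRR have already been evaluated on a common grid of thresholds (the arrays here are made-up numbers):

```python
# Estimate the EER as the threshold where the margin |FAR(t) - FRR(t)| is smallest.
# FAR/FRR values below are made-up; in practice they come from evaluating the system.
far = [0.90, 0.60, 0.30, 0.12, 0.05, 0.01]
frr = [0.01, 0.04, 0.10, 0.13, 0.40, 0.70]
thresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]

margins = [abs(fa - fr) for fa, fr in zip(far, frr)]
best = min(range(len(thresholds)), key=lambda i: margins[i])

eer_estimate = (far[best] + frr[best]) / 2
print(f"threshold ~ {thresholds[best]}, EER ~ {eer_estimate:.3f}")
```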

### SRR
There is a clear difference between measuring the quality of an input image and measuring the reliability of a response given by the system.
The latter approach is called the SRR index, a value in the range [0, 1] that measures how well a system, during identification, separates genuine subjects from impostors on the basis of a single probe.
This approach uses a function φ that measures the amount of "confusion" among the possible candidates.
We define two φ functions:
- relative distance
- density ratio

Given the list returned by an identification phase, we look at the neighborhood of the rank-1 result. If the candidates immediately below rank 1 are very close to it, the response is not very reliable; if there is a good separation, the response is reliable.

Possible examples of φ are:

- Relative distance
![[Pasted image 20241017083255.png]]
$$φ(p) = \frac{F(d(p, g_1)) - F(d(p, g_2))}{F(d(p, g_{|G|}))}$$

- Density ratio![[Pasted image 20241017083506.png]]
    - this function is less sensitive to "outliers", i.e. templates with an anomalously large distance from the probe, and in fact it performs better than the previous one (a toy version of the relative-distance φ is sketched below)
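A toy score in the spirit of the relative-distance φ, under strong simplifying assumptions (F taken as the identity and the sign arranged so that larger means better separated); it only illustrates the idea that a large gap between the rank-1 and rank-2 candidates means a more reliable response:

```python
# Toy "rank-1 separation" score inspired by the relative-distance phi above.
# Assumptions: distances sorted in increasing order, F = identity, sign chosen
# so that a larger score means better separation. Not the exact SRR formula.

def rank1_separation(distances: list[float]) -> float:
    """distances: probe-to-gallery distances, sorted so distances[0] is the rank-1 match."""
    d1, d2, d_last = distances[0], distances[1], distances[-1]
    if d_last == 0:
        return 0.0
    return (d2 - d1) / d_last

print(rank1_separation([0.10, 0.80, 0.95]))   # ~0.74 -> well separated, reliable
print(rank1_separation([0.10, 0.12, 0.95]))   # ~0.02 -> almost tied, unreliable
```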

We then define φ_k as the value that minimizes the errors of φ(p), i.e. the cases in which impostor probes have φ(pI) > φ_k and genuine probes have φ(pG) ≤ φ_k. Values of φ(p) far from φ_k will have a higher SRR, hence:
![[Pasted image 20241017083943.png]]

#### Template Updating
Another way to increase the quality and reliability of a system is to update the templates (avoiding problems such as aging).
A probe can be taken and added to the gallery.
For better security, this operation must be done in only two possible ways:
- Supervised
- Semi-supervised
Biometric Systems/notes/4. Face recognition.md
@@ -0,0 +1,21 @@
The most important factors of a biometric system are acceptability, reliability and accuracy. DNA, for example, provides high accuracy and reliability but low acceptability, since the collection method is definitely intrusive. Fingerprints also provide good performance, but they can often be captured only partially and are frequently associated with "criminals". Face recognition, on the other hand, is highly accepted, since we are used to taking pictures of ourselves and publishing them, but its accuracy can drop drastically in uncontrolled conditions.
Possible problems related to it are:
- Intrapersonal variations
- Interpersonal similarities
- PIE and A-PIE: pose, illumination and expression + aging
- Easy to disguise: makeup, plastic surgery, glasses, etc.

Steps:
- capture
- localization
- cropping of the ROIs (regions of interest)
- normalization
- feature extraction
- template construction

#### Face localization
- Problem: given an image or a video, detect the presence of one or more faces and localize them in the image
- Requirements: it must work regardless of position, orientation, size, expression, subjects in the image, illumination and background (see the sketch below)
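As an illustration of the localization step, a minimal sketch using OpenCV's bundled Haar-cascade frontal-face detector; this is just one common off-the-shelf approach, not the method discussed in the course, and the file name and parameter values are arbitrary:

```python
# Minimal face localization sketch with OpenCV's Haar cascade detector.
# Illustrative only: "probe.jpg" is a hypothetical input, the parameters are
# arbitrary, and robustness to pose/illumination/background is limited
# compared to modern detectors.
import cv2

cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
)

image = cv2.imread("probe.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
for (x, y, w, h) in faces:
    roi = image[y:y + h, x:x + w]      # cropped ROI for the next pipeline steps
    print(f"face at x={x}, y={y}, w={w}, h={h}")
```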

##### Can you hide from it?
According to Adam Harvey, the key feature that computers detect is the "nose bridge", i.e. the area between the eyes. If these are hidden, the computer can be led to believe that there is no face at all.