text
stringlengths 121
672k
|
|---|
#pragma once
#ifndef LINEAGE_HEURISTICS_GREEDY_LINEAGE_HXX
#define LINEAGE_HEURISTICS_GREEDY_LINEAGE_HXX
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <limits>
#include <map>
#include <numeric>
#include <queue>
#include <random>
#include <sstream>
#include <stack>
#include <utility>
#include <vector>
#include <andres/partition.hxx>
#include <levinkov/timer.hxx>
#include "heuristic-base.hxx"
#include "lineage/evaluate.hxx"
#include "lineage/problem-graph.hxx"
#include "lineage/solution.hxx"
namespace lineage {
namespace heuristics {
template <class EVA = std::vector<double>>
class DynamicLineage
{
    /// Class adapted from
    /// andres::graph::multicut::greedyAdditiveEdgeContraction
    ///
    /// Maintains an incrementally contracted view of the lineage problem:
    /// nodes within the same frame can be merged (tracked by the union-find
    /// partition_), and nodes of adjacent frames can be linked by a
    /// parent/child relation (parents_/children_), forming a lineage forest.
    /// vertices_ stores the accumulated edge weights between contracted
    /// nodes so that the cost of a move can be evaluated locally.
public:
    /// Initialize from the problem graph; every node starts as its own
    /// cluster of size 1 with no parent and no children.
    DynamicLineage(Data& data)
        : data_(data)
        , vertices_(data.problemGraph.graph().numberOfVertices())
        , partition_(vertices_.size())
        , parents_(vertices_.size())
        , children_(vertices_.size(), 0)
        , sizes_(vertices_.size(), 1)
    {
        setup();
    }

    /// Candidate move along edge (v0, v1) together with the objective
    /// change (delta) it would cause if applied.
    struct EdgeOperation
    {
        EdgeOperation(size_t _v0, size_t _v1, typename EVA::value_type _delta,
                      size_t _edition = 0)
        {
            v0 = _v0;
            v1 = _v1;
            delta = _delta;
            edition = _edition;
        }
        size_t v0, v1;
        size_t edition; // stamp used to detect stale entries in a queue.
        typename EVA::value_type delta;
        inline bool operator<(const EdgeOperation& other) const
        {
            return delta >
                   other.delta; // inversed operation due to default-max order
                                // in queue.
        }
    };

    /// Accumulate the original edge costs into the adjacency maps, make
    /// every node its own parent (a root), and evaluate the objective of
    /// the fully discrete starting solution.
    inline void setup()
    {
        const auto& graph = data_.problemGraph.graph();
        const auto& costs = data_.costs;
        std::iota(parents_.begin(), parents_.end(), 0);
        for (size_t edge = 0; edge < graph.numberOfEdges(); ++edge) {
            const auto& v0 = graph.vertexOfEdge(edge, 0);
            const auto& v1 = graph.vertexOfEdge(edge, 1);
            updateEdgeWeight(v0, v1, costs[edge]);
        }
        objective_ = evaluate(data_, getSolution());
    }

    /// Replay an existing edge labeling (0 = joined) as a sequence of
    /// moves. NOTE(review): moves are applied with delta 0, so objective_
    /// is not updated here — presumably the caller re-evaluates; confirm.
    template <class T>
    inline void initializeFromSolution(T&& edge_labels)
    {
        for (size_t edge = 0; edge < data_.problemGraph.graph().numberOfEdges();
             ++edge) {
            const auto v0 = data_.problemGraph.graph().vertexOfEdge(edge, 0);
            const auto v1 = data_.problemGraph.graph().vertexOfEdge(edge, 1);
            if (edge_labels[edge] == 0) {
                applyMove({ v0, v1, 0, 0 });
            }
        }
    }

    // takes over partition and tree from other.
    template <class T>
    inline void resetTo(const T& other)
    {
        this->vertices_ = other.vertices_;
        this->partition_ = other.partition_;
        this->children_ = other.children_;
        this->parents_ = other.parents_;
        this->sizes_ = other.sizes_;
        this->objective_ = other.objective_;
    }

    /// True iff a (contracted) edge between a and b currently exists.
    inline bool edgeExists(size_t a, size_t b) const
    {
        return !vertices_[a].empty() &&
               vertices_[a].find(b) != vertices_[a].end();
    }

    /// Weighted adjacency of v (neighbor id -> accumulated weight).
    inline std::map<size_t, typename EVA::value_type> const&
    getAdjacentVertices(size_t v) const
    {
        return vertices_[v];
    }

    /// Accumulated weight of edge (a, b); throws if the edge is absent.
    inline typename EVA::value_type getEdgeWeight(size_t a, size_t b) const
    {
        return vertices_[a].at(b);
    }

    /// Remove v from the adjacency structure (both directions).
    inline void removeVertex(size_t v)
    {
        for (auto& p : vertices_[v])
            vertices_[p.first].erase(v);
        vertices_[v].clear();
    }

    /// Add w to the weight of edge (a, b), creating the edge if absent.
    inline void updateEdgeWeight(size_t a, size_t b, typename EVA::value_type w)
    {
        vertices_[a][b] += w;
        vertices_[b][a] += w;
    }

    /// Make `parent`'s cluster the parent of `child`'s cluster. Requires a
    /// connecting edge; replaces an existing, different parent (updating
    /// that parent's child count).
    inline void setParent(size_t child, size_t parent)
    {
        if (!edgeExists(child, parent)) {
            throw std::runtime_error("Cannot set parent to non-adjacent node!");
        }
        if (hasParent(child).first && hasParent(child).second != parent) {
            // detach from the old parent before re-attaching.
            removeChild(hasParent(child).second);
        }
        parents_[findRep(child)] = findRep(parent);
        addChild(parent);
    }

    /// Representative of v's parent cluster, or v itself if v's cluster is
    /// a root. Compresses the stored parent pointer on the way.
    inline size_t findParent(size_t v)
    {
        auto rep = findRep(v);
        auto parent = findRep(parents_[rep]);
        if (parents_[rep] != parent) { // update lookup.
            parents_[rep] = parent;
        }
        if (parent == rep) {
            return v;
        } else {
            return parent;
        }
    }

    /// Returns (hasParent, parent representative) for v's cluster.
    inline std::pair<bool, size_t> hasParent(size_t v)
    {
        auto parent = findParent(v);
        bool found = (parent != v) ? true : false;
        return std::make_pair(found, parent);
    }

    /// Number of children of v's cluster.
    inline size_t children(size_t v)
    {
        auto rep = findRep(v);
        return children_[rep];
    }

    inline bool hasChild(size_t v) { return children(v) > 0; }

    /// Increment the child count; enforces the bifurcation constraint
    /// (at most two children) when enabled.
    inline void addChild(size_t v)
    {
        auto rep = findRep(v);
        ++children_[rep];
        if (this->data_.enforceBifurcationConstraint && children_[rep] > 2)
            throw std::runtime_error("has more than two children!");
    }

    /// Decrement the child count; throws on underflow.
    inline void removeChild(size_t v)
    {
        auto rep = findRep(v);
        if (children_[rep] == 0) {
            throw std::runtime_error("Has no children to remove!");
        }
        --children_[rep];
    }

    /// Union-find representative of v's cluster.
    inline size_t findRep(size_t v) { return partition_.find(v); }

    /// Contract the in-frame edge (v0, v1): union the two clusters, splice
    /// the adjacency of the absorbed node into the surviving representative,
    /// and restore size/children/parent bookkeeping afterwards.
    inline void merge(const size_t v0, const size_t v1)
    {
        size_t stable_vertex = v0;
        size_t merge_vertex = v1;
        // merges are only allowed in-plane!
        if (getFrameOfNode(stable_vertex) != getFrameOfNode(merge_vertex)) {
            throw std::runtime_error(
              "Not allowed to merge nodes across frames!");
        }
        // absorb the node with the smaller adjacency into the larger one.
        if (getAdjacentVertices(stable_vertex).size() <
            getAdjacentVertices(merge_vertex).size()) {
            std::swap(stable_vertex, merge_vertex);
        }
        // if stable_vertex doesnt have a parent but merge_vertex does,
        // then we need to set it as stable_vertex's parent.
        {
            auto stable_parent = hasParent(stable_vertex);
            auto merge_parent = hasParent(merge_vertex);
            if (!stable_parent.first && merge_parent.first) {
                parents_[findRep(stable_vertex)] = merge_parent.second;
            }
            if (stable_parent.first && merge_parent.first) {
                // check if parent loses a child through the merge.
                if (stable_parent.second == merge_parent.second) {
                    removeChild(stable_parent.second);
                } else {
                    throw std::runtime_error(
                      "Nodes with different parents cannot be merged!");
                }
            }
        }
        // keep previous config (since the representative of the partition may
        // change).
        auto numberOfChildren =
          children(stable_vertex) + children(merge_vertex);
        auto hadParentBefore = hasParent(stable_vertex);
        auto newSize = sizes_[stable_vertex] + sizes_[merge_vertex];
        partition_.merge(stable_vertex, merge_vertex);
        // keep the edge indices consistent to representatives!
        if (findRep(stable_vertex) == merge_vertex) {
            std::swap(stable_vertex, merge_vertex);
        }
#ifdef DEBUG
        if (stable_vertex != findRep(stable_vertex) and
            merge_vertex != findRep(stable_vertex)) {
            throw std::runtime_error("Assumption violated!");
        }
#endif
        // update all edges.
        for (const auto& p : getAdjacentVertices(merge_vertex)) {
            const auto& other_vertex = p.first;
            if (other_vertex == stable_vertex) {
                continue;
            }
            updateEdgeWeight(stable_vertex, other_vertex, p.second);
        }
        removeVertex(merge_vertex);
        // apply previous settings.
        {
            sizes_[stable_vertex] = newSize;
            children_[stable_vertex] = numberOfChildren;
            if (hadParentBefore.first &&
                hadParentBefore.second != findParent(stable_vertex)) {
                // dont use setParent to avoid increasing the children_ counter.
                parents_[stable_vertex] = hadParentBefore.second;
            }
        }
    }

    /// Size (number of original nodes) of v0's cluster.
    size_t sizeOf(size_t v0) { return sizes_[findRep(v0)]; }

    /// Compute the objective change of joining v0 and v1: an in-frame merge
    /// if the nodes share a frame, otherwise a (re-)parenting of the
    /// later-frame node to the earlier-frame node. Forbidden moves are
    /// returned with delta = +infinity.
    inline EdgeOperation proposeMove(const size_t v0, const size_t v1)
    {
        // increaseEdition(v0, v1); // invalidate old moves along (v0, v1)
        if (!edgeExists(v0, v1)) {
            throw std::runtime_error(
              "Cannot propose move for an edge that does not exist!");
        }
        // first part of cost change through merge / setParent.
        auto delta = -getEdgeWeight(v0, v1);
        // potential merge.
        if (getFrameOfNode(v0) == getFrameOfNode(v1)) {
            const auto& p0 = hasParent(v0);
            const auto& p1 = hasParent(v1);
            // Cases that can be merged:
            if (p0.first xor p1.first) { // one with parents.
                // the parent-less side would no longer pay its birth cost
                // (nodes in frame 0 never pay birth costs).
                if (getFrameOfNode(v0) != 0) {
                    const size_t partitionSize =
                      p0.first ? sizeOf(v1) : sizeOf(v0);
                    delta -= data_.costBirth * partitionSize;
                }
            } else if (!p0.first && !p1.first) { // no parents.
                ;
            } else if (p0.first && p1.first &&
                       p0.second == p1.second) { // same parents.
                ;
            } else { // the rest cant.
                return {
                    v0, v1,
                    std::numeric_limits<typename EVA::value_type>::infinity(), 0
                };
            }
            // cost adjustments for all nodes that have either first
            // or second as a parent and share connections to the other.
            for (const auto& other : getAdjacentVertices(v1)) {
                const auto& v2 = other.first;
                const auto& p2 = hasParent(v2);
                if (edgeExists(v0, v2)) {
                    // is v2 a parent to v0 and not v1?
                    // (or vice versa)
                    if (p0.second == findRep(v2) and p1.second != findRep(v2)) {
                        delta -= getEdgeWeight(v1, v2);
                    } else if (p1.second == findRep(v2) and
                               p0.second != findRep(v2)) {
                        delta -= getEdgeWeight(v0, v2);
                    } else {
                        // is either v0 or v1 a parent of v2?
                        if (p2.first) {
                            if (p2.second ==
                                findRep(v0)) { // v0 is parent to v2
                                delta -= getEdgeWeight(v1, v2);
                            } else if (p2.second ==
                                       findRep(v1)) { // v1 is parent to v2
                                delta -= getEdgeWeight(v0, v2);
                            }
                        }
                    }
                }
            }
            // if one has no child, we gain a termination cost.
            // (nodes in the last frame never pay termination costs.)
            if (!hasChild(v0) xor !hasChild(v1)) {
                if (getFrameOfNode(v0) !=
                    data_.problemGraph.numberOfFrames() - 1) {
                    const size_t partitionSize =
                      hasChild(v0) ? sizeOf(v1) : sizeOf(v0);
                    delta -= data_.costTermination * partitionSize;
                }
            } else if (this->data_.enforceBifurcationConstraint &&
                       children(v0) + children(v1) >= 3) {
                // merging would exceed the two-children limit: forbidden.
                delta =
                  std::numeric_limits<typename EVA::value_type>::infinity();
            }
            return { v0, v1, delta, 0 };
            // Potential new parent.
        } else {
            size_t child = v0;
            size_t parent = v1;
            if (getFrameOfNode(child) < getFrameOfNode(parent)) {
                std::swap(child, parent);
            }
            // If bifurcation constraint is active:
            // dont allow more than two children!
            if (this->data_.enforceBifurcationConstraint) {
                if (children(parent) >= 2) {
                    return { v0, v1, std::numeric_limits<
                                       typename EVA::value_type>::infinity(),
                             0 };
                }
            }
            // is it a re-set?
            {
                auto parentOfChild = hasParent(child);
                if (parentOfChild.first) {
                    if (parentOfChild.second == parent) {
                        // already the parent: nothing to gain.
                        return { child, parent,
                                 std::numeric_limits<
                                   typename EVA::value_type>::infinity(),
                                 0 };
                    } else {
                        if (!edgeExists(child, parentOfChild.second)) {
                            throw std::runtime_error(
                              "Cannot have a parent with no connection!");
                        }
                        // pay the cut cost of the old parent link again.
                        delta += getEdgeWeight(child, parentOfChild.second);
                        // would the current parent form a terminal?
                        if (children(parentOfChild.second) == 1) {
                            delta += sizeOf(parentOfChild.second) *
                                     data_.costTermination;
                        }
                    }
                    // could we save birth costs?
                } else if (getFrameOfNode(child) != 0) {
                    delta -= data_.costBirth * sizeOf(child);
                }
            }
            // the parent would no longer pay its termination cost.
            if (!hasChild(parent) and
                getFrameOfNode(parent) !=
                  data_.problemGraph.numberOfFrames() - 1) {
                delta -= data_.costTermination * sizeOf(parent);
            }
            return { v0, v1, delta, 0 };
        }
    }

    /// Execute a proposed move (merge for in-frame edges, setParent for
    /// cross-frame edges) and account its delta into the objective.
    inline void applyMove(const EdgeOperation move)
    {
        const auto frame0 = this->getFrameOfNode(move.v0);
        const auto frame1 = this->getFrameOfNode(move.v1);
        if (frame0 == frame1) {
            this->merge(move.v0, move.v1);
        } else {
            if (frame0 == frame1 - 1) {
                // v0 is one frame earlier: v0 becomes parent of v1.
                this->setParent(move.v1, move.v0);
            } else if (frame0 == frame1 + 1) {
                this->setParent(move.v0, move.v1);
            }
        }
        this->objective_ += move.delta;
    }

    /// Frame (time point) index of the given original vertex.
    inline size_t getFrameOfNode(const size_t vertex)
    {
        return data_.problemGraph.frameOfNode(vertex);
    }

    /// Append one line of progress (elapsed time and current objective) to
    /// the optimization log file. The timer is paused while writing so
    /// logging is not counted as optimization time.
    inline void logObj()
    {
        data_.timer.stop();
        std::stringstream stream;
        stream << data_.timer.get_elapsed_seconds() << " "
               << "inf" // bound
               << " " << objective_ << " "
               << "nan" // gap
               << " 0 0 0" // violated constraints;
               << " 0 0 0" // termination/birth/bifuraction constr.
               << " 0 0\n";
        {
            std::ofstream file(data_.solutionName + "-optimization-log.txt",
                               std::ofstream::out | std::ofstream::app);
            file << stream.str();
            file.close();
        }
        data_.timer.start();
    }

    /// Translate the internal partition/forest state back into an edge
    /// labeling on the original graph (0 = joined, 1 = cut).
    inline lineage::Solution getSolution()
    {
        const auto& graph = data_.problemGraph.graph();
        Solution solution;
        solution.edge_labels.resize(graph.numberOfEdges(), 1);
        for (size_t edge = 0; edge < graph.numberOfEdges(); ++edge) {
            const auto& v0 = graph.vertexOfEdge(edge, 0);
            const auto& v1 = graph.vertexOfEdge(edge, 1);
            const auto& frame0 = data_.problemGraph.frameOfNode(v0);
            const auto& frame1 = data_.problemGraph.frameOfNode(v1);
            if (frame0 == frame1) {
                // in-frame edge: joined iff same cluster.
                if (findRep(v0) == findRep(v1)) {
                    solution.edge_labels[edge] = 0;
                }
            } else if (frame0 == frame1 - 1) { // v0 could be parent to v1
                if (findRep(v0) == findParent(v1)) {
                    solution.edge_labels[edge] = 0;
                }
            } else if (frame0 == frame1 + 1) { // v1 could be parent to v0
                if (findParent(v0) == findRep(v1)) {
                    solution.edge_labels[edge] = 0;
                }
            } else {
                throw std::runtime_error(
                  "Edge spanning over more than two frames found!");
            }
        }
        return solution;
    }

    /// Current objective value (updated incrementally by applyMove).
    inline typename EVA::value_type getObjective() const { return objective_; }

protected:
    using Partition = andres::Partition<size_t>;
    Data& data_;
    // vertices_[v]: weighted adjacency of the cluster represented by v;
    // only entries at representatives are meaningful after merges.
    std::vector<std::map<size_t, typename EVA::value_type>> vertices_;
    Partition partition_;  // in-frame union-find structure.
    std::vector<size_t> parents_;  // per-representative parent pointer.
    std::vector<size_t> children_; // per-representative child count.
    std::vector<size_t> sizes_;    // per-representative cluster size.
    typename EVA::value_type objective_{ .0 };
};
template <class EVA = std::vector<double>>
class GreedyLineageAgglomeration : public DynamicLineage<EVA>
{
    /// Greedy solver on top of DynamicLineage: keeps a priority queue of
    /// improving moves and repeatedly applies the one with the most
    /// negative objective change. Outdated queue entries are recognized
    /// via per-edge edition counters.
public:
    GreedyLineageAgglomeration(Data& data)
        : DynamicLineage<EVA>(data)
        , editions_(data.problemGraph.graph().numberOfVertices())
    {
    }

    // dummy function to be compatible with standard interface.
    void setMaxIter(const size_t maxIter) { ; }

    /// Bump the edition of edge (v0, v1), invalidating queued proposals.
    /// The edition map is keyed on the (smaller, larger) vertex pair.
    inline void increaseEdition(const size_t v0, const size_t v1)
    {
        if (!this->edgeExists(v0, v1)) {
            throw std::runtime_error(
              "Cannot increase edition of an edge that does not exist!");
        }
        if (v0 > v1) {
            ++editions_[v1][v0];
        } else {
            ++editions_[v0][v1];
        }
    }

    /// Current edition of edge (v0, v1) (symmetric lookup).
    inline size_t getEdition(const size_t v0, const size_t v1)
    {
        if (v0 > v1) {
            return editions_[v1][v0];
        } else {
            return editions_[v0][v1];
        }
    }

    /// Evaluate the move along (v0, v1) and queue it if it does not worsen
    /// the objective (delta <= 0), stamped with the current edge edition.
    inline void proposeMove(const size_t v0, const size_t v1)
    {
        increaseEdition(v0, v1); // invalidate old moves along (v0, v1)
        auto move = DynamicLineage<EVA>::proposeMove(v0, v1);
        if (move.delta <= .0) {
            move.edition = getEdition(move.v0, move.v1);
            queue_.push(move);
        }
    }

    /// Pop moves until a valid improving one is found, apply it, and
    /// re-propose moves around the affected neighborhood. Returns false
    /// when no improving move remains.
    inline bool virtual applyBestOperationAndUpdate()
    {
        while (!queue_.empty()) {
            const auto move = queue_.top();
            queue_.pop();
            if (move.delta >= 0) {
                // best remaining entry does not improve: done.
                return false;
            } else if (!this->edgeExists(move.v0, move.v1)) {
                continue; // edge vanished through an earlier contraction.
            } else if (move.edition != getEdition(move.v0, move.v1)) {
                continue; // stale proposal; a newer edition exists.
            }
            this->applyMove(move);
            // collect the (post-move) neighbors of both endpoints and
            // re-evaluate every edge incident to that neighborhood, since
            // their move costs may have changed.
            std::vector<size_t> neighbours;
            for (auto v : { move.v0, move.v1 }) {
                for (auto w : this->vertices_[v]) {
                    neighbours.push_back(w.first);
                }
            }
            for (auto v : neighbours) {
                for (auto w : this->vertices_[v]) {
                    proposeMove(v, w.first);
                }
            }
            return true;
        }
        return false;
    }

    /// Seed the queue with all current edges, then greedily apply improving
    /// moves until exhaustion, logging progress unless silent.
    inline void virtual optimize()
    {
        // initial queue of operations.
        for (size_t v0 = 0; v0 < this->vertices_.size(); ++v0) {
            for (const auto& other : this->vertices_[v0]) {
                const auto v1 = other.first;
                proposeMove(v0, v1);
            }
        }
        size_t iter = 0;
        while (applyBestOperationAndUpdate()) {
            if (not silent_)
                this->logObj();
            ++iter;
        }
        if (not silent_) {
            this->data_.timer.stop();
            std::cout << "[GLA] Stopping after " << iter << " moves in "
                      << this->data_.timer.get_elapsed_seconds()
                      << "s. Obj=" << this->objective_ << std::endl;
            this->data_.timer.start();
        }
    }

    inline void setSilent(const bool flag) { silent_ = flag; }

protected:
    // Max-heap; EdgeOperation::operator< is inverted so the smallest
    // (most negative) delta sits on top.
    std::priority_queue<typename DynamicLineage<EVA>::EdgeOperation> queue_;
    std::vector<std::map<size_t, size_t>> editions_; // per-edge edition stamps.
    bool silent_{ false };
};
} // namespace heuristics
} // namespace lineage
#endif
|
Teach your child the importance of good sportsmanship.Not too long ago, my 10-year-old daughter's indoor soccer team finished their game and lined up to do the traditional end-of-game walk with the other team. If your own child has ever played in a team sport, you likely have seen this walk a hundred times before. Win or lose, each member of the team is expected to essentially tell the other players they did well and good game. This is a classic way to end a game on a positive note and to exhibit good sportsmanship, win or lose.
The opposing team in this case, however, had a unique way of showing their good sportsmanship. They all licked their hands before holding them out for our own girls to "low-five" as they walked down the line. Our girls saw this, and they refused to touch the other girls' slimy, slobbery, germ-ridden hands. You may be wondering if our girls' team beat this other team. The truth is that they beat the other team pretty harshly, but there is no score that would justify the level of poor sportsmanship that the other team exhibited.
As a parent, I can only hope the parents or coach on the other team reprimanded their girls for this unsportsmanlike behavior. This is not the kind of behavior any parent would be proud to see in their own child. However, this is just one of many ways unsportsmanlike behavior is exhibited. From tears on the field to pushing, shoving, "trash talking" and more, there are many different behaviors that are associated with poor sportsmanship.
The fact is that good sportsmanship is a quality that can play a role in your child's ability to react to other situations throughout life. Competition may occur on the field, but it also plays a part in the college admission process, a run for a place on the school board, the job application process and so much more. Teaching your child how to be a good sport now can help him or her to handle wins and losses throughout life with grace. So how can you help your child build a healthy "win-or-lose" attitude?
A Positive Parental Role Model
No parent takes pride in seeing other players, either from their child's own team or on the opposing team, be better than their own child. Parents simply want their child to be the best. However, somewhere between the desire to see your kid aim for the stars and the truth of reality is the fact that there always will be someone or some team that is better. As a parent, you can talk negatively about these better players or better teams, or you can talk positively about them. You can use these interactions with better competition to point out areas where your own child can improve and to teach your child to respect those with skills and talents that are worthy of respect. This is a great opportunity to teach your child to turn lemons into lemonade.
You Win Some, You Lose Some
Very few children really are the best at what they do. There is always someone who either is better now or who is working hard to be better in the near future. A team that was on top this season may not be the top team the next season. While you want your child to work hard and strive to win, it is unrealistic to expect a child or his or her team to win all of the time. Children will inevitably be disappointed after a loss. This is understandable and justified, especially if he or she has been working hard and did his or her personal best. As a parent, your response to a loss is every bit as important as your response to a win. The fact is that an entire team can play their best, and they may simply be out-matched. Teaching kids that losses do happen, even when they try their hardest, can help them to cope with their defeat. Show them that you are proud of their performance and effort at each game rather than letting the tally mark under the "W" column dictate this.
A Lesson Learned
The fact is that a child or a team simply will not improve very quickly when they are blowing out the competition on a regular basis. To be the best, you have to play the best. You have to be challenged by the best, and sometimes this means a loss will occur. Within each game, whether a win or loss, lies an opportunity for growth, development and improvement. After each game, regardless of the outcome, talk to your child about what he or she did well and what he or she thinks could have been done better. Rather than tell your child what you think, ask your child his or her personal opinion on the matter and what the coach said. Then, remind your child that these are areas that he or she can work on for the next game.
Nobody likes to lose, but challenge and loss are the motivators that make us all better. Whether on the field, in the workplace or any number of other environments, challenge and loss are vital to developing that ever-important trait that true winners in life have. That trait is perseverance.Content by Kim Daugherty .
|
Groundhogs, as a species, have a large range in size. There are the medium-sized rodents I grew up with, averaging around 4 kg, and groundhogs—like a certain Phil—that are probably more like 14 kg. This is the likely source of my earlier confusion, as that's a huge discrepancy in size. Evidently, it's all in the diet, much like humans.
Where I grew up, in rural Northern Minnesota, we called the groundhog a woodchuck; I thought that the groundhog was some fat cat, East Coast, liberal rodent. As it would turn out, they are actually one and the same creature—Marmota monax, a member of the squirrel family. Woodchucks spend a lot of their time in burrows. It is their safe haven from their many predators, and they are quick to flee to it at the first sign of danger. They will sometimes emit a loud whistle on their way to alert others in the area that something is awry. Groundhogs enjoy raiding our gardens and digging up sod, thereby destroying what we've spent countless hours toiling upon.
Look for groundhog signs. You might not even know there is a groundhog around until your garden has been devoured or your tractor damaged by a collapsed groundhog den. Things to look for are large nibble marks on your prized veggies, gnaw marks on the bark of young fruit trees, root vegetables pulled up (or their tops trimmed off), groundhog-sized holes (25–30 cm) anywhere near your garden, or mounds of dirt near said holes. If you see these signs, take action. Don't wait or it will be too late! If you know it will be a problem and do nothing, you can't blame the animal.
Set groundhog traps. This technique takes some skill as you need to be able to pick a spot in the path of the animal, camouflage it, and mask your strong human scent. Setting a spring trap, whether coil or long-spring, is usually just a matter of compressing the springs and setting a pin that keeps the jaws open into the pan or trigger. Make sure your trap is anchored securely with a stake. Check your traps often, and dispatch the animal quickly and humanely. Shooting them in the head or a hearty whack to the head with club will do the trick. If you can't deal with this, you have no business setting traps. Call a professional.
Guns kill groundhogs. I have never shot a groundhog. I rarely have had problems with them, and they move so damned fast it is difficult to get a shot off. If I had to, I know how I would do it. First, be sure it is legal in your area, and be sure to follow gun safety protocols. After that, it's just a matter of learning where your target is going to be comfortable and let their guard down. I would follow their tracks back to their den, find a spot downwind to sit with a clear shooting lane, and make sure nothing you shouldn't hit with a bullet is down range. Then, I would wait, my sights set on the den, until the groundhog stuck its head up—quick and easy.
Demolish the groundhog burrows. If you find a couple holes around your yard, they are likely the entrances to an elaborate tunnel maze carved into the earth beneath you. About all you can do, short of digging the whole mess up, is to try and fill it in from the top side. First, fill it with a bunch of rocks and then soil—make sure to really pack it in. This will make it difficult for the groundhog to reclaim its hole without a lot of work. You probably want to do this in tandem with other control methods such as trapping, shooting, or fumigating to prevent the groundhog from just digging a new hole.
Do some landscaping and build barriers. As with the control of many pests, it is advisable to keep a yard free of brush, undercover, and dead trees. These types of features are attractive to groundhogs as cover, and without it, they are less likely to want to spend time there. If you want to keep a groundhog out of an area, consider a partially buried fence. This will require a lot of work, but it is going to help a lot. Make sure it extends up at least a meter, and that it is buried somewhere around 30 cm deep. Angle the fencing outward 90 degrees when you bury it, and it will make digging under it a very daunting task for your furry friend.
Try using fumigants to kill groundhogs. What is nice about this product is that you can kill the animal and bury it all in one stroke. The best time to do this is in the spring when the mother will be in the den with her still helpless young. Also, the soil will likely be damp, which helps a lot. You should definitely follow the directions on the package, but the way they usually work is that you cover all but one exit, set off the smoke bomb, shove it down the hole, and quickly cover it up. Check back in a day or two to see if there is any sign of activity, and if so, do it again or consider a different control method. It is important that you don't do this if the hole is next to your house or if there is any risk of a fire.
Poisons are a last resort. I am not a fan of poisons because it is difficult to target what will eat said poison in the wild. Also, you are left with the issue of where the groundhog will die and how bad it will smell if it is somewhere under your house. Or, if it is outside somewhere, who will be affected by eating the dead animal? Where does it end? If you want to use poison, you're on your own.
Use live traps. This is a good option for those of you not too keen on killing things. Try jamming the door open and leaving bait inside for the taking a couple of times so they get used to it. Then, set it normally and you've got your groundhog (or a neighborhood cat). Now what? The relocation is just as important; you need to choose a place that is far away from other humans and can likely support a groundhog. Good luck.
Predator urine. The idea is simple: form a perimeter around an area you want to protect. If the groundhog doesn't recognize the smell as a natural predator, it is probably not going to work too well. Look for brands that have wolf and bobcat urine. Apply regularly, or as the manufacturer recommends. Remember, if it rains, the urine has probably washed away.
Repellents. Another popular method involves pepper-based repellents. These deter groundhogs by tasting horrible and burning their mucous membranes. You can do a perimeter with powdered cayenne pepper or just apply it to the things you want spared in your garden. Be sure to wash your vegetables off before using them (which you should be doing anyway).
|
#ifndef HTTP_PROTOCOL__H
#define HTTP_PROTOCOL__H
#include <vector>
#include "Markup.h"
#include "Poco/HashMap.h"
#include <algorithm>
#include "Poco/Mutex.h"
#include "Poco/ScopedLock.h"
#include "Poco/HashMap.h"
#include "VDeviceConfig.h"
#include "Markup.h"
using Poco::FastMutex;
using Poco::ScopedLock;
class CHttpDevInfoProtocol
{
    // Per-device snapshot of faulted and realtime-streaming channels,
    // serializable as a <Message><Dev> XML fragment (gb2312 encoded).
public:
    std::vector<int> m_FaultChannelVec; // zero-based faulted channel indices.
    std::vector<int> m_RealChannelVec;  // zero-based realtime channel indices.
    std::string m_strDevId;             // written to the Dev@Id attribute.

    // Serialize this device's channel lists into a status XML document.
    // Channel numbers are emitted one-based; empty lists are omitted.
    std::string Serialize()
    {
        CMarkup xml;
        xml.SetDoc("<?xml version=\"1.0\" encoding=\"gb2312\"?>");
        xml.AddElem("Message");
        xml.IntoElem();
        xml.AddElem("Dev");
        xml.AddAttrib("Id", m_strDevId);
        xml.IntoElem();
        if (!m_FaultChannelVec.empty())
        {
            xml.AddElem("FaultChannel");
            xml.IntoElem();
            // size_t index avoids the signed/unsigned comparison warning
            // of the previous `int j < size()` loop.
            for (size_t j = 0; j < m_FaultChannelVec.size(); j++)
            {
                int nChannel = m_FaultChannelVec[j];
                // stored zero-based, reported one-based.
                xml.AddElem("Channel", nChannel + 1);
            }
            xml.OutOfElem();
        }
        if (!m_RealChannelVec.empty())
        {
            xml.AddElem("RealChannel");
            xml.IntoElem();
            for (size_t j = 0; j < m_RealChannelVec.size(); j++)
            {
                int nChannel = m_RealChannelVec[j];
                xml.AddElem("Channel", nChannel + 1);
            }
            xml.OutOfElem();
        }
        xml.OutOfElem();
        xml.OutOfElem();
        return xml.GetDoc();
    }
};
class HttpServerInfoProtocol
{
    // Aggregated server status: host CPU/memory statistics plus per-device
    // channel state keyed by device id. All access to m_devVec is guarded
    // by m_Mutex so the class can be used from multiple threads.
public:
    HttpServerInfoProtocol()
    {
    }
    ~HttpServerInfoProtocol()
    {
    }

    // Mark nChannel of device strDevId as faulted; creates the device
    // entry on first use and ignores duplicate channels.
    void AddFaultChannel(std::string strDevId, int nChannel)
    {
        ScopedLock<FastMutex> lock(m_Mutex);
        Poco::HashMap<std::string, CHttpDevInfoProtocol>::Iterator ptr = m_devVec.find(strDevId);
        if (ptr == m_devVec.end())
        {
            CHttpDevInfoProtocol dev;
            dev.m_strDevId = strDevId;
            dev.m_FaultChannelVec.push_back(nChannel);
            m_devVec[strDevId] = dev;
            return;
        }
        // std::find spelled explicitly: unqualified `find` relied on ADL,
        // which is not guaranteed when vector::iterator is a raw pointer.
        std::vector<int>::iterator itrChannel = std::find(ptr->second.m_FaultChannelVec.begin(), ptr->second.m_FaultChannelVec.end(), nChannel);
        if (itrChannel != ptr->second.m_FaultChannelVec.end())
        {
            return;
        }
        ptr->second.m_FaultChannelVec.push_back(nChannel);
    }

    // Mark nChannel of device strDevId as streaming realtime data; creates
    // the device entry on first use and ignores duplicate channels.
    void AddRealChannel(std::string strDevId, int nChannel)
    {
        ScopedLock<FastMutex> lock(m_Mutex);
        Poco::HashMap<std::string, CHttpDevInfoProtocol>::Iterator ptr = m_devVec.find(strDevId);
        if (ptr == m_devVec.end())
        {
            CHttpDevInfoProtocol dev;
            dev.m_strDevId = strDevId;
            dev.m_RealChannelVec.push_back(nChannel);
            m_devVec[strDevId] = dev;
            return;
        }
        std::vector<int>::iterator itrChannel = std::find(ptr->second.m_RealChannelVec.begin(), ptr->second.m_RealChannelVec.end(), nChannel);
        if (itrChannel != ptr->second.m_RealChannelVec.end())
        {
            return;
        }
        ptr->second.m_RealChannelVec.push_back(nChannel);
    }

    // Remove a realtime channel; drops the whole device entry once neither
    // fault nor realtime channels remain for it.
    void RemoveRealChannel(std::string strDevId, int nChannel)
    {
        ScopedLock<FastMutex> lock(m_Mutex);
        Poco::HashMap<std::string, CHttpDevInfoProtocol>::Iterator ptr = m_devVec.find(strDevId);
        if (ptr == m_devVec.end())
        {
            return;
        }
        std::vector<int>::iterator itrChannel = std::find(ptr->second.m_RealChannelVec.begin(), ptr->second.m_RealChannelVec.end(), nChannel);
        if (itrChannel != ptr->second.m_RealChannelVec.end())
        {
            ptr->second.m_RealChannelVec.erase(itrChannel);
            if (ptr->second.m_RealChannelVec.empty() && ptr->second.m_FaultChannelVec.empty())
            {
                m_devVec.erase(ptr);
            }
        }
    }

    // Clear the fault list of every device.
    void RemoveFaultChannel()
    {
        // BUGFIX: this method previously iterated and mutated m_devVec
        // without holding m_Mutex, racing with the other accessors.
        ScopedLock<FastMutex> lock(m_Mutex);
        for (Poco::HashMap<std::string, CHttpDevInfoProtocol>::Iterator ptr = m_devVec.begin(); ptr != m_devVec.end(); ptr++)
        {
            ptr->second.m_FaultChannelVec.clear();
        }
    }

    // Copy the state of device strDevId into devInfo; returns false if the
    // device is unknown.
    bool GetDevInfo(const std::string& strDevId, CHttpDevInfoProtocol& devInfo)
    {
        ScopedLock<FastMutex> lock(m_Mutex);
        Poco::HashMap<std::string, CHttpDevInfoProtocol>::Iterator ptr = m_devVec.find(strDevId);
        if (ptr == m_devVec.end())
        {
            return false;
        }
        devInfo = ptr->second;
        return true;
    }

public:
    // Serialize host statistics plus all device channel lists into the
    // gb2312 status XML. Channel numbers are emitted one-based.
    std::string Serialize()
    {
        // take the lock up-front so the CPU/memory members and the device
        // map are read under the same critical section.
        ScopedLock<FastMutex> lock(m_Mutex);
        CMarkup xml;
        xml.SetDoc("<?xml version=\"1.0\" encoding=\"gb2312\"?>");
        xml.AddElem("Message");
        xml.IntoElem();
        xml.AddElem("CPU");
        xml.AddAttrib("CpuType", m_strCPUType);
        xml.AddAttrib("UseRatio", m_strCPUUseRatio);
        xml.AddElem("Memory");
        xml.AddAttrib("Size", m_fMemorySize);
        xml.AddAttrib("UseRatio", m_strMmoryUseRatio);
        for (Poco::HashMap<std::string, CHttpDevInfoProtocol>::Iterator ptr = m_devVec.begin(); ptr != m_devVec.end(); ptr++)
        {
            xml.AddElem("Dev");
            xml.AddAttrib("Id", ptr->first);
            xml.IntoElem();
            if (!ptr->second.m_FaultChannelVec.empty())
            {
                xml.AddElem("FaultChannel");
                xml.IntoElem();
                // size_t avoids the signed/unsigned comparison of `int j`.
                for (size_t j = 0; j < ptr->second.m_FaultChannelVec.size(); j++)
                {
                    int nChannel = ptr->second.m_FaultChannelVec[j];
                    xml.AddElem("Channel", nChannel + 1);
                }
                xml.OutOfElem();
            }
            if (!ptr->second.m_RealChannelVec.empty())
            {
                xml.AddElem("RealChannel");
                xml.IntoElem();
                for (size_t j = 0; j < ptr->second.m_RealChannelVec.size(); j++)
                {
                    int nChannel = ptr->second.m_RealChannelVec[j];
                    xml.AddElem("Channel", nChannel + 1);
                }
                xml.OutOfElem();
            }
            xml.OutOfElem();
        }
        return xml.GetDoc();
    }

public:
    std::string strDevId; // NOTE(review): apparently unused; kept for ABI/source compatibility.
    std::string m_strCPUType;
    std::string m_strCPUUseRatio;
    double m_fMemorySize;
    std::string m_strMmoryUseRatio;
    Poco::HashMap<std::string, CHttpDevInfoProtocol> m_devVec; // device id -> channel state.
    FastMutex m_Mutex; // guards m_devVec and the statistics members.
};
class CHttpAddVPlatform
{
public:
CHttpAddVPlatform()
{
}
~CHttpAddVPlatform()
{
}
void UnSerialize(std::string& strMsg)
{
CMarkup xml;
xml.SetDoc(strMsg);
if(xml.FindElem("Message")==false)
{
return ;
}
xml.IntoElem();
if (xml.FindElem("VPlatforms")==false)
{
return ;
}
xml.IntoElem();
while(xml.FindElem("VirtualDevice"))
{
VDeviceConfig vplatform;
vplatform.m_sDeviceId=xml.GetAttrib("DeviceID");
vplatform.m_sExtendPara1=xml.GetAttrib("ExtendPara1");
vplatform.m_sPasswd=xml.GetAttrib("Passwd");
vplatform.m_sPlatformName=xml.GetAttrib("PlatformName");
vplatform.m_sPtzParam=xml.GetAttrib("PtzParam");
vplatform.m_sServerIP=xml.GetAttrib("IP");
vplatform.m_sUser=xml.GetAttrib("User");
vplatform.m_lPort=atol(xml.GetAttrib("Port").c_str());
vplatform.m_lSupportNM=atol(xml.GetAttrib("SupportNM").c_str());
vplatform.m_lSupportMutiStream=atol(xml.GetAttrib("SupportMutiStream").c_str());
vplatform.m_lPtzType=atol(xml.GetAttrib("PtzType").c_str());
vplatform.m_lMaxConnect=atol(xml.GetAttrib("DeviceMaxConnect").c_str());
m_VPlatformVec.push_back(vplatform);
}
}
public:
std::vector<VDeviceConfig> m_VPlatformVec;
};
class CHttpRemoveVPlatorm
{
    // Parses a "remove virtual platform" request and collects the device
    // ids to remove. (Class name typo "VPlatorm" kept for compatibility.)
public:
    CHttpRemoveVPlatorm()
    {
    }
    ~CHttpRemoveVPlatorm()
    {
    }
    // Extract all VirtualDevice/@DeviceID values from strMsg into
    // m_VPlatformVec.
    // NOTE(review): unlike CHttpAddVPlatform::UnSerialize, the enclosing
    // <Message> element is skipped here — the commented-out code below used
    // to handle it. Confirm which document shape the sender actually uses.
    void UnSerialize(std::string& strMsg)
    {
        CMarkup xml;
        xml.SetDoc(strMsg);
        //if(xml.FindElem("Message")==false)
        //{
        // return ;
        //}
        //xml.IntoElem();
        if (xml.FindElem("VPlatforms")==false)
        {
            return ;
        }
        xml.IntoElem();
        while(xml.FindElem("VirtualDevice"))
        {
            std::string strDevId=xml.GetAttrib("DeviceID");
            m_VPlatformVec.push_back(strDevId);
        }
    }
public:
    std::vector<std::string> m_VPlatformVec; // DeviceIDs scheduled for removal.
};
#endif
|
In my next few blogs, I will provide an overview of Voltage Source Converter (VSC) HVDC technology and its suitability for Smart Grids operation and control discussed.
VSC HVDC is based upon transistor technology and was developed in the 1990s. The switching element is the Insulated Gate Bipolar Transistor (IGBT), which can be switched on and off by applying a suitable voltage to the gate (steering electrode). Because of the larger number of switching operations, and the nature of the semiconductor devices themselves, the converter losses are generally higher than those of HVDC classic converters.
VSC HVDC is commonly used with underground or submarine cables with a transfer capacity in the range of 10 – 1000 MW, and is suitable to serve as a connection to a wind farm or supply a remote load. VSC HVDC technology has very fast steer and control functionality and is suitable for meshed networks. It is characterised by compactness of the converter stations, due to the reduced need for AC harmonic filters and reactive power compensation. Power flow reversal in VSC systems is achieved by reversal of the current, whereas in HVDC classic systems the voltage polarity has to change. An important consequence of this voltage source behavior is the ability to use cheaper and easier to install XLPE cables, instead of the mass-impregnated cables that are needed for HVDC classic.
Currently, only twelve VSC HVDC projects are in service. A few examples include: Estlink, which connects Estonia to Finland (350 MW), and BorWin1, connecting an offshore wind farm to Northern Germany (400 MW). Both are equipped with ±150 kV submarine cables, and the Trans Bay project in California (400 MW) that consists of 90 km ±200 kV submarine cable.
Most projects have submarine cable, but some projects include long lengths of underground cable, such as Murraylink (220 MW, 177 km underground cable), and Nord E.On 1 (400 MW, 75km underground cable).
The 500 MW East-West interconnector between Ireland and Great Britain, operating at ±200 kV, is scheduled to go into service in 2012. A 2000 MW 65 km cable interconnector ±320kV as part of the Trans European Network—between Spain and France—is scheduled for commissioning in 2013, and will represent the highest power rating for a VSC HVDC system installed at this time.
Make sure to check back next Tuesday for my next blog on the comparison between HVDC classic and VSC HVDC.
By: Peter Vaessen
|
The Operations Layer defines the operational processes and procedures necessary to deliver Information Technology (IT) as a Service. This layer leverages IT Service Management concepts that can be found in prevailing best practices such as ITIL and MOF. The main focus of the Operations Layer is to execute the business requirements defined at the Service Delivery Layer. Cloud-like service attributes cannot be achieved through technology alone and require a high level of IT Service Management maturity.
Change Management process is responsible for controlling the life cycle of all changes. The primary objective of Change Management is to eliminate or at least minimize disruption while desired changes are made to services. Change Management focuses on understanding and balancing the cost and risk of making the change versus the benefit of the change to either the business or the service. Driving predictability and minimizing human involvement are the core principles for achieving a mature Service Management process and ensuring changes can be made without impacting the perception of continuous availability.
Standard (Automated) Change
Non-Standard (Mechanized) Change
It is important to note that a record of all changes must be maintained, including Standard Changes that have been automated. The automated process for Standard Changes should include the creation and population of the change record per standard policy in order to ensure auditability.
Automating changes also enables other key principles such as:
The Service Asset and Configuration Management process is responsible for maintaining information on the assets, components, and infrastructure needed to provide a service. Critical configuration data for each component, and its relationship to other components, must be accurately captured and maintained. This configuration data should include past and current states and future-state forecasts, and be easily available to those who need it. Mature Service Asset and Configuration Management processes are necessary for achieving predictability.
A virtualized infrastructure adds complexity to the management of Configuration Items (CIs) due to the transient nature of the relationship between guests and hosts in the infrastructure. How is the relationship between CIs maintained in an environment that is potentially changing very frequently?
A service comprises software, platform, and infrastructure layers. Each layer provides a level of abstraction that is dependent on the layer beneath it. This abstraction hides the implementation and composition details of the layer. Access to the layer is provided through an interface and as long as the fabric is available, the actual physical location of a hosted VM is irrelevant. To provide Infrastructure as a Service (IaaS), the configuration and relationship of the components within the fabric must be understood, whereas the details of the configuration within the VMs hosted by the fabric are irrelevant.
The Configuration Management System (CMS) will need to be partitioned, at a minimum, into physical and logical CI layers. Two Configuration Management Databases (CMDBs) might be used; one to manage the physical CIs of the fabric (facilities, network, storage, hardware, and hypervisor) and the other to manage the logical CIs (everything else). The CMS can be further partitioned by layer, with separate management of the infrastructure, platform, and software layers. The benefits and trade-offs of each approach are summarized below.
CMS Partitioned by Layer
CMS Partitioned into Physical and Logical
Table 2: Configuration Management System Options
Partitioning logical and physical CI information allows for greater stability within the CMS, because CIs will need to be changed less frequently. This means less effort will need to be expended to accurately maintain the information. During normal operations, mapping a VM to its physical host is irrelevant. If historical records of a VM’s location are needed, (for example, for auditing or Root Cause Analysis) they can be traced through change logs.
The physical or fabric CMDB will need to include a mapping of fault domains, upgrade domains, and Live Migration domains. The relationship of these patterns to the infrastructure CIs will provide critical information to the Fabric Management System.
The Release and Deployment Management processes are responsible for making sure that approved changes to a service can be built, tested, and deployed to meet specifications with minimal disruption to the service and production environment. Where Change Management is based on the approval mechanism (determining what will be changed and why), Release and Deployment Management will determine how those changes will be implemented.
The primary focus of Release and Deployment Management is to protect the production environment. The less variation is found in the environment, the greater the level of predictability – and, therefore, the lower the risk of causing harm when new elements are introduced. The concept of homogenization of physical infrastructure is derived from this predictability principle. If the physical infrastructure is completely homogenized, there is much greater predictability in the release and deployment process.
While complete homogenization is the ideal, it may not be achievable in the real world. Homogenization is a continuum. The closer an environment gets to complete homogeneity, the more predictable it becomes and the fewer the risks. Full homogeneity means not only that identical hardware models are used, but all hardware configuration is identical as well. When complete hardware homogeneity is not feasible, strive for configuration homogeneity wherever possible.
Figure 2: Homogenization Continuum
The Scale Unit concept drives predictability in Capacity Planning and agility in the release and deployment of physical infrastructure. The hardware specifications and configurations have been pre-defined and tested, allowing for a more rapid deployment cycle than in a traditional data center. Similarly, known quantities of resources are added to the data center when the Capacity Plan is triggered. However, when the Scale Unit itself must change (for example, when a vendor retires a hardware model), a new risk is introduced to the private cloud.
There will likely be a period where both n and n-1 versions of the Scale Unit exist in the infrastructure, but steps can be taken to minimize the risk this creates. Work with hardware vendors to understand the life cycle of their products and coordinate changes from multiple vendors to minimize iterations of the Scale Unit change. Also, upgrading to the new version of the Scale Unit should take place one Fault Domain at a time wherever possible. This will make sure that if an incident occurs with the new version, it can be isolated to a single Fault Domain.
Homogenization of the physical infrastructure means consistency and predictability for the VMs regardless of which physical host they reside on. This concept can be extended beyond the production environment. The fabric can be partitioned into development, test, and pre-production environments as well. Eliminating variability between environments enables developers to more easily optimize applications for a private cloud and gives testers more confidence that the results reflect the realities of production, which in turn should greatly improve testing efficiency.
The virtualized infrastructure enables workloads to be transferred more easily between environments. All VMs should be built from a common set of component templates housed in a library, which is used across all environments. This shared library includes templates for all components approved for production, such as VM images, the gold OS image, server role templates, and platform templates. These component templates are downloaded from the shared library and become the building blocks of the development environment. From development, these components are packaged together to create a test candidate package (in the form of a virtual hard disk (VHD) that is uploaded to the library. This test candidate package can then be deployed by booting the VHD in the test environment. When testing is complete, the package can again be uploaded to the library as a release candidate package – for deployment into the pre-production environment, and ultimately into the production environment.
Since workloads are deployed by booting a VM from a VHD, the Release Management process occurs very quickly through the transfer of VHD packages to different environments. This also allows for rapid rollback should the deployment fail; the current release can be deleted and the VM can be booted off the previous VHD.
Virtualization and the use of standard VM templates allow us to rethink software updates and patch management. As there is minimal variation in the production environment and all services in production are built with a common set of component templates, patches need not be applied in production. Instead, they should be applied to the templates in the shared library. Any services in production using that template will require a new version release. The release package is then rebuilt, tested, and redeployed, as shown below.
Figure 3: The Release Process
This may seem counter-intuitive for a critical patch scenario, such as when an exploitable vulnerability is exposed. But with virtualization technologies and automated test scripts, a new version of a service can be built, tested, and deployed quite rapidly.
Variation can also be reduced through standardized, automated test scenarios. While not every test scenario can or should be automated, tests that are automated will improve predictability and facilitate more rapid test and deployment timelines. Test scenarios that are common for all applications, or the ones that might be shared by certain application patterns, are key candidates for automation. These automated test scripts may be required for all release candidates prior to deployment and would make sure further reduction in variation in the production environment.
Knowledge Management is the process of gathering, analyzing, storing, and sharing knowledge and information within an organization. The goal of Knowledge Management is to make sure that the right people have access to the information they need to maintain a private cloud. As operational knowledge expands and matures, the ability to intelligently automate operational tasks improves, providing for an increasingly dynamic environment.
An immature approach to Knowledge Management costs organizations in terms of slower, less-efficient problem solving. Every problem or new situation that arises becomes a crisis that must be solved. A few people may have the prior experience to resolve the problem quickly and calmly, but their knowledge is not shared. Immature knowledge management creates greater stress for the operations staff and usually results in user dissatisfaction with frequent and lengthy unexpected outages. Mature Knowledge Management processes are necessary for achieving a service provider’s approach to delivering infrastructure. Past knowledge and experience is documented, communicated, and readily available when needed. Operating teams are no longer crisis-driven as service-impacting events grow less frequent and are quickly resolved when they do occur.
When designing a private cloud, development of the Health Model will drive much of the information needed for Knowledge Management. The Health Model defines the ideal states for each infrastructure component and the daily, weekly, monthly, and as-needed tasks required to maintain this state. The Health Model also defines unhealthy states for each infrastructure component and actions to be taken to restore their health. This information will form the foundation of the Knowledge Management database.
Aligning the Health Model with alerts allows these alerts to contain links to the Knowledge Management database describing the specific steps to be taken in response to the alert. This will help drive predictability as a consistent, proven set of actions will be taken in response to each alert.
The final step toward achieving a private cloud is the automation of responses to each alert as defined in the Knowledge Management database. Once these responses are proven successful, they should be automated to the fullest extent possible. It is important to note, though, that automating responses to alerts does not make them invisible and forgotten. Even when alerts generate a fully automated response they must be captured in the Service Management system. If the alert indicates the need for a change, the change record should be logged. Similarly, if the alert is in response to an incident, an incident record should be created. These automated workflows must be reviewed regularly by Operations staff to make sure the automated action achieves the expected result. Finally, as the environment changes over time, or as new knowledge is gained, the Knowledge Management database must be updated along with the automated workflows that are based on that knowledge.
The goal of Incident Management is to resolve events that are impacting, or threaten to impact, services as quickly as possible with minimal disruption. The goal of Problem Management is to identify and resolve root causes of incidents that have occurred as well as identify and prevent or minimize the impact of incidents that may occur.
Pinpointing the root cause of an incident can become more challenging when workloads are abstracted from the infrastructure and their physical location changes frequently. Additionally, incident response teams may be unfamiliar with virtualization technologies (at least initially) which could also lead to delays in incident resolution. Finally, applications may have neither a robust Health Model nor expose all of the health information required for a proactive response. All of this may lead to an increase in reactive (user initiated) incidents which will likely increase the Mean-Time-to-Restore-Service (MTRS) and customer dissatisfaction.
This may seem to go against the resiliency principle, but note that virtualization alone will not achieve the desired resiliency unless accompanied by highly mature IT Service Management (ITSM) maturity and a robust automated health monitoring system.
The drive for resiliency requires a different approach to troubleshooting incidents. Extensive troubleshooting of incidents in production negatively impacts resiliency. Therefore, if an incident cannot be quickly resolved, the service can be rolled back to the previous version, as described under Release and Deployment. Further troubleshooting can be done in a test environment without impacting the production environment. Troubleshooting in the production environment may be limited to moving the service to different hosts (ruling out infrastructure as the cause) and rebooting the VMs. If these steps do not resolve the issue, the rollback scenario could be initiated.
Minimizing human involvement in incident management is critical for achieving resiliency. The troubleshooting scenarios described earlier could be automated, which will allow for identification and possible resolution of the root cause much more quickly than non-automated processes. But automation may mask the root cause of the incident. Careful consideration should be given to determining which troubleshooting steps should be automated and which require human analysis.
Human Analysis of Troubleshooting
If a compute resource fails, it is no longer necessary to treat the failure as an incident that must be fixed immediately. It may be more efficient and cost effective to treat the failure as part of the decay of the Resource Pool. Rather than treat a failed server as an incident that requires immediate resolution, treat it as a natural candidate for replacement on a regular maintenance schedule, or when the Resource Pool reaches a certain threshold of decay. Each organization must balance cost, efficiency, and risk as it determines an acceptable decay threshold – and choose among these courses of action:
The benefits and trade-off of each of the options are listed below:
Option 4 is the least desirable, as it does not take advantage of the resiliency and cost reduction benefits of a private cloud. A well-planned Resource Pool and Reserve Capacity strategy will account for Resource Decay.
Option 1 is the most recommended approach. A predictable maintenance schedule allows for better procurement planning and can help avoid conflicts with other maintenance activities, such as software upgrades. Again, a well-planned Resource Pool and Reserve Capacity strategy will account for Resource Decay and minimize the risk of exceeding critical thresholds before the scheduled maintenance.
Option 3 will likely be the only option for self-contained Scale Unit scenarios, as the container must be replaced as a single Scale Unit when the decay threshold is reached.
The goal of Request Fulfillment is to manage requests for service from users. Users should have a clear understanding of the process they need to initiate to request service and IT should have a consistent approach for managing these requests.
Much like any service provider, IT should clearly define the types of requests available to users in the service catalog. The service catalog should include an SLA on when the request will be completed, as well as the cost of fulfilling the request, if any.
The types of requests available and their associated costs should reflect the actual cost of completing the request and this cost should be easily understood. For example, if a user requests an additional VM, its daily cost should be noted on the request form, which should also be exposed to the organization or person responsible for paying the bill.
It is relatively easy to see the need for adding resources, but more difficult to see when a resource is no longer needed. A process for identifying and removing unused VMs should be put into place. There are a number of strategies to do this, depending on the needs of a given organization, such as:
The benefits and trade-offs of each of these approaches are detailed below:
Option 4 affords the greatest flexibility, while still working to minimize server sprawl. When a user requests a VM, they have the option of setting an expiration date with no reminder (for example, if they know they will only be using the workload for one week). They could set an expiration deadline with a reminder (for example, a reminder that the VM will expire after 90 days unless they wish to renew). Lastly, the user may request no expiration date if they expect the workload will always be needed. If the last option is chosen, it is likely that underutilized VMs will still be monitored and owners notified.
Finally, self-provisioning should be considered, if appropriate, when evaluating request fulfillment options to drive towards minimal human involvement. Self-provisioning allows great agility and user empowerment, but it can also introduce risks depending on the nature of the environment in which these VMs are introduced.
For an enterprise organization, the risk of bypassing formal build, stabilize, and deploy processes may or may not outweigh the agility benefits gained from the self-provisioning option. Without strong governance to make sure each VM has an end-of-life strategy, the fabric may become congested with VM server sprawl. The pros and cons of self-provisioning options are listed in the next diagram:
The primary decision point for determining whether to use self-provisioning is the nature of the environment. Allowing developers to self-provision into the development environment greatly facilitates agile development, and allows the enterprise to maintain release management controls as these workloads are moved out of development and into test and production environments.
A user-led community environment isolated from enterprise mission-critical applications may also be a good candidate for self-provisioning. As long as user actions are isolated and cannot impact mission critical applications, the agility and user empowerment may justify the risk of giving up control of release management. Again, it is essential that in such a scenario, expiration timers are included to prevent server sprawl.
The goal of Access Management is to make sure authorized users have access to the services they need while preventing access by unauthorized users. Access Management is the implementation of security policies defined by Information Security Management at the Service Delivery Layer.
Maintaining access for authorized users is critical for achieving the perception of continuous availability. Besides allowing access, Access Management defines users who are allowed to use, configure, or administer objects in the Management Layer. From a provider’s perspective, it answers questions like:
From a consumer’s perspective, it answers questions such as:
Access Management is implemented at several levels and can include physical barriers to systems such as requiring access smartcards at the data center, or virtual barriers such as network and Virtual Local Area Network (VLAN) separation, firewalling, and access to storage and applications.
Taking a service provider’s approach to Access Management will also make sure that resource segmentation and multi-tenancy is addressed.
Resource Pools may need to be segmented to address security concerns around confidentiality, integrity, and availability. Some tenants may not wish to share infrastructure resources to keep their environment isolated from others. Access Management of shared infrastructure requires logical access control mechanisms such as encryption, access control rights, user groupings, and permissions. Dedicated infrastructure also relies on physical access control mechanisms, where infrastructure is not physically connected, but is effectively isolated through a firewall or other mechanisms.
The goal of systems administration is to make sure that the daily, weekly, monthly, and as-needed tasks required to keep a system healthy are being performed.
Regularly performing ongoing systems administration tasks is critical for achieving predictability. As the organization matures and the Knowledge Management database becomes more robust and increasingly automated, systems administration tasks are no longer part of the job role function. It is important to keep this in mind as an organization moves to a private cloud. Staff once responsible for systems administration should refocus on automation and scripting skills – and on monitoring the fabric to identify patterns that indicate possibilities for ongoing improvement of existing automated workflows.
|
#include <bits/stdc++.h>
using namespace std;
// Global move counters: shiftI = element shifts performed by InsertionSort,
// shiftQ = swaps performed by QuickSort's partition step.
int shiftI = 0, shiftQ = 0;
// Exchanges the two integers pointed to by a and b.
void Swap (int * a, int * b) {
    int tmp = *b;
    *b = *a;
    *a = tmp;
}
// Lomuto partition of Arr[start..end] around the pivot Arr[end].
// Each swap performed (including self-swaps) increments the global
// counter shiftQ. Returns the pivot's final index.
int Partition (vector<int> &Arr, int start, int end) {
    const int pivot = Arr[end];
    int boundary = start - 1;  // last index of the "< pivot" region
    for (int k = start; k < end; k++) {
        if (Arr[k] < pivot) {
            boundary += 1;
            Swap(&Arr[boundary], &Arr[k]);
            shiftQ += 1;
        }
    }
    // Drop the pivot just past the "< pivot" region.
    Swap(&Arr[boundary + 1], &Arr[end]);
    shiftQ += 1;
    return boundary + 1;
}
// Sorts Arr[start..end] in place with quicksort. The left half is
// handled by a recursive call; the right half by looping (tail-call
// elimination), which yields the same sequence of Partition calls.
void QuickSort (vector<int> &Arr, int start, int end) {
    while (start < end) {
        const int pivotIdx = Partition (Arr, start, end);
        QuickSort (Arr, start, pivotIdx - 1);
        start = pivotIdx + 1;
    }
}
// Sorts Arr ascending with insertion sort. Every one-position element
// shift made while opening a slot increments the global counter shiftI.
void InsertionSort (vector<int> &Arr) {
    const int n = Arr.size();
    for (int pos = 1; pos < n; pos++) {
        const int key = Arr[pos];
        int slot = pos;
        while (slot > 0 && Arr[slot - 1] > key) {
            Arr[slot] = Arr[slot - 1];
            slot -= 1;
            shiftI += 1;
        }
        Arr[slot] = key;
    }
}
int main (void) {
vector <int> Arr1, Arr2;
int Size;
cin >> Size;
for (int i = 0; i < Size; i++) {
int temp;
cin >> temp;
Arr1.push_back(temp);
Arr2.push_back(temp);
}
InsertionSort (Arr1);
QuickSort (Arr2, 0, Size - 1);
cout << shiftI - shiftQ << endl;
return 0;
}
|
Published May 2008
Properly located digital signage in high traffic areas on school campuses provides students and faculty with a convenient resource to stay up to date about the latest school news and activities.
Signage in Education
By Anthony D. Coppedge
Technology gets high marks.
Digital media and communications have come to play a vital role in people’s everyday lives, and a visit to the local K-12 school, college or university campus quickly illustrates the many ways in which individuals rely on audio and visual technologies each day. The shift from analog media to digital, represented by milestones ranging from the replacement of the Walkman by the MP3 player to the DTV transition currently enabling broadcasts beyond the home to mobile devices, has redefined the options that larger institutions, including those in our educational system, have for sharing information across the campus and facilities.
Flexible And Efficient
Digital signage, in particular, is proving to be a flexible and efficient tool for delivering specific and up-to-date information within the educational environment. As a high-resolution, high-impact medium, it lives up to the now-widespread expectation that visual media be crisp and clear, displayed on a large screen. Although the appeal of implementing digital signage networks does stem, in part, from plummeting screen prices and sophisticated content delivery systems, what’s equally or more important is that digital signage provides valuable information to the people who need it, when and where they need it. On school campuses—whether preschool, elementary, high school or post-secondary institutions—it does so effectively, for both educational purposes and for the security and safety of staff, administration and the student body as a whole.
School campuses have begun leveraging digital signage technology in addition to, or in place of, printed material, such as course schedules, content and location; time-sensitive school news and updates; maps and directions; welcome messages for visitors and applicants; and event schedules. Digital signage simplifies creation and delivery of multiple channels of targeted content to different displays on the network. Although a display in the college admissions office might provide prospective students with a glimpse into student life, for example, another display outside a lab or seminar room might present the courses or lectures scheduled for that space throughout the day.
This model of a distribution concept illustrates a school distributing educational content over a public TV broadcast network.
At the K-12 level, digital signage makes it easy to deliver information such as team or band practice schedules, or to post the cafeteria menu and give students information encouraging sound food choices. Digital signage in the preschool and daycare setting makes it easy for teachers and caregivers to share targeted educational programming with their classes.
Among the most striking benefits of communicating through digital signage is the quality of the pictures and the flexibility with which images, text and video can be combined in one or more windows to convey information. Studies have shown that dynamic signage is noticed significantly more often than are static displays and, furthermore, that viewers are more likely to remember that dynamic content.
Though most regularly updated digital signage content tends to be text-based, digital signage networks also have the capacity to enable the live campus-wide broadcast of key events: a speech by a visiting dignitary, the basketball team’s first trip to the state or national tournament, or even the proceedings at commencement and graduation. When time is short, it’s impractical to gather the entire student body in one place or there simply isn’t the time or means to deliver the live message in any other way.
The ability to share critical information to the entire school community, clearly and without delay, has made digital signage valuable as a tool for emergency response and communications. Parents, administrators, teachers and students today can’t help but be concerned about the school’s ability to respond quickly and effectively to a dangerous situation, whether the threat be from another person, an environmental hazard, an unpredictable weather system or some other menace.
Digital signage screens installed across a school campus can be updated immediately to warn students and staff of the danger, and to provide unambiguous instructions for seeking shelter or safety: where to go and what to do.
Although early digital signage systems relied on IP-based networks and point-to-point connections between a player and each display, current solutions operate on far less costly and much more scalable platforms. Broadcast-based digital signage models allow content to be distributed remotely from a single data source via transport media, such as digital television broadcast, satellite, broadband and WiMAX.
The staff member responsible for maintaining the digital signage network can use popular content creation toolsets to populate both dynamic and static displays. This content is uploaded to a server that, in turn, feeds the digital signage network via broadcast, much like datacasting, to the receive site for playout. By slotting specific content into predefined display templates, each section with its own playlist, the administrator can schedule display of multiple elements simultaneously or a single-window static, video or animated display.
The playlist enables delivery of the correct elements to the targeted display both at the scheduled time and in the appropriate layout. In networks with multicast-enabled routers, the administrator can schedule unique content for displays in different locations.
In the case of delivering emergency preparedness or response information across a campus, content can be created through the same back-office software used for day-to-day digital signage displays. Within the broadcast-based model, three components ensure the smooth delivery of content to each display.
A transmission component serves as a content hub, allocating bandwidth and inserting content into the broadcast stream based on the schedule dictated by the network’s content management component. Content is encapsulated into IP packets that, in turn, are encapsulated into MPEG2 packets for delivery.
Generic content distribution model for digital signage solution.
The content management component of the digital signage network provides for organization and scheduling of content, as well as targeting of that content to specific receivers. Flexibility in managing the digital signage system enables distribution of the same emergency message across all receivers and associated displays, or the delivery of select messages to particular displays within the larger network.
With tight control over the message being distributed, school administrators can immediately provide the information that students and staff in different parts of the campus need to maintain the safest possible environment. Receivers can be set to confirm receipt of content, in turn assuring administrative and emergency personnel that their communications are, in fact, being conveyed as intended. On the receiving end, the third component of the system — the receiver — extracts content from the digital broadcast stream and feeds it to the display screen.
The relationships that many colleges and universities share with public TV stations provide an excellent opportunity for establishing a digital signage network. Today, the deployed base of broadcast-based content distribution systems in public TV stations is capable of reaching 50% of the US population. These stations’ DTV bandwidth is used not only for television programming, but also to generate new revenues and aggressively support public charters by providing efficient delivery of multimedia content for education, homeland security and other public services.
Educational institutions affiliated with such broadcasters already have the technology, and much of the necessary infrastructure, in place to launch a digital signage network. In taking advantage of the public broadcaster’s content delivery system, the college or university also can tap into the station’s existing links with area emergency response agencies.
As digital signage technology continues to evolve, educational institutions will be able to extend both urgent alerts and more mundane daily communications over text and email messaging. Smart content distribution systems will push consistent information to screens of all sizes, providing messages not only to displays, but also to the cell phones and PDAs so ubiquitous in US schools.
The continued evolution of MPH technology will support this enhancement in delivery of messages directly to each student. MPH in-band mobile DTV technology leverages ATSC DTV broadcasts to enable extensions of digital signage and broadcast content directly to personal devices, whether stationary or on the move. Rather than rely on numerous unrelated systems, such as ringing bells, written memos and intercom announcements, schools can unify messaging and its delivery, in turn reducing the redundancy involved in maintaining communications with the student body.
An effective digital signage network provides day-to-day benefits for an elementary school, high school, college or university while providing invaluable emergency communications capabilities that increasingly are considered a necessity, irrespective of whether they get put to the test. The selection of an appropriate digital signage model depends, of course, on the needs of the organization.
Educational institutions share many of the same concerns held by counterparts in the corporate world, and key among those concerns is the simple matter of getting long-term value and use out of their technical investments. However, before even addressing the type of content the school wishes to create and distribute, the systems integrator, consultant or other AV and media professional should work with the eventual operators of the digital signage network to identify and map out the existing workflow. Once the system designer, integrator or installer has evaluated how staff currently work in an emergency to distribute information, he then can adjust established processes and adapt them to the digital signage model.
The administrative staff who will be expected to update or import schedules to the digital signage system will have a much lower threshold of acceptance for a workflow that is completely unfamiliar or at odds with all their previous experience. An intuitive, easy-to-use system is more likely to be used in an emergency if it has become familiar in everyday practice.
Turnkey digital signage solutions provide end-to-end functionality without forcing users and integrators to work with multiple systems and interfaces. The key in selecting a vendor lies in ensuring that they share the same vision and are moving in the same direction as the end user.
In addition to providing ease of use, digital signage solutions for the education market also must provide a high level of built-in security, preventing abuse or misuse by hackers, or by those without the knowledge, experience or authority to distribute content over the network. Because the network is a conduit for emergency messaging, its integrity must be protected. So, the installer must not only identify the number of screens to be used and where, but also determine who gets access to the system and how that access remains secure.
Scalable systems that can grow in number of displays or accommodate infrastructure improvements and distribution of higher-bandwidth content will provide the long-term utility that makes the investment worthwhile. By going into the project with an understanding of existing infrastructure, such as cabling, firewalls, etc., and the client’s goals, the professional is equipped to advise the customer as to the necessity, options and costs for enhancing or improving on that infrastructure. As with any other significant deployment of AV technology, the installation of a digital signage network also requires knowledge of the site, local building codes, the availability of power and so forth.
Ralph Bachofen, senior director of Product Management and Marketing, Triveni Digital, has more than 15 years of experience in voice and multimedia over Internet Protocol (IP), telecommunications and the semiconductor business.
The infrastructure requirements of a school in deploying a digital signage network will vary, depending on the type of content being delivered through the system. HD and streaming content clearly are bandwidth hogs, whereas tickers and other text-based messages put a low demand on bandwidth. Most facilities today are equipped with Gigabit Ethernet networks that can handle the demands of live video delivery and lighter content.
However, even bandwidth-heavy video can be delivered by less robust networks, as larger clips can be “trickled” over time to the site, as long as storage on the unit is adequate. There is no set standard for the bandwidth required, just as there is no single way to use a digital signage solution. It all depends on how the system will be used, and that’s an important detail to address up front.
Most digital signage solutions feature built-in content-creation tools and accept content from third-party applications, as well. Staff members who oversee the system thus can use familiar applications to create up-to-date content for the school’s digital signage network. This continuity in workflow adds to the value and efficiency of the network in everyday use, reducing the administrative burden while serving as a safeguard in the event of an emergency.
For educational institutions, the enormous potential of the digital signage network can open new doors for communicating with students and staff, but only if it is put to use effectively. Comprehensive digital signage solutions offer ease of use to administration, deliver clear and useful messaging on ordinary days and during crises, and feature robust design and underlying technology that supports continual use well into the future.
|
//
// Created by mrityunjay kumar on 2019-07-02.
//
#pragma once
#include <vector>
#include <algorithm>
#include <functional>
#include <memory>
#include <map>
#include <unordered_map>
#include <type_traits>
#include <unistd.h>
#include <iostream>
#include <sstream>
#include <fstream>
#include <iostream>
#include <cstdarg>
#include <string>
//#ifdef TEST_REPLAY_SPEED_FEAT
// #ifndef LOG_FOLDER
// #define LOG_FOLDER "/home/jay/prev_logs/"
// #endif
//#elif TEST_WRITE_SPEED_FEAT
// #ifndef LOG_FOLDER
// #define LOG_FOLDER "/home/jay/prev_logs/"
// #endif
//#else
// #ifndef LOG_FOLDER
// #define LOG_FOLDER "/home/AzureUser/prev_logs/"
// #endif
//#endif
#ifndef LOG_FOLDER
#define LOG_FOLDER "/home/AzureUser/prev_logs/"
#endif
#ifdef LOG_FOLDER
#define LOG_FOLDER_NAME LOG_FOLDER
#endif
// Sub-directory names appended to LOG_FOLDER (literal concatenation via STR2/3/4).
#define INFO_SUFFIX "info/"
#define R_SUFFIX "replay_logs/"
#define W_SUFFIX "write_logs/"
#define STR3(STR1) STR1 INFO_SUFFIX
#define STR2(STR1) STR1 R_SUFFIX
#define STR4(STR1) STR1 W_SUFFIX
#define INFO_LOG_FOLDER_NAME STR3(LOG_FOLDER)
#define REPLAY_SAVER_PATH STR2(LOG_FOLDER)
#define WRITE_STATS_SAVER_PATH STR4(LOG_FOLDER)
/**
 * Process-wide log-path configuration.
 *
 * WHOLE_LOG_STRING / WHOLE_INFO_STRING are populated once via setlog() and
 * read by the loggers below.  Definitions of the static members live in the
 * accompanying .cpp.
 */
struct LOGGING_CONST {
    static std::string WHOLE_LOG_STRING;   // "<LOG_FOLDER>GenLogThd<T>.Time.<R>/"
    static std::string WHOLE_INFO_STRING;  // WHOLE_LOG_STRING + INFO_SUFFIX
    /**
     * Derives the run-specific log directories from the thread count and the
     * configured run time, e.g. "<LOG_FOLDER>GenLogThd4.Time.60/".
     * @param threads number of threads used in this run (folder-name tag)
     * @param runTime run duration used to tag the folder name
     */
    static void setlog(int threads, int runTime){
        std::string SUFFIX_STRING = "GenLogThd"+std::to_string(threads)+".Time."+std::to_string(runTime)+"/";
        WHOLE_LOG_STRING = LOG_FOLDER + SUFFIX_STRING;
        // Reuse the just-built base path and the INFO_SUFFIX macro instead of a
        // second hard-coded "info/" literal, so this path can never drift from
        // INFO_LOG_FOLDER_NAME above.
        WHOLE_INFO_STRING = WHOLE_LOG_STRING + INFO_SUFFIX;
    }
};
// Creates the directory `path`, presumably including missing parents
// ("mkdir -p" style, judging by the name); returns true on success.
// NOTE(review): defined out of line — confirm exact semantics in the .cpp.
bool util_mkpath( const std::string& path );
/**
 * Singleton-per-thread logger / output serializer.
 *
 * One instance exists per thread id; instances are created lazily through
 * GetLogger() and tracked in the static m_pThis map.  Each instance owns an
 * ofstream (awesome_ostream); the static maps below hold additional
 * per-thread streams.  All member functions except the destructor are
 * defined out of line in the accompanying .cpp.
 */
class OutputDataSerializer
{
public:
    /// Returns an identifier; presumably unique and advanced in steps of
    /// increment_value — confirm in the out-of-line definition.
    uint64_t UUID();
    /**
     * Logs a message
     * @param _oss message to be logged.
     */
    // void Log(std::ostream& _oss);
    /**
     * Logging entry points.  `len` is the byte length of sMessage; TimedLog
     * additionally takes a repetition/batch `count` (see .cpp for semantics).
     * @param sMessage message to be logged.
     */
    void TimedLog (const char* sMessage,size_t len,size_t count);
    void Log(const char* sMessage,size_t len);
    void LLog(const char* sMessage,size_t len);
    void Log(const std::string& sMessage);
    void Log(const std::vector<long long unsigned int>* str);
    void BloopLog();
    /// Serializes a name->value map to the log (ordered and unordered variants).
    void MapWriter(const std::map<std::string,long>& fileMap);
    void MapWriter(const std::unordered_map<std::string,long>& fileMap);
    /**
     * Variable Length Logger function
     * @param format string for the message to be logged.
     */
    // void Log(const char * format);
    /**
     * << overloaded function to Logs a message
     * @param sMessage message to be logged.
     */
    OutputDataSerializer& operator<<(const std::string& sMessage);
    /**
     * Returns the per-thread singleton, creating it on first use.
     * @param thread_id key into the singleton map.
     * @param parent_folder_name directory the log files are created under.
     */
    static OutputDataSerializer* GetLogger(int thread_id,const std::string& parent_folder_name);
public:
    static void flush_all();
    static void close_all();
    OutputDataSerializer(int thread_id, const std::string& parent_folder_name,bool bloop=false);
    /// Flushes (but deliberately does not close) the stream so buffered data
    /// survives teardown ordering; close is handled by close_all().
    ~OutputDataSerializer(){
        if(awesome_ostream.is_open()){
            // gmtx.lock ();
            std::cout << "Flushing for thread_id:" << thread_id << " Skip Close";
            awesome_ostream << std::flush;
            // awesome_ostream.close ();
            // gmtx.unlock ();
        }
    }
    void flush(bool bloop= false);
    // static uint64_t _UUID;
private:
    size_t wpos{0};            // current write position in the buffer
    bool is_bloop_act{false};  // default-initialized; ctor may override
    int thread_id{-1};         // owning thread id; set by the public ctor
    // size_t nBufSize;
    // char *buffer;
    static size_t BUFSIZE;
    static constexpr uint64_t increment_value = uint64_t(0x1000);
    /**
     * Default constructor for the Logger class.
     */
    OutputDataSerializer(int thread_id);
    /**
     * Copying is forbidden: the previous private copy ctor / assignment had
     * empty bodies and therefore copied nothing (members were merely
     * default-constructed) — deleting them makes the intent explicit and
     * turns accidental copies into compile errors.
     */
    OutputDataSerializer(const OutputDataSerializer&) = delete;
    OutputDataSerializer& operator=(const OutputDataSerializer&) = delete;
    /**
     * Singleton logger class object pointer (one entry per thread id).
     **/
    static std::unordered_map<int, OutputDataSerializer*> m_pThis;
    //static OutputDataSerializer* m_pThis;
    /**
     * Log file stream objects (instance stream plus static per-thread maps).
     **/
    std::ofstream awesome_ostream;
    static std::unordered_map<int,std::ofstream> m_FileStream;
    static std::unordered_map<int,std::stringstream> m_Logfile;
    static std::unordered_map<int,std::ofstream> m_FileStreamTimedLog;
    static std::unordered_map<int,std::FILE *> m_FileStream3;
};
|
How We Found the Missing Memristor
The memristor--the functional equivalent of a synapse--could revolutionize circuit design
Image: Bryan Christie Design
THINKING MACHINE This artist's conception of a memristor shows a stack of multiple crossbar arrays, the fundamental structure of R. Stanley Williams's device. Because memristors behave functionally like synapses, replacing a few transistors in a circuit with memristors could lead to analog circuits that can think like a human brain.
It’s time to stop shrinking. Moore’s Law, the semiconductor industry’s obsession with the shrinking of transistors and their commensurate steady doubling on a chip about every two years, has been the source of a 50-year technical and economic revolution. Whether this scaling paradigm lasts for five more years or 15, it will eventually come to an end. The emphasis in electronics design will have to shift to devices that are not just increasingly infinitesimal but increasingly capable.
Earlier this year, I and my colleagues at Hewlett-Packard Labs, in Palo Alto, Calif., surprised the electronics community with a fascinating candidate for such a device: the memristor. It had been theorized nearly 40 years ago, but because no one had managed to build one, it had long since become an esoteric curiosity. That all changed on 1 May, when my group published the details of the memristor in Nature.
Combined with transistors in a hybrid chip, memristors could radically improve the performance of digital circuits without shrinking transistors. Using transistors more efficiently could in turn give us another decade, at least, of Moore’s Law performance improvement, without requiring the costly and increasingly difficult doublings of transistor density on chips. In the end, memristors might even become the cornerstone of new analog circuits that compute using an architecture much like that of the brain.
For nearly 150 years, the known fundamental passive circuit elements were limited to the capacitor (discovered in 1745), the resistor (1827), and the inductor (1831). Then, in a brilliant but underappreciated 1971 paper, Leon Chua, a professor of electrical engineering at the University of California, Berkeley, predicted the existence of a fourth fundamental device, which he called a memristor. He proved that memristor behavior could not be duplicated by any circuit built using only the other three elements, which is why the memristor is truly fundamental.
Memristor is a contraction of ”memory resistor,” because that is exactly its function: to remember its history. A memristor is a two-terminal device whose resistance depends on the magnitude and polarity of the voltage applied to it and the length of time that voltage has been applied. When you turn off the voltage, the memristor remembers its most recent resistance until the next time you turn it on, whether that happens a day later or a year later.
Think of a resistor as a pipe through which water flows. The water is electric charge. The resistor’s obstruction of the flow of charge is comparable to the diameter of the pipe: the narrower the pipe, the greater the resistance. For the history of circuit design, resistors have had a fixed pipe diameter. But a memristor is a pipe that changes diameter with the amount and direction of water that flows through it. If water flows through this pipe in one direction, it expands (becoming less resistive). But send the water in the opposite direction and the pipe shrinks (becoming more resistive). Further, the memristor remembers its diameter when water last went through. Turn off the flow and the diameter of the pipe ”freezes” until the water is turned back on.
That freezing property suits memristors brilliantly for computer memory. The ability to indefinitely store resistance values means that a memristor can be used as a nonvolatile memory. That might not sound like very much, but go ahead and pop the battery out of your laptop, right now—no saving, no quitting, nothing. You’d lose your work, of course. But if your laptop were built using a memory based on memristors, when you popped the battery back in, your screen would return to life with everything exactly as you left it: no lengthy reboot, no half-dozen auto-recovered files.
But the memristor’s potential goes far beyond instant-on computers to embrace one of the grandest technology challenges: mimicking the functions of a brain. Within a decade, memristors could let us emulate, instead of merely simulate, networks of neurons and synapses. Many research groups have been working toward a brain in silico: IBM’s Blue Brain project, Howard Hughes Medical Institute’s Janelia Farm, and Harvard’s Center for Brain Science are just three. However, even a mouse brain simulation in real time involves solving an astronomical number of coupled partial differential equations. A digital computer capable of coping with this staggering workload would need to be the size of a small city, and powering it would require several dedicated nuclear power plants.
Memristors can be made extremely small, and they function like synapses. Using them, we will be able to build analog electronic circuits that could fit in a shoebox and function according to the same physical principles as a brain.
A hybrid circuit—containing many connected memristors and transistors—could help us research actual brain function and disorders. Such a circuit might even lead to machines that can recognize patterns the way humans can, in those critical ways computers can’t—for example, picking a particular face out of a crowd even if it has changed significantly since our last memory of it.
The story of the memristor is truly one for the history books. When Leon Chua, now an IEEE Fellow, wrote his seminal paper predicting the memristor, he was a newly minted and rapidly rising professor at UC Berkeley. Chua had been fighting for years against what he considered the arbitrary restriction of electronic circuit theory to linear systems. He was convinced that nonlinear electronics had much more potential than the linear circuits that dominate electronics technology to this day.
Chua discovered a missing link in the pairwise mathematical equations that relate the four circuit quantities—charge, current, voltage, and magnetic flux—to one another. These can be related in six ways. Two are connected through the basic physical laws of electricity and magnetism, and three are related by the known circuit elements: resistors connect voltage and current, inductors connect flux and current, and capacitors connect voltage and charge. But one equation is missing from this group: the relationship between charge moving through a circuit and the magnetic flux surrounded by that circuit—or more subtly, a mathematical doppelgänger defined by Faraday’s Law as the time integral of the voltage across the circuit. This distinction is the crux of a raging Internet debate about the legitimacy of our memristor [see sidebar, ”Resistance to Memristance ”].
Chua’s memristor was a purely mathematical construct that had more than one physical realization. What does that mean? Consider a battery and a transformer. Both provide identical voltages—for example, 12 volts of direct current—but they do so by entirely different mechanisms: the battery by a chemical reaction going on inside the cell and the transformer by taking a 110 V ac input, stepping that down to 12 V ac, and then transforming that into 12 V dc. The end result is mathematically identical—both will run an electric shaver or a cellphone, but the physical source of that 12 V is completely different.
Conceptually, it was easy to grasp how electric charge could couple to magnetic flux, but there was no obvious physical interaction between charge and the integral over the voltage.
Chua demonstrated mathematically that his hypothetical device would provide a relationship between flux and charge similar to what a nonlinear resistor provides between voltage and current. In practice, that would mean the device’s resistance would vary according to the amount of charge that passed through it. And it would remember that resistance value even after the current was turned off.
He also noticed something else—that this behavior reminded him of the way synapses function in a brain.
Even before Chua had his eureka moment, however, many researchers were reporting what they called ”anomalous” current-voltage behavior in the micrometer-scale devices they had built out of unconventional materials, like polymers and metal oxides. But the idiosyncrasies were usually ascribed to some mystery electrochemical reaction, electrical breakdown, or other spurious phenomenon attributed to the high voltages that researchers were applying to their devices.
As it turns out, a great many of these reports were unrecognized examples of memristance. After Chua theorized the memristor out of the mathematical ether, it took another 35 years for us to intentionally build the device at HP Labs, and we only really understood the device about two years ago. So what took us so long?
It’s all about scale. We now know that memristance is an intrinsic property of any electronic circuit. Its existence could have been deduced by Gustav Kirchhoff or by James Clerk Maxwell, if either had considered nonlinear circuits in the 1800s. But the scales at which electronic devices have been built for most of the past two centuries have prevented experimental observation of the effect. It turns out that the influence of memristance obeys an inverse square law: memristance is a million times as important at the nanometer scale as it is at the micrometer scale, and it’s essentially unobservable at the millimeter scale and larger. As we build smaller and smaller devices, memristance is becoming more noticeable and in some cases dominant. That’s what accounts for all those strange results researchers have described. Memristance has been hidden in plain sight all along. But in spite of all the clues, our finding the memristor was completely serendipitous.
In 1995, I was recruited to HP Labs to start up a fundamental research group that had been proposed by David Packard. He decided that the company had become large enough to dedicate a research group to long-term projects that would be protected from the immediate needs of the business units. Packard had an altruistic vision that HP should ”return knowledge to the well of fundamental science from which HP had been withdrawing for so long.” At the same time, he understood that long-term research could be the strategic basis for technologies and inventions that would directly benefit HP in the future. HP gave me a budget and four researchers. But beyond the comment that ”molecular-scale electronics” would be interesting and that we should try to have something useful in about 10 years, I was given carte blanche to pursue any topic we wanted. We decided to take on Moore’s Law.
At the time, the dot-com bubble was still rapidly inflating its way toward a resounding pop, and the existing semiconductor road map didn’t extend past 2010. The critical feature size for the transistors on an integrated circuit was 350 nanometers; we had a long way to go before atomic sizes would become a limitation. And yet, the eventual end of Moore’s Law was obvious. Someday semiconductor researchers would have to confront physics-based limits to their relentless descent into the infinitesimal, if for no other reason than that a transistor cannot be smaller than an atom. (Today the smallest components of transistors on integrated circuits are roughly 45 nm wide, or about 220 silicon atoms.)
That’s when we started to hang out with Phil Kuekes, the creative force behind the Teramac (tera-operation-per-second multiarchitecture computer)—an experimental supercomputer built at HP Labs primarily from defective parts, just to show it could be done. He gave us the idea to build an architecture that would work even if a substantial number of the individual devices in the circuit were dead on arrival. We didn’t know what those devices would be, but our goal was electronics that would keep improving even after the devices got so small that defective ones would become common. We ate a lot of pizza washed down with appropriate amounts of beer and speculated about what this mystery nanodevice would be.
We were designing something that wouldn’t even be relevant for another 10 to 15 years. It was possible that by then devices would have shrunk down to the molecular scale envisioned by David Packard or perhaps even be molecules. We could think of no better way to anticipate this than by mimicking the Teramac at the nanoscale. We decided that the simplest abstraction of the Teramac architecture was the crossbar, which has since become the de facto standard for nanoscale circuits because of its simplicity, adaptability, and redundancy.
The crossbar is an array of perpendicular wires. Anywhere two wires cross, they are connected by a switch. To connect a horizontal wire to a vertical wire at any point on the grid, you must close the switch between them. Our idea was to open and close these switches by applying voltages to the ends of the wires. Note that a crossbar array is basically a storage system, with an open switch representing a zero and a closed switch representing a one. You read the data by probing the switch with a small voltage.
Like everything else at the nanoscale, the switches and wires of a crossbar are bound to be plagued by at least some nonfunctional components. These components will be only a few atoms wide, and the second law of thermodynamics ensures that we will not be able to completely specify the position of every atom. However, a crossbar architecture builds in redundancy by allowing you to route around any parts of the circuit that don’t work. Because of their simplicity, crossbar arrays have a much higher density of switches than a comparable integrated circuit based on transistors.
But implementing such a storage system was easier said than done. Many research groups were working on such a cross-point memory—and had been since the 1950s. Even after 40 years of research, they had no product on the market. Still, that didn’t stop them from trying. That’s because the potential for a truly nanoscale crossbar memory is staggering; picture carrying around the entire Library of Congress on a thumb drive.
One of the major impediments for prior crossbar memory research was the small off-to-on resistance ratio of the switches (40 years of research had never produced anything surpassing a factor of 2 or 3). By comparison, modern transistors have an off-to-on resistance ratio of 10 000 to 1. We calculated that to get a high-performance memory, we had to make switches with a resistance ratio of at least 1000 to 1. In other words, in its off state, a switch had to be 1000 times as resistive to the flow of current as it was in its on state. What mechanism could possibly give a nanometer-scale device a three-orders-of-magnitude resistance ratio?
We found the answer in scanning tunneling microscopy (STM), an area of research I had been pursuing for a decade. A tunneling microscope generates atomic-resolution images by scanning a very sharp needle across a surface and measuring the electric current that flows between the atoms at the tip of the needle and the surface the needle is probing. The general rule of thumb in STM is that moving that tip 0.1 nm closer to a surface increases the tunneling current by one order of magnitude.
We needed some similar mechanism by which we could change the effective spacing between two wires in our crossbar by 0.3 nm. If we could do that, we would have the 1000:1 electrical switching ratio we needed.
Our constraints were getting ridiculous. Where would we find a material that could change its physical dimensions like that? That is how we found ourselves in the realm of molecular electronics.
Conceptually, our device was like a tiny sandwich. Two platinum electrodes (the intersecting wires of the crossbar junction) functioned as the ”bread” on either end of the device. We oxidized the surface of the bottom platinum wire to make an extremely thin layer of platinum dioxide, which is highly conducting. Next, we assembled a dense film, only one molecule thick, of specially designed switching molecules. Over this ”monolayer” we deposited a 2- to 3-nm layer of titanium metal, which bonds strongly to the molecules and was intended to glue them together. The final layer was the top platinum electrode.
The molecules were supposed to be the actual switches. We built an enormous number of these devices, experimenting with a wide variety of exotic molecules and configurations, including rotaxanes, special switching molecules designed by James Heath and Fraser Stoddart at the University of California, Los Angeles. The rotaxane is like a bead on a string, and with the right voltage, the bead slides from one end of the string to the other, causing the electrical resistance of the molecule to rise or fall, depending on the direction it moves. Heath and Stoddart’s devices used silicon electrodes, and they worked, but not well enough for technological applications: the off-to-on resistance ratio was only a factor of 10, the switching was slow, and the devices tended to switch themselves off after 15 minutes.
Our platinum devices yielded results that were nothing less than frustrating. When a switch worked, it was spectacular: our off-to-on resistance ratios shot past the 1000 mark, the devices switched too fast for us to even measure, and having switched, the device’s resistance state remained stable for years (we still have some early devices we test every now and then, and we have never seen a significant change in resistance). But our fantastic results were inconsistent. Worse yet, the success or failure of a device never seemed to depend on the same thing.
We had no physical model for how these devices worked. Instead of rational engineering, we were reduced to performing huge numbers of Edisonian experiments, varying one parameter at a time and attempting to hold all the rest constant. Even our switching molecules were betraying us; it seemed like we could use anything at all. In our desperation, we even turned to long-chain fatty acids—essentially soap—as the molecules in our devices. There’s nothing in soap that should switch, and yet some of the soap devices switched phenomenally. We also made control devices with no molecule monolayers at all. None of them switched.
We were frustrated and burned out. Here we were, in late 2002, six years into our research. We had something that worked, but we couldn’t figure out why, we couldn’t model it, and we sure couldn’t engineer it. That’s when Greg Snider, who had worked with Kuekes on the Teramac, brought me the Chua memristor paper from the September 1971 IEEE Transactions on Circuit Theory. ”I don’t know what you guys are building,” he told me, ”but this is what I want.”
To this day, I have no idea how Greg happened to come across that paper. Few people had read it, fewer had understood it, and fewer still had cited it. At that point, the paper was 31 years old and apparently headed for the proverbial dustbin of history. I wish I could say I took one look and yelled, ”Eureka!” But in fact, the paper sat on my desk for months before I even tried to read it. When I did study it, I found the concepts and the equations unfamiliar and hard to follow. But I kept at it because something had caught my eye, as it had Greg’s: Chua had included a graph that looked suspiciously similar to the experimental data we were collecting.
The graph described the current-voltage (I-V) characteristics that Chua had plotted for his memristor. Chua had called them ”pinched-hysteresis loops”; we called our I-V characteristics ”bow ties.” A pinched hysteresis loop looks like a diagonal infinity symbol with the center at the zero axis, when plotted on a graph of current against voltage. The voltage is first increased from zero to a positive maximum value, then decreased to a minimum negative value and finally returned to zero. The bow ties on our graphs were nearly identical [see graphic, ”Bow Ties”].
That’s not all. The total change in the resistance we had measured in our devices also depended on how long we applied the voltage: the longer we applied a positive voltage, the lower the resistance until it reached a minimum value. And the longer we applied a negative voltage, the higher the resistance became until it reached a maximum limiting value. When we stopped applying the voltage, whatever resistance characterized the device was frozen in place, until we reset it by once again applying a voltage. The loop in the I-V curve is called hysteresis, and this behavior is startlingly similar to how synapses operate: synaptic connections between neurons can be made stronger or weaker depending on the polarity, strength, and length of a chemical or electrical signal. That’s not the kind of behavior you find in today’s circuits.
Looking at Chua’s graphs was maddening. We now had a big clue that memristance had something to do with our switches. But how? Why should our molecular junctions have anything to do with the relationship between charge and magnetic flux? I couldn’t make the connection.
Two years went by. Every once in a while I would idly pick up Chua’s paper, read it, and each time I understood the concepts a little more. But our experiments were still pretty much trial and error. The best we could do was to make a lot of devices and find the ones that worked.
But our frustration wasn’t for nothing: by 2004, we had figured out how to do a little surgery on our little sandwiches. We built a gadget that ripped the tiny devices open so that we could peer inside them and do some forensics. When we pried them apart, the little sandwiches separated at their weakest point: the molecule layer. For the first time, we could get a good look at what was going on inside. We were in for a shock.
What we had was not what we had built. Recall that we had built a sandwich with two platinum electrodes as the bread and filled with three layers: the platinum dioxide, the monolayer film of switching molecules, and the film of titanium.
But that’s not what we found. Under the molecular layer, instead of platinum dioxide, there was only pure platinum. Above the molecular layer, instead of titanium, we found an unexpected and unusual layer of titanium dioxide. The titanium had sucked the oxygen right out of the platinum dioxide! The oxygen atoms had somehow migrated through the molecules and been consumed by the titanium. This was especially surprising because the switching molecules had not been significantly perturbed by this event—they were intact and well ordered, which convinced us that they must be doing something important in the device.
The chemical structure of our devices was not at all what we had thought it was. The titanium dioxide—a stable compound found in sunscreen and white paint—was not just regular titanium dioxide. It had split itself up into two chemically different layers. Adjacent to the molecules, the oxide was stoichiometric TiO 2 , meaning the ratio of oxygen to titanium was perfect, exactly 2 to 1. But closer to the top platinum electrode, the titanium dioxide was missing a tiny amount of its oxygen, between 2 and 3 percent. We called this oxygen-deficient titanium dioxide TiO 2-x , where x is about 0.05.
Because of this misunderstanding, we had been performing the experiment backward. Every time I had tried to create a switching model, I had reversed the switching polarity. In other words, I had predicted that a positive voltage would switch the device off and a negative voltage would switch it on. In fact, exactly the opposite was true.
It was time to get to know titanium dioxide a lot better. They say three weeks in the lab will save you a day in the library every time. In August of 2006 I did a literature search and found about 300 relevant papers on titanium dioxide. I saw that each of the many different communities researching titanium dioxide had its own way of describing the compound. By the end of the month, the pieces had fallen into place. I finally knew how our device worked. I knew why we had a memristor.
The exotic molecule monolayer in the middle of our sandwich had nothing to do with the actual switching. Instead, what it did was control the flow of oxygen from the platinum dioxide into the titanium to produce the fairly uniform layers of TiO 2 and TiO 2-x . The key to the switching was this bilayer of the two different titanium dioxide species [see diagram, ”How Memristance Works”]. The TiO 2 is electrically insulating (actually a semiconductor), but the TiO 2-x is conductive, because its oxygen vacancies are donors of electrons, which makes the vacancies themselves positively charged. The vacancies can be thought of like bubbles in a glass of beer, except that they don’t pop—they can be pushed up and down at will in the titanium dioxide material because they are electrically charged.
Now I was able to predict the switching polarity of the device. If a positive voltage is applied to the top electrode of the device, it will repel the (also positive) oxygen vacancies in the TiO 2-x layer down into the pure TiO 2 layer. That turns the TiO 2 layer into TiO 2-x and makes it conductive, thus turning the device on. A negative voltage has the opposite effect: the vacancies are attracted upward and back out of the TiO 2 , and thus the thickness of the TiO 2 layer increases and the device turns off. This switching polarity is what we had been seeing for years but had been unable to explain.
On 20 August 2006, I solved the two most important equations of my career—one equation detailing the relationship between current and voltage for this equivalent circuit, and another equation describing how the application of the voltage causes the vacancies to move—thereby writing down, for the first time, an equation for memristance in terms of the physical properties of a material. This provided a unique insight. Memristance arises in a semiconductor when both electrons and charged dopants are forced to move simultaneously by applying a voltage to the system. The memristance did not actually involve magnetism in this case; the integral over the voltage reflected how far the dopants had moved and thus how much the resistance of the device had changed.
We finally had a model we could use to engineer our switches, which we had by now positively identified as memristors. Now we could use all the theoretical machinery Chua had created to help us design new circuits with our devices.
Triumphantly, I showed the group my results and immediately declared that we had to take the molecule monolayers out of our devices. Skeptical after years of false starts and failed hypotheses, my team reminded me that we had run control samples without molecule layers for every device we had ever made and that those devices had never switched. And getting the recipe right turned out to be tricky indeed. We needed to find the exact amounts of titanium and oxygen to get the two layers to do their respective jobs. By that point we were all getting impatient. In fact, it took so long to get the first working device that in my discouragement I nearly decided to put the molecule layers back in.
A month later, it worked. We not only had working devices, but we were also able to improve and change their characteristics at will.
But here is the real triumph. The resistance of these devices stayed constant whether we turned off the voltage or just read their states (interrogating them with a voltage so small it left the resistance unchanged). The oxygen vacancies didn’t roam around; they remained absolutely immobile until we again applied a positive or negative voltage. That’s memristance: the devices remembered their current history. We had coaxed Chua’s mythical memristor off the page and into being.
Emulating the behavior of a single memristor, Chua showed, requires a circuit with at least 15 transistors and other passive elements. The implications are extraordinary: just imagine how many kinds of circuits could be supercharged by replacing a handful of transistors with one single memristor.
The most obvious benefit is to memories. In its initial state, a crossbar memory has only open switches, and no information is stored. But once you start closing switches, you can store vast amounts of information compactly and efficiently. Because memristors remember their state, they can store data indefinitely, using energy only when you toggle or read the state of a switch, unlike the capacitors in conventional DRAM, which will lose their stored charge if the power to the chip is turned off. Furthermore, the wires and switches can be made very small: we should eventually get down to a width of around 4 nm, and then multiple crossbars could be stacked on top of each other to create a ridiculously high density of stored bits.
Greg Snider and I published a paper last year showing that memristors could vastly improve one type of processing circuit, called a field-programmable gate array, or FPGA. By replacing several specific transistors with a crossbar of memristors, we showed that the circuit could be shrunk by nearly a factor of 10 in area and improved in terms of its speed relative to power-consumption performance. Right now, we are testing a prototype of this circuit in our lab.
And memristors are by no means hard to fabricate. The titanium dioxide structure can be made in any semiconductor fab currently in existence. (In fact, our hybrid circuit was built in an HP fab used for making inkjet cartridges.) The primary limitation to manufacturing hybrid chips with memristors is that today only a small number of people on Earth have any idea of how to design circuits containing memristors. I must emphasize here that memristors will never eliminate the need for transistors: passive devices and circuits require active devices like transistors to supply energy.
The potential of the memristor goes far beyond juicing a few FPGAs. I have referred several times to the similarity of memristor behavior to that of synapses. Right now, Greg is designing new circuits that mimic aspects of the brain. The neurons are implemented with transistors, the axons are the nanowires in the crossbar, and the synapses are the memristors at the cross points. A circuit like this could perform real-time data analysis for multiple sensors. Think about it: an intelligent physical infrastructure that could provide structural assessment monitoring for bridges. How much money—and how many lives—could be saved?
I’m convinced that eventually the memristor will change circuit design in the 21st century as radically as the transistor changed it in the 20th. Don’t forget that the transistor was lounging around as a mainly academic curiosity for a decade until 1956, when a killer app—the hearing aid—brought it into the marketplace. My guess is that the real killer app for memristors will be invented by a curious student who is now just deciding what EE courses to take next year.
About the Author
R. STANLEY WILLIAMS, a senior fellow at Hewlett-Packard Labs, wrote this month’s cover story, ”How We Found the Missing Memristor.” Earlier this year, he and his colleagues shook up the electrical engineering community by introducing a fourth fundamental circuit design element. The existence of this element, the memristor, was first predicted in 1971 by IEEE Fellow Leon Chua, of the University of California, Berkeley, but it took Williams 12 years to build an actual device.
|
BBC Home
Explore the BBC
BBC News
Launch consoleBBC NEWS CHANNEL
Last Updated: Thursday, 7 December 2006, 16:20 GMT
Tornado was like 'Wizard of Oz'
Destruction caused by tornado
Thunder, lightning, skies turning black, being bombarded by bricks and feeling like they were in The Wizard of Oz... many people in north-west London were shocked to find themselves in the middle of a tornado on Thursday.
Kensal Rise residents spoke of being plunged into darkness, some saying it was so thick they thought they were trapped in smoke from a fire.
But seconds later the gloom lifted to reveal the damage.
"It literally was The Wizard of Oz," a shopkeeper, identified only as Amanda, said.
"It was just sucking things up into it. There were tiles. There were bits of wood. There were trees."
Actress Maya Sendall said she was "absolutely shrieking with shock" on the telephone as the tornado neared her home.
She said: "It sounded like a train going by and it shook the house. I saw the rubbish bin going by and my hedge flattened by about two or three feet.
There was a thunder clap and it felt like the house was falling in on us
Julia Haughton
"I did not hear too much of the crashing because the sound of the wind was astonishing."
Student Julia Haughton, 22, who heard the tornado, said: "There was a thunder clap and it felt like the house was falling in on us. It was really frightening.
"My boyfriend ran out just as it had gone through our back yard and pulled down our neighbour's tree. When I came out, all the windows were smashed."
'Jetliner' sound
Local resident Daniel Bidgood told BBC London 94.9FM: "I was in my living room and I heard a big crack of lightning and thunder, then as I went to the window I heard a sound which was like standing behind a jetliner.
"I could see a huge cloud rolling up the street, making this tremendous sound. I went to try to take a picture of it but a shower of debris smashed all the windows of my house."
Colin Brewer, who lives in nearby Trevelyan Gardens, saw "a swirl starting to form" and then "clumps of all sorts of things flying into the air".
He had seen trees collapse in the road and people being hit on the head by flying objects.
Kevin O'Leary, 56, said: "When the hailstones started coming down I told everyone to get in the lorry and as we were doing that I heard this great roar and almighty bang.
I work on climate change at Greenpeace and here was evidence of it on my own doorstep
Frank Hewetson
"Then we got bombarded with bricks and God knows what."
Frank Hewetson, a Greenpeace logistics coordinator, was buffeted by the tornado as he cycled near his home in Chevening Road.
He said: "The sky was dark, I heard a noise like a jet engine and then I saw this column of debris.
"There was debris flying around and I was lucky not to be hit.
"I work on climate change at Greenpeace and here was evidence of it on my own doorstep.
"Some people will say it's not climate change, but I don't think we've had too many twisters in Kensal Rise."
The BBC is not responsible for the content of external internet sites
Has China's housing bubble burst?
How the world's oldest clove tree defied an empire
Why Royal Ballet principal Sergei Polunin quit
Americas Africa Europe Middle East South Asia Asia Pacific
|
Sunday, January 10, 2016
A clumsy language
When you see the waking and dreaming states as part, but not necessarily all, of the same reality, and yourself as a cloud drifting through that reality, changing density, shape, and color as you go, and reality doing the same, and lovingly embrace the possibility that you and reality do not exist at all, or that you and she do exist, not as you have learned and are in the habit of thinking, but as stars in a galaxy still pondering its desire to be born, and borne, you have come to what might be called the first day, which, in a clumsy language all its own, is best expressed in the words, “I love you,” and then, together, move on, you finally know the meaning of the rings, around the pebble, in the pond.
No comments:
|
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
/**
 Type of match between two items (one from the source array, one from the
 destination array)
 */
typedef NS_ENUM(NSInteger, MUKArrayDeltaMatchType) {
/**
 No match: items are different
 */
MUKArrayDeltaMatchTypeNone,
/**
 Partial match: items represent the same entry, but its contents change from
 source array to destination array
 */
MUKArrayDeltaMatchTypeChange,
/**
 Complete match: items are equal
 */
MUKArrayDeltaMatchTypeEqual
};
/**
 A match between two items in source array and destination array
 */
@interface MUKArrayDeltaMatch : NSObject
/**
 Index of matched item in source array
 */
@property (nonatomic, readonly) NSUInteger sourceIndex;
/**
 Index of matched item in destination array
 */
@property (nonatomic, readonly) NSUInteger destinationIndex;
/**
 Type of match (none, change, or equal)
 */
@property (nonatomic, readonly) MUKArrayDeltaMatchType type;
/**
 Designated initializer
 */
- (instancetype)initWithType:(MUKArrayDeltaMatchType)type sourceIndex:(NSUInteger)sourceIndex destinationIndex:(NSUInteger)destinationIndex NS_DESIGNATED_INITIALIZER;
/**
 @returns YES when two matches are equal
 */
- (BOOL)isEqualToArrayDeltaMatch:(MUKArrayDeltaMatch *)match;
@end
/**
 Comparator which takes two items and returns their match type
 */
typedef MUKArrayDeltaMatchType (^MUKArrayDeltaMatchTest)(id object1, id object2);
/**
 An object which tells you diffs between two arrays
 */
@interface MUKArrayDelta : NSObject
/**
 Source array
 */
@property (nonatomic, copy, readonly, nullable) NSArray *sourceArray;
/**
 Destination array
 */
@property (nonatomic, copy, readonly, nullable) NSArray *destinationArray;
/**
 Inserted indexes. Indexes refer to destinationArray.
 */
@property (nonatomic, readonly) NSIndexSet *insertedIndexes;
/**
 Deleted indexes. Indexes refer to sourceArray.
 */
@property (nonatomic, readonly) NSIndexSet *deletedIndexes;
/**
 Set of MUKArrayDeltaMatch objects which represent equal matches
 */
@property (nonatomic, readonly) NSSet<MUKArrayDeltaMatch *> *equalMatches;
/**
 Set of MUKArrayDeltaMatch objects which represent partial matches
 */
@property (nonatomic, readonly) NSSet<MUKArrayDeltaMatch *> *changes;
/**
 Set of MUKArrayDeltaMatch objects which represent movements.
 A match contained in movements set is contained inside equalMatches or
 changes set, too.
 */
@property (nonatomic, readonly) NSSet<MUKArrayDeltaMatch *> *movements;
/**
 Designated initializer.
 @param sourceArray Source array
 @param destinationArray Destination array
 @param matchTest A block to compare source and destination items.
 It may be nil, but then change detection is not performed.
 @returns A fully initialized delta between sourceArray and destinationArray
 */
- (instancetype)initWithSourceArray:(nullable NSArray *)sourceArray destinationArray:(nullable NSArray *)destinationArray matchTest:(nullable MUKArrayDeltaMatchTest)matchTest NS_DESIGNATED_INITIALIZER;
/**
 @returns YES when two deltas are equal
 */
- (BOOL)isEqualToArrayDelta:(MUKArrayDelta *)arrayDelta;
/**
 @returns Projected source index to destination taking into account only insertions
 before, deletions before and movements happened so far
 */
- (NSUInteger)intermediateDestinationIndexForMovement:(MUKArrayDeltaMatch *)movement;
@end
NS_ASSUME_NONNULL_END
|
Mercury in the Morning
The planet Mercury -- the planet closest to the Sun -- is just peeking into view in the east at dawn the next few days. It looks like a fairly bright star. It's so low in the sky, though, that you need a clear horizon to spot it, and binoculars wouldn't hurt.
Mercury is a bit of a puzzle. It has a big core that's made mainly of iron, so it's quite dense. Because Mercury is so small, the core long ago should've cooled enough to form a solid ball. Yet the planet generates a weak magnetic field, hinting that the core is still at least partially molten.
The solution to this puzzle may involve an iron "snow" deep within the core.
The iron in the core is probably mixed with sulfur, which has a lower melting temperature than iron. Recent models suggest that the sulfur may have kept the outer part of the core from solidifying -- it's still a hot, thick liquid.
As this mixture cools, though, the iron "freezes" before the sulfur does. Small bits of solid iron fall toward the center of the planet. This creates convection currents -- like a pot of boiling water. The motion is enough to create a "dynamo" effect. Like a generator, it produces electrical currents, which in turn create a magnetic field around the planet.
Observations earlier this year by the Messenger spacecraft seem to support that idea. But Messenger will provide much better readings of what's going on inside Mercury when it enters orbit around the planet in 2011.
Script by Damond Benningfield, Copyright 2008
For more skywatching tips, astronomy news, and much more, read StarDate magazine.
|
/***
* Created by Zhang Ti-kui
*
*/
#ifndef __LIZARD_PWLCM__H
#define __LIZARD_PWLCM__H
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>
#include "lizard.h"
#include "toolex.h"
#include "pwlcm.h"
/*-- States and control parameters for PWLCM1 and PWLCM2 --*/
/* NOTE(review): these are `static`, so each translation unit that includes
   this header gets its own private copy of the state — confirm this header
   is only included from one .c file. */
static uint32_t P1 = 0;
static uint32_t P2 = 0;
static uint32_t X0 = 0;
static uint32_t X1 = 0;
/* -- Seed key and initial vector for LIZARD-PWLCM ---*/
/* Kstr: 30 hex digits (120 bits); IVstr: 16 hex digits (64 bits). */
static char *Kstr="f2942e83948d92f948b938a3913493";
static char *IVstr="8f918e48d93f3016";
/* Bit-expanded key/IV buffers filled by hex2binArray().
   NOTE(review): Kbin/IVbin/low_mask are non-static globals defined in a
   header — including this file from more than one .c file would cause
   multiple-definition link errors; confirm single inclusion. */
uint8_t Kbin[122];
uint8_t IVbin[66];
uint32_t low_mask=0x0000ffff;
/***
 * Initialize an integer whose leading bits come from mostleft_bit and whose
 * remaining `length` bits are drawn one at a time from the LIZARD keystream
 * (via step()).
 */
static inline void init_integer(uint32_t *data, uint32_t mostleft_bit, int length)
{
    *data = mostleft_bit;
    for (int bit = 0; bit < length; ++bit) {
        /* shift in one fresh keystream bit */
        *data = (*data << 1) | (uint32_t)step();
    }
}
/* Initialize the two PWLCM maps from LIZARD keystream bits.
   _construct() presumably sets up the LIZARD cipher state from the
   bit-expanded key/IV buffers — TODO confirm against lizard.h.
   X0/X1 get NN random bits; P1/P2 get fixed top bits (11b / 10b) followed
   by NN-3 random bits, then init() installs the states into the PWLCMs. */
static inline void lizard_pwlcm_init(){
_construct(Kbin,IVbin);
init_integer(&X0, 0,NN);
init_integer(&X1, 0,NN);
init_integer(&P1,3, NN-3);
init_integer(&P2,2, NN-3);
init(X0,P1,X1,P2);
}
/* Seed the generator: expand the hard-coded hex key/IV strings into bit
   arrays and run the full initialization.
   NOTE(review): the IV parameter is never used — the IV always comes from
   the hard-coded IVstr, so every seed value produces the same stream.
   TODO confirm whether IV was meant to replace IVstr. */
static inline void LizardPwlcm_seed(uint64_t IV){
hex2binArray(Kstr,Kbin);
hex2binArray(IVstr,IVbin);
lizard_pwlcm_init();
}
/* Produce one 32-bit output word: two LIZARD keystream samples are each
   passed through the PWLCM coupling (pwlcm_cross) and packed as the high
   and low 16 bits of the result. */
static inline uint32_t LizardPwlcm(){
return (pwlcm_cross(step()) << 16) | (pwlcm_cross(step()) & low_mask);
//return (uint64)pwlcm_cross(step());
}
#endif
|
//aggregate.cpp
#include <iostream>
#include "aggregate.h"
#include "iterator.h"
using namespace std;
Aggregate::Aggregate() // Constructor of the abstract aggregate type (nothing to initialize)
{
}
Aggregate::~Aggregate() // Destructor of the abstract aggregate type
{
}
ConcreteAggregate::ConcreteAggregate() // Constructor of the concrete aggregate type
{
// Fill the aggregate with SIZE (5) items, stored in an int array:
// element i simply holds the value i.
for (int i = 0; i < SIZE; i++)
m_objs[i] = i;
}
ConcreteAggregate::~ConcreteAggregate() // Destructor of the concrete aggregate type
{
}
// Factory method: create an iterator bound to this aggregate.
// NOTE(review): returns an owning raw pointer — the caller is responsible
// for deleting it; confirm callers do so.
Iterator *ConcreteAggregate::create_iterator()
{
return new ConcreteIterator(this);
}
// Return the element stored at index idx, or -1 when idx is out of range.
// Fix: the original only checked the upper bound, so a negative idx would
// read before the start of m_objs (undefined behavior). Both bounds are
// now checked; the -1 "not found" sentinel is unchanged.
Object ConcreteAggregate::get_item(int idx)
{
	if (idx >= 0 && idx < this->get_size())
		return m_objs[idx];
	return -1;
}
int ConcreteAggregate::get_size() // Number of valid elements in the aggregate (always SIZE)
{
return SIZE;
}
|
#ifndef __RQUATERNION_H
#define __RQUATERNION_H
#include "rutils.h"
// Quaternion with (x, y, z) vector part and w scalar part.
struct rquaternion {
public:
	float x,y,z,w;

	// Component-wise constructor.
	rquaternion(float _x,float _y,float _z,float _w) : x(_x), y(_y), z(_z), w(_w) {}
	// Copy constructor.
	rquaternion(const rquaternion &q) : x(q.x), y(q.y), z(q.z), w(q.w) {}
	// Build a rotation quaternion from an axis and an angle (defined out of line).
	rquaternion(rvector &axis,float angle);
	// Default constructor: members are deliberately left uninitialized.
	rquaternion() {}

	// Hamilton product (defined out of line).
	rquaternion operator * (rquaternion& p);
	// Vector part of the quaternion.
	rvector v() { return rvector(x,y,z); }
	rquaternion Conjugate();
	rvector Transform(rvector& v);
};
#endif
|
If you have ever used the Windows Copy (Ctrl+C) to copy objects to the clipboard and then the Windows Paste (Ctrl+V) to copy/paste AutoCAD object(s), then you know that those clipboard object(s) will have the lower left-hand corner of their extents as the base point (not very precise)... and this always reminds me of some of the graphic editing applets (e.g.: Paint or even the wonderful AutoCAD Button Editor!) that have you draw a circle like a rectangle. (annoying to say the least!)
With AutoCAD you can use the keyboard shortcut of (Ctrl+Shift+C) to pick a base point for your clipboard object(s). COPYBASE is the actual command, and then you can paste to a precise point in the destination AutoCAD DWG file using the keyboard shortcut of (Ctrl+Shift+V). This is the PASTEBLOCK command or you can also use the PASTEORIG command if the COPYBASEd object(s) go in the same exact spot in the receiving DWG file.
Also it is important to note: If you do use the Ctrl+Shift+V PASTEBLOCK method and want to leave it as a block, AutoCAD will assign a name for the block, which is something like "A$C11A06AFD" or "A$C1F7A5022" ... Either use the RENAME command, or use EXPLODE or XPLODE, also watch your layers, with regards to the object(s) original layers and where this new "block" is being INSERTed... or where they go if they are EXPLODEd vs. XPLODEd. (I will save that for a whole different post).
|
#include "Database.h"
#include <iostream>
// Construct a wrapper and immediately attempt to open the given file.
// Any open failure is silent here; open() can be retried by the caller.
Database::Database(char const *filename)
    : database(NULL)
{
    open(filename);
}
// Destructor: release the underlying SQLite connection.
// Fix: the original destructor was empty, leaking the sqlite3* handle
// allocated by sqlite3_open(). sqlite3_close(NULL) is a documented no-op,
// so closing unconditionally is safe even if open() was never called
// or failed.
Database::~Database()
{
	close();
}
// Open (or create) the SQLite database file.
// Returns true when sqlite3_open succeeds, false otherwise.
bool Database::open(char const *filename)
{
    return sqlite3_open(filename, &database) == SQLITE_OK;
}
// Execute a SQL statement and collect every result row as a vector of
// strings (one string per column).
// Fix: sqlite3_column_text() returns NULL for SQL NULL values, and
// constructing a std::string from a NULL pointer is undefined behavior;
// NULL columns are now mapped to empty strings.
// On any error the (possibly partial) results gathered so far are
// returned and the SQLite error message is printed, as before.
vector<vector<string> > Database::query(char const *query)
{
	sqlite3_stmt *statement;
	vector<vector<string> > results;

	if(sqlite3_prepare_v2(database, query, -1, &statement, 0) == SQLITE_OK)
	{
		int cols = sqlite3_column_count(statement);
		while(true)
		{
			// SQLITE_ROW means another row is ready; anything else
			// (SQLITE_DONE or an error code) ends the loop.
			if(sqlite3_step(statement) != SQLITE_ROW)
				break;
			vector<string> values;
			for(int col = 0; col < cols; col++)
			{
				char const *text =
				    (char const *)sqlite3_column_text(statement, col);
				values.push_back(text ? text : "");
			}
			results.push_back(values);
		}
		sqlite3_finalize(statement);
	}

	// sqlite3_errmsg() returns the literal "not an error" when the most
	// recent API call succeeded, hence this string comparison.
	string error = sqlite3_errmsg(database);
	if(error != "not an error") cout << query << " " << error << endl;
	return results;
}
void Database::close()
{
sqlite3_close(database);
}
|
Ragtime and blues fused ‘All That Jazz’
By Laura Szepesi
Published: Sunday, March 17, 2013, 7:09 p.m.
Updated: Monday, March 18, 2013
EDITOR'S NOTE: Thursday marks the 85th birthday of well-known Connellsville jazz trombonist Harold Betters. We salute him with this four-part series, starting today with a brief history of jazz music.
In 1979, actor Roy Scheider brought the life of Broadway dancer / director Bob Fosse to the big screen in the film “All That Jazz.”
“All” is the perfect way to describe jazz music.
Jazz was born around 1900 in New Orleans — about the same time as the earliest music recordings became available to the public. It grew out of ragtime, which many sources claim is the first true American music.
Like jazz, ragtime has Southern roots, but was also flavored by the southern Midwest. It was popular from the late 1800s to around 1920. It developed in African American communities, a mix of march music (from composers such as John Philip Sousa), black songs and dances including the cakewalk.
Ragtime: Dance on
Eventually, ragtime spread across the United States via printed sheet music, but its roots were as live dance music in the red light districts of large cities such as St. Louis and New Orleans. Ernest Hogan is considered ragtime's father. He named it ragtime because of the music's lively ragged syncopation.
Ragtime faded as jazz's following grew. However, composers enjoyed major success in ragtime's early years. Scott Joplin's 1899 “Maple Leaf Rag” was a hit, as was his “The Entertainer,” which was resurrected as a Top 5 hit when it was featured in the 1974 movie “The Sting” starring Robert Redford and Paul Newman.
Born of ragtime, jazz was also heavily influenced by the blues. Blues originated in the late 1800s, but in the deep South. It is an amalgam of Negro spirituals, work songs, shouts, chants and narrative lyrics.
Fused with blues
Like jazz, the blues comes in many forms: delta, piedmont, jump and Chicago blues. Its popularity grew after World War II when electric guitars — rather than acoustic guitars — became popular. By the early 1970s, blues had formed another hybrid: blues rock.
While ragtime is jangly and spirited, the blues takes after its name: blue, or melancholy. Its name is traced to 1912 when Hart Wand copyrighted the first blues song, “Dallas Blues.”
Jazz — as a mix of ragtime and blues — has fused into many styles since its emergence.
In the 1910s, New Orleans jazz was the first to take off. In the 1930s and 1940s, Big Band swing, Kansas City jazz and bebop prevailed. Other forms include cool jazz and jazz rock; today, there's even cyber jazz.
Jazz: Always changing
The late jazz trombone player J.J. Johnson summed jazz up as restless. “It won't stay put ... and never will,” he was quoted as saying, according to various sources.
Johnson's sentiment is heartily endorsed by Connellsville jazz trombonist Harold Betters. Betters turns 85 years old this week. He will share decades of his memories about music and growing up in Connellsville as his March 21 birthday approaches.
Laura Szepesi is a freelance writer.
Tuesday: Just how did Harold Betters decide to play the trombone?
- Uniontown police investigate shooting injury
- Upper Tyrone family helps pet overcome paralysis
- Several Fayette boroughs have contested races
- Recap of the death of Connellsville police officer McCray Robb in 1882
- Connellsville police officer recognized 131 years after death
- Fayette County man accused of receiving stolen property, multiple drug offenses
- Connellsville set to debut model-railroad train in 2014
- Connellsville airport will remain open
- Connellsville mayoral candidate Joshua DeWitt held for trial in chop shop case
- South Connellsville man charged in pedestrian accident
- Connellsville council to make appointments, reappointments
You must be signed in to add comments
To comment, click the Sign in or sign up at the very top of this page.
Subscribe today! Click here for our subscription offers.
|
Stampa Pagina
C) Gravity and the other “forces”. (Second part).
C) Gravity and the other “forces”. (Second Part).
Article from:
Tempo nuovo, Naples 1973, nn.5-6: The unigravitational field
by Renato Palmieri
In the modern physics the methods for the measure of mass are the most improper one could imagine on the theoretical plan. The apparent precision of the determinations derives only by the usage of a certain conventional meter which usually leaves almost unchanged the relations with a mass assumed as sample: but providing that the compared masses are in strictly analogue conditions respect to the various fundamental factors, as density, magnetism, electricity, distance by other masses, etc. Varying this factors, is necessary to keep it into account modifying the formulas with various empirical arrangements, with which approximative results are obtained even though – normally – sufficiently exact to the practical scope. For the modern uses, in facts, not only are indispensable the absolute values of the elements in play, generally basing the conservation of the values of the relation inside determined limits of approximation (15).
Of all the proceedings commonly used to measure the masses, the only one which has theoretical validity is right what modern physics has denied into theory, to solve the irremediable contradictions of the relativistic formalism, and which is bound to the ancient concept of the quantity of matter: the mass is at the origin nothing else than the numerical ensemble of the elementary particles (photons) constituting a body, so that its effective value results by the product of corpuscular density for the volume. The conventional formulas which measure the masses by their effects (forces, accelerations, speed, energy) all of them contain all the basic errors which limit the validity inside particular fields, preventing a correct and universal vision of the phenomena.
So the most simple and famous of the formulas of Dynamic ma (second Newton’s principle) is empirical and approximative, not only because it consider the mass subjected m as inert and not contributing to Fbut most of all because it doesn’t keep in account the fact that the field of the mass which is source of F (let’s call it mo ), applied to m , has a total value which varies with the spatial extension of m . To clarify the matter, of fundamental theoretical importance, we resort to an exemplificative scale. Let’s suppose that the field of mo has the value 32 as measure of the punctual intensity applied to the closer volumetric unitary part of m: for the progressively farther parts such intensity progressive decrease according to a scale determined by the distance, in relations to the peculiar characters of the field (in conventional terms: gravitational, electric, magnetic, strong nuclear, weak nuclear, etc.). Let’s express in numbers one of the possible scales, for a value of m (in uniform density) variable by 1 to 32. If this unitary parts extents much into space respect to the field of mo , so that the punctual of this decrease very fast through m , where there is the sequent scale of partial values, total and medium of the field applied for the successive increments of m(remaining unvaried the distance between mo and the surface of m ) (table and fig. 1):
Unitary parts…………. Partial field….………….Total field……………Medium field
…….of m………………………of mo……………………….of mo………………………of mo
………1………………………….32………………………….32………………………….32 / 1 = 32
………2………………………….28………………………….60…………………………60 / 2 = 30
………3………………………….21…………………………..81……….………………..81 / 3 = 27
………4………………………..…11………………………….92………………………..92 / 4 = 23
………5-8…………………………4………………………….96……………………….96 / 8 = 12
……..9-16…………………………3………………………….99………………………99 / 16 = 6,1875
…….17-32………………………..1………………………….100……………………100 / 32 = 3,125
The scale of the medium field of mo , which is obtained by dividing the total field for the number of parts of m to which it results applied, coincides with the one of the acceleration suffered by m for effect of mo The acceleration is proportional to the medium applied field: being F the total field, it derived from a = F/m .
While is concretely defined the meaning of the second principle of dynamic, although inside the limits of its unidirectionality, it becomes moreover evident the interpretation of convenience which is given by the official physics, which reads it in opposite way depending on who applies it to the cosmic gravitation (F proportional to the masses, a constant) or to the other “forces” considered non gravitational (F non proportional, a inversely proportional to the masses). It is easy to observe by the table that up to a certain limit, signed by the horizontal line, the total field of mo increase almost proportionally to the mass m; therefore the medium field and the suffered acceleration remain almost univariate (diminishing only a little bit). Which is what applies, as we have seen, in the sidereal gravitation of bodies of small mass respect to bodies of high mass (Galileo’s experiment, “Newton’s tube”, meteorites). Over such limit, instead, it is the total field the one remaining almost unchanged, increasing by little with the increasing of the mass: the medium field and the suffered acceleration result, as a consequence, almost inversely proportional to the mass. We have seen that it is found in the phenomena in which the interacting masses are not too much in disequilibrium between them, that is in those of a type commonly defined non gravitational.
The analysis of the table reveals facts of extreme interest, which establish a perfect unity between the apparently disparate macro- and microcosmic interactions, confirming the discourse done so far. The band which stays over the horizontal line can be defined of “almost proportionality of F with m“. In it m results much restricted spatially in the field of mo and its weight (total field of mo applied to m , apart from the inverse one) it’s almost proportional to the same m : 32, 60, 81 are between them almost as 1, 2, 3; the acceleration (32, 30, 27) remains almost unchanged with the increasing of m (Galileo’s falling bodies, “Newton’s tube”, meteorites). The prevailing events in this strip are of “collision” of m towards mo , because of the high accelerations impressed by mo , superior to the accelerations addressed to the external fields.
The band which extends below the horizontal line presents instead the “almost non proportionality of F with m” character. In it m is sufficiently extended in the field of mo , because the total value of this changes little(92, 96, 99, 100) at the increasing of m (4, 8, 16, 32); the acceleration (23; 12; 6,1875; 3,125) results therefore almost inversely proportional to m. The prevailing events are of “escape” of m by mo , because the increasingly lower accelerations impressed by mo in direction of its own field become inferior to the ones that m undergoes in opposite direction by the external fields. Therefore phenomenally such events appear “repulsive”: mo seems “repelling” m . So it is explained the repulsiveness manifested by the particles to which an equal sign is attributed (electrons between them, protons withing them, etc.), but behave in this manner for the only fact of having almost identical masses: the same phenomenon – it has already been said – manifests between the celestial bodies, because of the equilibrated attraction of analogue masses in all directions.
The horizontal line, in the end, represents the limit value of m , non excessively inferior to mo , for which the total field of mo appears “almost proportional” to m compared with minor masses, but “almost non proportional” to m in relation to higher masses. The prevailing events are now of “orbiting” of m respect to mo, for the reached equilibrium between the accelerations directed towards mo and those directed towards the external fields: so behave planets, satellites, asteroids respect to greater celestial bodies and electrons respect to protons in the microcosm, etc. I repeat in every case that here we speak about prevalence of events of a determined type, because particular conditions of speed and direction in the gravitational motions can produce different results respect to the general survey.
In that limit line is located also the explanation of the so called “barrier of potential”, which the scholastic physics has invented to justify the watershed between “attractiveness” and “repulsiveness” of the nuclear forces (16). Over certain reciprocal distances the interactions between particles with not dissimilar masses generally manifest with escape events: only in rare cases of particular directions and translatory speeds, which carry such particles at very short distance one to the other, they end up with undergoing reciprocally an intensity of field prevailing over the one of the external fields and therefore collide (here is the “potential well”!) or enter in mutual orbiting, instead of escaping each other. The passages of the “barrier of potential” in a verse or in the other, although rare, don’t have to surprise more than con-similar events, which can verify on macro cosmic scale, surprise us (escapes for values of m in the band of proportionality or collisions and orbits for the values of m in the band of non proportionality).
Therefore there’s no reason, to explain the reciprocal orbit of two protons in the helium nucleus (particle α, or helium), to resort to an attractive “nuclear force”, acting in the “potential well” and different by the one which, over the “potential barrier”, is called “electrostatic” and would induce the protons themselves to repel each other, bonding vice versa the electron to the proton: a scary mess, equivalent to thinking that in a system of binary stars acts a different force than the gravitational one which ties planets and satellites to a greater celestial body. The most difficult equilibrium of such binary systems, and often also multiple (as in the nucleus of the complex atoms), normally becomes stabilized by external orbits of bodies or corpuscles, singularly less attractive but prevailing in number, as neutrons, mesons, electrons, etc., or – in the macrocosm – satellite celestial bodies, planetary systems, asteroids, cosmic dust. Missing this “cloud” of surrounding fields, the equilibrium is decisively instable, as the one of two electrons in the so called “positronium”, absurdly considered as the couple of a negative electron and of a positive one!
Concluding the analysis of the table, observing that, if the 32 unitary parts of m are instead condensed in a much restricted space of the field of mo , such that the partial field of mo variates by little for each part of m (for ex., between the values 32 and 28), will be found a total field applied very high (medium field 30 for 32 parts of m = total field 960), almost proportional to the mass of m : the acceleration impressed is equal to 30, that is almost equal to the one of an isolated part of m . So it becomes always more precise the sense of all the current survey, that is summed in the passive influence of density, that is of density of the subjected mass, which contributes with its own spatial extension to the effective value of the acting field. The gravitational cohesive interaction between the various parts of m , with the reciprocally attractive accelerations which coagulate it around the common mass center, reduces the value of the overall acceleration directed towards mo to a medium value, corresponding to the medium applied field of mo .
If to the passive influence of the density we add the active, that is the effect of density of mo , being it also intensifier of the field value (17), and let’s consider that this dual effects manifests in the two senses, that is taking as acting field both mo respect to m being m respect to mo , we understand perfectly the fact that the Newtonian formula of gravitation, free of every reference to density, gives insignificant values for the interactions in the nuclear microcosm, where density is very high (18).
In reality, the gravitational interactivity of a certain mass measured in absolute (mass as quantity of matter: number of elementary particles constituting a body) varies enormously, in the effects of “force” felt by other masses, depending on the conditions of aggregation of the mass itself and also of the subjected ones: which has induced to erroneously believe that the electric, magnetic, nuclear “forces” are something completely different by the macro cosmic gravitation and that, next and over the mass and its prerogative of gravitational source, exist in nature electric or magnetic “charges”, “ exchange forces”, interactions of various type and name, antimatter, etc. Of such interactivity the fundamental factors are, in addition to the mass and to the distance – already present in Newton’s formula -, density of matter and its orientation wave (“magnetism”), which instead that formula ignores completely. But also the presence of mass and distance in the Newtonian law is distorted by erroneous reasonings or limited by the misunderstanding of very important phenomena. For what concerns mass, we are just dismantling the presumption of the proportionality of gravity to the masses and the current assumptions of the measures of mass; concerning the distance, I somewhere else dealt with the determining influence of the so called “red-shift” (motion towards red of the spectral lines) over the values of gravitational intensity (19). In fact the increasing of the wave lengths in relation to the distance is caused by a periodic concentration of the waves of the field: the period becomes just determined by the distance for a mechanism relative to the structure of the gravitational propagation, in which it is the explanation of the pulsation phenomena (“pulsar”) and of periodic variability of the stars and the reason of the enormous irradiation, otherwise unexplainable, of very far galaxies (“quasar”).
Regarding the two absent factors in Newton’s formula, it has been told of density, in its dual active and passive effect, and over it I will return afterwards. The other represented by the “magnetism”: also over this topic are necessary recalls to my precedent studies (20), of which I summarize here the more general conclusions, adding however some significant implications.
Magnetism essentially consists in the different ways and degrees of undulatory “polarization” of matter and in the consequent process of co-orientation of the fields. The disposal according to which matter tends to aggregate along the reciprocal lines of the multiple gravitational propagation defines the degrees of the natural magnetic scale (21). The progressive gravitational thickening, sieving the optimal positions in relation to the structure of the field, carries matter to gradually coordinate the axes of the single propagations, gradually organizing them around a principal axis of polarization, which is the one of a complex field (“dominion”) resulting by the composition of many particular fields. In turn different dominions tend to correlate the respective axes in various arrangements, the least possible by two optimal – one polar in the same sense and one equatorial anti parallel -, outside of them occur conditions of disequilibrium with cyclical exasperations (magnetic storms, solar protuberances, earthquakes and eruptions, hot interglacial ages, etc.). So the controversial polar disposition reason of the subatomic events of gravitational escape, for which the homologue poles of two magnets bounce one respect to the other (phenomenally they “repel” each other). Still, the equatorial parallel disposition, as the one of most of the planets respect to the central celestial body (due to the common belonging to only one original mass rotating in the same sense), provokes in the reciprocal motions of rotation a continuous slowdown, which in the end brings the orbiting bodies to constantly apply the same face and after to reverse the slower of the two respective rotations, passing by the equatorial anti parallelism (22). This in fact characterizes the condition of greater gravitational harmony, as in the biological organisms to mirror symmetry (bivalve mollusks, cerebral hemisphere, etc.) (23). 
The equatorial parallelism, instead, is responsible of the phenomena of counter current (parasite currents, or of Foucault), to which refer the “Lenz’s law” and the so called “self induction”: new confirm of the universal character of the unigravitational physics.
The predominance of an axis of propagation determines therefore an accentuated dipolarity of the masses (magnet; polarity of rotation and magnetic of the celestial bodies – respect to two principal axes: mega- and meso magnetic -; spin of the particles; equatoriality of the galactic and planetary systems; equatorial rings and bands – Saturn, Van Allen, galactic bands -; “polarizations” of light; zodiacal light; etc.). And because the lines of the gravitational propagation thicken along the axis (24), this is the place of the higher gravitational speeds both in the centripetal motion (collision motions), and in the centrifugal as outcome of a trajectory of missing collision (appearing “repulsion”): is what is found in the interaction between the poles of two magnets (25).
The process of magnetic orientation of the matter coincides with the progressive reduction of the atomic-molecular speeds. The polar zones, where the matter precipitates faster than in the equatorial bands (here the gravitational lines thinned and present also a minor punctual intensity), they also reach faster, respect to the equatorial zones, an ordered magnetic attitude, in which the atomic speeds are overall inferior. Therefore they, apart from the concomitant factors (as, in the case of Earth, the inclination of the axis of rotation on the plane of the orbit), are “colder” areas. It has been verified, for example, in the shells surrounding the solar poles, without any explanation by the official physics (26).
What has been said regarding the gravitational lines at the equator, explains the equatorial expansion of the bodies of the celestial systems. Decreasing the centripetal attraction by the poles towards the equator, increases in relation to the one of the external gravitational fields: as a consequence, the equilibrium between this and the central body establishes at radial distances progressively hinger; from here the expansion. It’s therefore the relation between the mutual gravitational intensity of the fields at determining the radius of the positions of equilibrium and the relative speeds of rotation, and are not these speeds to give birth out of nowhere an imaginary “centrifugal force” (27).
It is curious the fact that a gravitational field could be commonly distinct by a magnetic field because, among the other things, the first would be unipolar! (28) Indeed, a mass clearly not magnetic is such only because it presents a high multiplicity of axes, all insensitive, whose dispersion is in fact cause of the scarce interactivity of the mass. The magnetic orientation of matter therefore do make to the gravitation the second jump of intensity, after the one produced by density, by the weak proportional values of the macro cosmic masses to the very high of the so called electric “charges” and of the “nuclear energy”.
If we would restrict to the center of the Earth all the terrestrial mass in a very little sphere, dense as the nuclear matter, the terrestrial gravitational field would become enormously more intense equally at every other condition: at the same distance of the current radius of the Earth the apple would have fall on Newton’s head with a much higher weigh respect to the one which he was familiar with and his formula and his wrong calculations would pretend independent by the density of the masses (cfr. n. 17). In front of such hypothetical eventuality, Newton would have invented, for this phenomenon non accordant to his expectation, a new force of intensity duly multiplied respect to the known gravitation. He would then be forced to make a further enormous multiplication and the relative invention of “forces”, if the little sphere containing the entire mass of the Earth would coordinate in a optimal way the axes of all the particular propagations in a compact “dominion” at very high dipolarity, extraordinarily intensifying its own magnetic field, and so enhanced would interact with analogue little spheres.
At the origin of pseudo concepts as electric “charges”, “nuclear forces” and similar, over the two relative errors at density and magnetism, there is then the false reading of the “repulsive” phenomena, which seem such – and therefore strangers to the gravitational interaction, always attractive – only because seen by a mental deforming perspective: what appears “rejected” by something, is is reality, as we have seen, “attracted” by something else in different direction.
The validity of this observation extends to the scope of psychic phenomena, whose modality perfectly fall under the unigravitational analysis of the universe: that is, “hate” isn’t really “repulsion” for someone or somebody, but is prevalence, in the unconscious, of “love” for oneself (for his own organism, for his own physio-psychic sphere: self-defense, instinct of conservation) or for external objects different by the one from which we feel “rejected” (29). It is not therefore anything different by the peripheral “barrier” which prevents or hinder the inter penetration between two bodies, causing the “bounce” of the bumped body towards internal or external directions divergent by the one which goes towards the colliding. This means, on the philosophical level, that hate does not have a absolute value, always reducing to an excess of love: the latter is the only absolute function in the psychic area, as gravitational attraction in the physical area. Let’s add, as corollary, that self-preservation of the gravitational systems, which manifests as resistance to the violent inter penetration with other systems of analogue mass (remember the analysis done of the behavior of electrons, starts, etc.), is a necessary moment to allow the gravitational undulatory “composition” with other bodies and build with them wider and more complex harmonic structures. Which means that self-preservation is not the purpose of living beings, but is the way to love: it is so scientifically reversed the relation placed by Hobbes between man and humanity, the first as “wolf” for every other man, the second as society regulated by the “equilibrium of selfishness”: a relation which seemed validated by a distorted interpretation of the biological evolution. In its place Christ’s precept becomes recognized as a certain law of nature: “Thou shalt love thy neighbor as thyself “.
To a corpuscle which presents the character of scarce interactivity with the external particles is commonly attributed the qualification of “neutral”. This is due to a condition of magnetic disorder (expanded and “hot” corpuscles: for ex., the neutron, compared with compact, highly magnetic and “cold” proton), or to a different collocation of the “barrier of potential” of the particles constituting the corpuscle (as in the proton-electron system: there is normally equilibrium between attractive effects of a particle and “repulsive” effects of the other), or at a regular magnetic anti parallelism of equal composing particles (nucleus and saturated layers, with protons in anti parallel couples, as in the “inert” gas: there is equilibrium between attractive effects of a pole and “repulsive” effects of the other) (30). As for the value presumed unitary of the “charge” both positive and negative, it is preordinate by the caliber of our instruments, whose sensibility is at the limit of a certain gravitational intensity: this appear identical, because perceived at different distances from the field’s center of the proton of the electron which acts as a screen. In other terms, the instrumental perception arrives up to the “potential barrier” of the proton and to the one of the electron, which has the same intensity, because the first is much more distant by the proton than the second is of the electron (31).
Let’s now return to the problem of the measure of mass, which become executed in base of the gravitational effects of the masses themselves. Let’s now observe in this regard that the factors of density (active effect) and of the magnetism has in the gravitational interaction a much restricted radius of prevailing influence, over which remains almost exclusively sensible the nude factor of the amount of matter (still remaining measured in the effects instead that in absolute): right for this Newton’s formula can prescind without too much harm, in the measure of the macro cosmic gravitation, by those two factors, whose gravitational character is however clearly underlined by the similarity of the formulas of the electric and magnetic interaction with the Newtonian law.
Therefore depends by the method and by the instrument of measure employed, if the values of mass result by the calculation almost naked or altered by the coefficients of the very short distances (electric and magnetic “charges”, “nuclear forces”, etc.), which must be deducted to reach the pure and simple effect of the “quantity of matter”.
So, if I must measure the mass of a iron bar relatively to the sample mass of a second iron bar, I can use as a instrument of measure the same sample bar, trowing it with a known force against the other and measuring the acceleration impressed to this. In such case, however, the resulting will differ widely depending on the fact that the two bars are both magnetic (and that homologous or opposite poles are facing), or only one, or none. This because the method and the instrument of measure are sensible, in the interaction at a very short distance, at the gravitational magnetic effect.
For the same calculation I will also be able to use the Earth as instrument of measure, putting on a scales the two bar. The magnetic characters of the bars in front of the gravitational terrestrial field will then become almost irrelevant, enormous as a value of mass, but relatively weak for the value of dipolarity (magnetism): the scales will give me in any case the relation almost exact between the two masses. Naturally, using this method, I will take note of the practice proportionality of the force to the masses; using the other, of the almost non proportionality of the force applied to the masses.
Moving to the world of particles, we will meet completely analogue situations. Making the particles interact between them, we will mainly take over the gravitational effects perceptible at the shortest distances – density and magnetism – and we will pull out “charges”, mythological signs of “plus”, “minus” and “anti-”, nuclear forces of binding and exchange, and so on. Made the tare of all this ingredients, we will calculate the masses. It is the method of the two iron bar. Or we will use the electromagnetic fields, which are the equivalent – made the proportion with the particles – of the terrestrial gravitational field used in the method of the scales. And here is the “spectrograph of mass”, which will give values of mass closer to the naked ones of the quantity of matter, being sufficient for calculating them having at the beginning in the particles an equal condition of “charge”.
But in the reading of the results a coarse gap takes over respect to the measure of macro cosmic masses: presuming that the forces in play in the electric and magnetic fields are not of gravitational type, they become absolutely considered non proportional to the particles masses, so as the electromagnetic forces manifest if applied to macro cosmic masses. The analysis done over our table instead showed us that, in a gravitational field, passing by a certain order of magnitude and density of the subjected masses to an order of minor magnitudes and higher density, the gradual passage by the almost non proportionality to the almost proportionality of the field applied to the masses occurs. Errors will therefore inevitably intervene which, on the basis of the comparison of accelerations evaluated with the rigid criterion of proportionality reverse to the masses, will make assign to the particles not true values of mass.
Also without variations of density, results, for example, by the table which, if an sample object of mass 8 undergoes by the field an acceleration pair to 12, another body, to which the same gravitational field impress an acceleration = 24, would be evaluated of mass 4, while in actually it would have m < 4. Vice versa, if the referring object has mass 4 and acceleration 23, an accelerated body of 11,5 and evaluated therefore of mass 8 would have actually > 8. But the most paradoxical phenomenon is found in relation to variations of density of the subjected mass, most of all when such variations involve the passage by one to the other band of the table. From this is noticed that a total field = 100 causes on a mass = 32 an acceleration = 3,125. If now we condense all the mass in the space of the first two unitary parts (fig. 1), for which the medium applied field is 30, the total field of mo applied to m increases to 960, without mo minimally changing, and the acceleration impressed increases to 30. The formula F = m a forces us to instead suppose an invariant of F and in relation to it an acceleration always pair to 3,125. The calculated mass for an acceleration = 30 will therefore be 32 * 3,125 / 30 = 3,33, against an actual value about 10 times higher, being changed only density of the original mass = 32. The conclusion is astonishing: masses of the particles, evaluated in base of the accelerations impressed to them by the electromagnetic fields of intensity referred to macro cosmic effect, are inferior to the true, because such fields, almost non proportional respect to the macro cosmic masses, has instead an effect of almost proportionality over the very dense masses of the subatomic particles, which therefore become accelerated much more than the foreseen. 
The masses calculated generally retain an appearance of validity, being approximatively respected the values of relation, as it has been previously noticed; but over certain speeds, arriving particles at a deeper interaction in the reciprocal gravitational fields, the produced effects necessarily end up with overstepping by a plausible approximation: it is another of the reasons which force to the conjecture of a fiction “relativistic increase of mass” and lately to notice an increasing in the so called “cross section” of the ultrafast protons (32), without finding any logical explanation of the phenomena at issue. Here takes birth the enormous confusion which, as it is known, rage in the physics of particles, paralyzed by the lacking of a serious general theory of the macro- and microcosmic interactions. We have by now reached the “anti omega minus”: so an immediate block of the discoveries becomes necessary!
Let’s return at last to the sidereal gravitation and to Kepler’s and Newton’s laws. We rode all the way of the mistake which has made attribute universal value to formulas clumsily approximative. The orbital motion of the planets around the Sun, for the existing relation between the planetary masses and the Sun, (and so the one of satellites around planets) collocates almost along the horizontal line of our table, that is over values of field for which the total field of the Sun is still approximatively proportional to the masses of the single planets and therefore its variations seem depending only by the rays of the orbits. This way Kepler could believe exact his third law R³/T² = constant, hypothesizing the universal value. At this point, Newton had nothing more to do than introducing it in the formulas of his second principle and of the circular motion, to fatally arrive at the so called law of universal gravitation:
F = G (m₁ m₂) / R²
This formula, – as it is obvious – of the same empirical value and approximative of its Kepler matrix, dressed surreptitiously of the same halo of universality, putting for three centuries out of the road the modern scientific thought. Without telling that Newton’s formula, by bringing into question (with a progress respect to the 2° principle F = m a) the second of two interacting masses, completely ignores the clearly determining action of all the surrounding masses, which go hiding in the role of that Cinderella by unknown parents, which is the “centrifugal force”! (cfr. n. 27) (33).
And yet of Kepler’s law was very easy to make the arithmetic counter proof, which would have right away demonstrated the absolute theoretical nullity, and also practical over a limited scope of relations. I reserve that litmus test as a conclusion to this work, having now to occupy of the so called “universal constant of gravitation” G (for constants and universal we mean, in the modern physics, some particular variables!), measured by Cavendish with the well known experiment. It was absolutely needed to calculate the planetary masses on the base of the Newtonian formula and could be obtained only empirically by a tiny model of the sidereal interaction. So Cavendish conceived his gravitational torsion balance, which notes the force exercising in laboratory between two masses of known value.
But since the mass of the celestial bodies, starting with the one of the Earth, can be measured only in relation to the constant one, it follows that, if the experiment was theoretically wrong, today we would ignore the actual measures of the planetary masses. Well, this is precisely the fact: Cavendish’s experiment is affected by two fundamental mistakes, hard to detect on a small scale, but which carry us to measure, as it has been said, in place of a “universal constant” a modest variable: which is already been demonstrated by the fact that, among the fundamental constants, the gravitational one has been calculated with minor precision, not exceeding the approximation – scientifically ridiculous – of 1 / 500.
The first error consists into completely neglecting density of the interacting masses, which in the Newtonian formula is considered irrelevant. In Cavendish’s experience masses all have same density and disregarding the specific gravity of the constituent: according to the formula, the result should not be influenced by it. And instead, compatibly with the possible degree of precision of the instruments, one should find that the force exercised reciprocally is higher between masses of more dense material, because of the active and passive effect of density. The second error is into believing that such force, measured between masses of laboratory, is proportionally equal to the one which acts between the Sun and the planets. We have instead seen that planetary gravitation is characterized by a very strong imbalance between the masses in platy and that it is precisely this imbalance which gives to the force of the celestial body more character than almost proportionality to the mass of the minor celestial body. Not being realized in the laboratory such condition of enormous difference, the measured force has the character of almost non proportionality to the subjected mass and therefore constitute an absolutely improper model of the planetary gravitation.
After all, also in the cosmos, the proportionality of the force of gravity to the subjected masses is a fact approximatively valid only for the force of the greater celestial body respect to the minor one: Kepler’s third law and Newton’s consequent one demonstrate their inconsistency, if we try to verify in reverse, that is applying it to the force of the minor celestial body respect to the greater one. We will now numerically demonstrate what we already know by the theoretical analysis, that is that the field of the minor body quickly diminishes starting by the closer zones of the bigger body and therefore becomes in total almost non proportional to the mass of the bigger body, to which it gives an acceleration almost inversely proportional to the mass of the body itself.
We will therefore take in exam the reverse of the planets revolution around the Sun, that is precisely the revolution of the Sun respect to the Earth and the other planets. The difference from Ptolemy is in the term “respect to”, but we right away have to take the distances – and very clear – also from the actual vision, completely unfounded, of the phenomenon. After all, in the geometric reality of the spatial motions also the planets orbit, properly speaking, “respect to the Sun” and not “around the Sun”.
Let’s proceed with order. The logic and the Newtonian law itself tell us unequivocally that the effects of the gravitation between two or more bodies are reciprocal and differentiate only for the spatial and temporal dimensions of the provoked motions: in this also agrees the generic Einsteinian idea of the “curvature of space”. Now, if the gravitational solar field is such to cause the revolution of the Earth around the Sun, we must research the precise measure and modality of the mutual phenomenon which terrestrial gravitation produces over the mass of the Sun.
Let’s first see what modern cosmology think about it. At the voice “Moon” of the EST (Encyclopedia of Science and Technique, Mondadori, V edition) we read:
“The Earth and the Moon at present perform a revolution around their center of gravity or common center of mass (a point situated around 4670 km from the center of the Earth) in 27d 7h 43m 11.6s”.
At the voice “Celestial mechanics”:
“Both bodies [the Sun and the planet] describe, around the common center of gravity, two orbit having exactly the same shape and the dimensions of each orbit are inversely proportional to the mass of the body”.
“The only motion directly observable is the one of the planet around the Sun”.
From the above it is clear that the modern physics consider the two motions around the common center of gravity as synchronous, one in opposition to the other, identical to those of two unequal balls which rotate over themselves at two extremities of a handlebar at variable length (for the ellipticity of the orbits) and in rotation around its own barycenter (fig. 2).
In first place we notice, as however it is noted by the EST, that the motion of the greater celestial body, for the narrowness of its orbit, over which is constantly in opposition at the minor body, isn’t astronomically verifiable: it is therefore, in the indicated terms, a hypothetic motion regarding the natural test. Indeed I argue that the timing of the two orbits is nonexistent – except in the limit case that the two masses are identical – and reciprocity of the motion must be otherwise.
My reasoning, indeed, follows the common one only up to a certain point. The non coincidence of the barycentric of the system with the center of mass of one of the two bodies and the variations of speed (for both local and general factors) determine the ellipticity of the orbits (fig. 2). As the Earth of T1 undertakes a motion tending to orbit around the Sun, this reciprocally moves by S1 on a route tend addressed to circumnavigate Earth. But being too weak the gravitational terrestrial force in relation to the solar mass, the Sun cannot embrace the terrestrial orbit in its own and limits itself to circumscribe the barycentric of the system. But here intervenes the substantial difference with the common reading of the phenomenon:
a). The barycenter of two orbiting bodies, respect to which it’s needed to consider the reciprocal revolution, is not the static one of a handlebars, that is such to rigidly constrain the elements of the system to a perfectly united motion: rather it is a dynamic barycenter, in the meaning which I will define hereinafter. Stillness of the barycenter intervenes only over a scale of phenomena in which the various parts of a system are concatenated in a overall warp of mutual fixity: for example, an iron object in any motion has a static barycenter determined by the reciprocal immobility of its macroscopic parts; but two atoms of the same object in continuous relative motion has between them only one dynamic barycenter.
b). While the static barycenter taken as reference by the Newtonian physics is constantly found on the jointing the two center of mass, the dynamic barycenter is constantly situated over the major axis of the orbit (apsidal line) by the part of the apoastro (aphelion, apogee, etc.): that is it refers at the moment in which two bodies, reached the maximum relative distance, return to undertake a prevailing reciprocal attraction and precipitate along the bends of the mutual gravitational fields. The distance of the barycenter from the two centers of field is in proportion inverse to the intensity of the two fields.
c). Respect to the dynamic barycenter so defined, the revolution of the Sun, far from being synchronous with the one of the Earth, is instead extremely slower and has as natural effect and at once evident proof of such slowness the rotation of the line of the apses: this is the famous “shift in perihelion”, for which explanation uselessly relativity has been bothered (fig 3).
In the measure which the Sun, weakly solicited by the terrestrial field, rotates around the barycenter of the system, makes also rotate in natural synchrony with such motion the line of the apses. The same happen, obviously, for all the planetary motions and signs the true period of the inverse revolution of the greater celestial body respect to the minor. Between two equivalent masses (as binary stars of equal mass and identical field, the two protons of helium atom, etc.) the reciprocal orbiting is synchronous and the true period of revolution is not signed by the completion of a round by each body (34), but by the entire rotation of the apsidal line (which is then the real reciprocal revolution of two bodies: rosette orbiting) (fig. 4).
Moreover we precise that the actual measure of the apsidal revolution is not to be traced to the only action so determined by the minor body over the greater, including in itself stresses also of other origin, whose entity is to be purified in our reasoning. Here therefore we refer to the additional part of the rotation of the apses, over the value of which classic mechanic manage, for better or worse, to account: For example, the shift in perihelion of Mercury is of 574″ of arc per century, of which only 42″ constitute the additional rotation (35).
So established the exact meaning of the Sun revolution in relation to the Earth, it only remains to finally apply in reverse the third Kepler’s law, as its check up and of all the speech done so far. The result is astonishing and is enclosed in a very easy calculation. Let’s move the reference point from the Sun to the Earth, considering respect to this the revolution of the Moon and the one now analyzed of the Sun itself.
Here is the table which derivate, in function of the values R and T of the lunar orbit taken as unit:
R………………. 1………………….149.500.000 / 384.000 = 389
R³……………….1….…………………….389³ = 58.863.869
T²……………….1…………………………x² = 58.863.869
Whence x = 7672 lunar revolutions, that is about 590 years.
If then Kepler and Newton were right, that is if the terrestrial force of attraction was proportional to the masses of the Moon and the Sun, it would produce a solar revolution of 590 years. But the verification of this value is now easy: it will in fact be needed to compare it to the additional one of the apsidal terrestrial revolution, that is of a complete additional rotation of the perihelion of the Earth. Well this period has been calculated and it is of 34 millions years! (36) If we want to make the proper reserves on the calculations of the classic mechanic, because the actual apsidal rotation happens in the period of about 112.000 years (11,6″ of acre per year), this value is the minimum referable to the single solar revolution, if for absurd there wasn’t the other solicitations concurring: it, however, would result still much superior to the 590 years foreseen by the third Kepler’s law (which for its account already denies the ridiculous period of a year of the alleged synchronic revolution of the Sun, bound at handlebars with the Earth!).
There has been so the most evident mathematical confirmation of the assumption of this investigation: that is that gravity is only in the appearance proportional to the masses of the falling bodies in the direction of the force which a very big body exercises over a very small one, but it is not such not even approximately in the effect produced by the minor body over the greater, keeping along the median relations towards the “non proportionality ” – also only as a limit – of all the other forces. The period of the apsidal revolution is in fact enormously superior to the one which we would have in case of actual proportionality of the terrestrial attraction to the masses of the other celestial bodies.
Remains confirmed that the empirical result of Cavendish’s experiment and similar aimed at calculating the so called “universal gravitational constant” is not absolutely extensible as unity of measure to all the gravitational phenomena of the universe. Naturally the values attributed to the masses and to the density of the celestial bodies are all in absolute erroneous, because calculated exclusively in function of that false constant, without further verification.
But the graver consequence of Kepler’s an Newton’s mistake is represented by the apparently insurmountable barrier which it inserts between the gravitation and the other cosmic forces: a barrier which has so far frustrated the deep need of the human thought to realize in science the organic unity of all the laws in the universe.
(15) “(For Mach) measurable … is not mass in absolute, but the relation between masses, defined only in function of the reciprocal action exercised between the masses themselves” (A. Trebeschi: “Sapere” n 757, pag. 10).
16) W. R. Fuchs, cit. op., pag. 273.
(17) R. P., Introduction to the unigravitational physics, pages. 20-24; Physics of the unigravitational field, vol. 2°, pages. 48-50.
(18) “Tempo nuovo” n. 2/1973, pag. 49, note 11.
(19) R. P., The unigravitational physics, §§ 28-29; “Tempo nuovo ” n. 3/1972, pages. 53 and following.
(20) R. P., Physics of the unigravitational field, §§ 13-15, 38-48; Magnetism and earthquakes. The forecast of the earthquakes (“Tempo nuovo” nn. 1-2/1972); Magnetism and heat (“Tempo nuovo” nn .5-6/1972 e 2/1973).
(21) “Tempo nuovo” nn. 1-2/1972, pag. 43.
(22) As it is known, also the duration of the terrestrial day increases slowly because of the interaction Moon-Earth.
(23) “Tempo nuovo” nn. 1-2/1972, pages. 36-57.
(24) “Tempo nuovo” nn. 1-2/1972, pages. 34-35, figures 1-3.
(25) “Tempo nuovo” nn. 1-2/1972, pages. 35-36.
(26) “Scienza e Tecnica/73″, Mondadori, pag. 17.
(27) R. P., The unigravitational physics, pag. 64.
(28) O. M. Phillips, Geophysics, Mondadori, pag. 168.
(29) R. P., The unigravitational physics, pag. 84.
(30) The usual terms of para magnetism and diamagnetism refer exclusively to the effects of dipolarity. In the “natural magnetic scale” referred to the note 21, they indicate instead the different structural complexity (increasing from first to second) and extension of the dominions (respectively decreasing); so substances with low nuclear density are, for such scale, paramagnetic, and with high diamagnetic densities . Dipolarity depends most of all by the distribution of the protons in the most external nuclear layer and therefore the relative phenomena are highly recurring along the scale of densities. As a consequence, the value of the terms does not coincides in the current and in ours.
(31) R. P., Introduction to th unigravitational physics, pages. 19-20; Physics of the unigravitational field, § 60.
(32) “Sapere” n. 764, pages. 31 and following.
(33) At this point it is good to clear a concept extremely ambiguous in the common physics: the one of “absence of weight”, on which senseless opinions run (W. R. Fuchs, cit. op., pages. 232-234; Caianiello, De Luca e Ricciardi, cit. op., vol. 1°, pages. 116-117). It is in fact confused the absence of weight with the sensation of an absence of weight. The first verifies in the orbiting or in the point at zero speed between ascent and relapse of a body and it is due to the simultaneous action of a centripetal acceleration and of an equal centrifuge acceleration (R. P., the unigravitational physics, pag. 65). The second is felt, although weight is not null, inside a pressurized cabin at free fall and derivate s by the absence of an interaction of contact or of an impact with the walls of the cabin, whose speed is equal in absolute value and in the verse to the one of the internal bodies: in the contact and in the impact (R. P., Physics of the unigravitational field, §§ 61-69) the atomic peripheral speeds has opposite direction, determining events more or less accentuated of interpenetration or bounce and therefore the sensation of weight. This essentially comes from the disequilibrium – in verse and absolute value – between the accelerations suffered by the various parts of the body, which attracted all in direction of the center of a celestial body, are at the same time rejected by atomic-molecular interactions of escape respect to the surface of contact.
(34) “Tempo nuovo” nn. 5-6/1972, pages. 64-65.
(35) See ” Relativity ” In the Enciclopedia Italiana and in the EST. In the last one is written: “Because of the slight perturbations caused by a planet over the other, all the planetary orbits rotate very slightly, so that the position of their aphelions changes into time. In the case of Mercury, general relativity predicts an additional rotation and aphelions of about 43″ of arc per century, quantity which, in spite of its smallness, has been verified with satisfying accuracy”. But the calculation does not fit at all for the planet Mars (8,03″ against a prevision of l,35″ ). Cfr. R. P., Physics of the unigravitational field, § 26-(5).
(36) The additional motion of the perihelion is 3,8″ of arc for century: see J. A. Coleman, La relatività è facile (Relativity is easy), ed. Feltrinelli, pag. 109.
Let’s close this section with a recall to the other preceding relative to the unigravitational physics, quoted in the Bibliography of the author.
They represent – as it were – the “archeology” of the new physics, but are however still, yet in the necessary adjustments made after the magmatic originally thought, an essential complement of the current opera.
It is not moreover possible to put remedy to the real difficulty of their modern retrieval and re-reading, at centuries of distance by the first publications made.
In the economy of the present work could not find place, for example, the description of the unigravitational structure of the atom – from hydrogen to the more complex elements of the periodic system – , which is read in Physics of the unigravitational field (§§ 56-57), edited in 1969.
Actually, the punctual study of the elementary particle object of section 6, in the entire context of the laws of universal structuring which move from it, exempt by the necessity of exhaust all the formative inter medium passages, which can not fail to re present, in a in a way more or less obvious and divisible, in the inside of particles and corpuscles the general morphology of every macroscopic structure.
Between the premise to the first article of this section 4 (October 1997) and this closure (January 2005) more than seven years has passed. Such writing referred to the earthquakes happened in Italy, recent at that time, and introduced an article published in 1972 over magnetism and the earthquakes forecast. Today it should repeat identical, but with much more dramatic tones, after the tsunami of December 26th, 2004 which has devastated the coasts of a continent. Over two hundred thousand persons have died and all the wildlife was saved. A collateral effect of the foolishness of contemporary science (which would do well to study the effect of Barkhausen, natural for animals, instead of toying with the black holes fairytale, and similar).
Permalink link a questo articolo:
|
//
// Created by Michael Fong on 2016/11/12.
//
#ifndef LOSTINCOMPILATION_MATHCOMPILER_H
#define LOSTINCOMPILATION_MATHCOMPILER_H
#include "compile/Compilable.h"
/**
 * MathCompiler compiles given math equations from source input
*
* @since 0.1
*/
class MathCompiler : public Compilable {
public:
    MathCompiler();
    virtual ~MathCompiler();
    // Compile the equations contained in the file named `fname`.
    // Presumably returns true on success -- implementation is elsewhere; confirm there.
    virtual bool compile(std::string& fname);
    // Compile equations supplied directly as lines of text (one per element).
    // Presumably returns true on success -- implementation is elsewhere; confirm there.
    virtual bool compile(std::vector<std::string> content);
};
#endif //LOSTINCOMPILATION_MATHCOMPILER_H
|
#ifndef _GEOMETRY_H_
#define _GEOMETRY_H_
#include <iostream>
#include <iomanip>
#include <cmath>
#include <algorithm>
/************************************************************************/
/* CVector */
/************************************************************************/
double CNormalize(double angle);
// 2D Euclidean vector with double components.
class CVector {
public:
    // Default-constructs to the zero vector.
    CVector() : _x(0), _y(0) {}
    CVector(double x, double y) : _x(x), _y(y) {}
    CVector(const CVector& v) : _x(v.x()), _y(v.y()) {}
    // Overwrite both components; always returns true (kept for API compatibility).
    bool setVector(double x, double y) {
        _x = x;
        _y = y;
        return true;
    }
    // Euclidean length |v|.
    double mod() const {
        return std::sqrt(_x * _x + _y * _y);
    }
    // Squared length; cheaper than mod() when only magnitudes are compared.
    double mod2() const {
        return (_x * _x + _y * _y);
    }
    // Direction angle in radians, in (-pi, pi].
    double dir() const {
        return std::atan2(y(), x());
    }
    // Signed angle from v to this vector, wrapped into [-pi, pi].
    // Now const-correct; the hard-coded pi literals are named.
    double theta(const CVector& v) const {
        constexpr double kPi = 3.14159265358979323846;
        double diff = std::atan2(_y, _x) - std::atan2(v.y(), v.x());
        if (diff > kPi) return diff - 2 * kPi;
        if (diff < -kPi) return diff + 2 * kPi;
        return diff;
    }
    // Rotated copy of this vector; defined out of line.
    CVector rotate(double angle) const;
    // Unit vector in the same direction; warns and returns (1, 0) for
    // near-zero vectors that have no meaningful direction.
    CVector unit() const {
        const double len = mod();  // hoisted: original recomputed mod() three times
        if (len < 1e-8) {
            std::cout << "WARNING Vector too small to have unit vector!\n";
            return CVector(1, 0);
        }
        return CVector(_x / len, _y / len);
    }
    double x() const {
        return _x;
    }
    double y() const {
        return _y;
    }
    // Scalar projection of this vector onto the direction `angle` (radians).
    double value(double angle) const {
        return mod() * std::cos(dir() - angle);
    }
    CVector operator +(const CVector& v) const {
        return CVector(_x + v.x(), _y + v.y());
    }
    CVector operator -(const CVector& v) const {
        return CVector(_x - v.x(), _y - v.y());
    }
    CVector operator *(double a) const {
        return CVector(_x * a, _y * a);
    }
    // Dot product.
    double operator *(CVector b) const {
        return double(_x * b.x() + _y * b.y());
    }
    CVector operator /(double a) const {
        return CVector(_x / a, _y / a);
    }
    CVector operator -() const {
        return CVector(-1 * _x, -1 * _y);
    }
    friend std::ostream& operator <<(std::ostream& os, const CVector& v) {
        return os << "(" << v.x() << ":" << v.y() << ")";
    }
private:
    double _x, _y;
};
/************************************************************************/
/* CGeoPoint */
/************************************************************************/
// 2D point in the plane.
class CGeoPoint {
public:
    // Default-constructs to the origin.
    CGeoPoint() : _x(0), _y(0) {}
    ~CGeoPoint() {}
    CGeoPoint(double x, double y) : _x(x), _y(y) {}
    CGeoPoint(const CGeoPoint& p) : _x(p.x()), _y(p.y()) {}
    // Exact floating-point equality; now const-correct so it works on const
    // points. NOTE(review): computed coordinates rarely compare exactly equal;
    // callers needing tolerance should compare dist() against an epsilon.
    bool operator==(const CGeoPoint& rhs) const {
        return ((this->x() == rhs.x()) && (this->y() == rhs.y()));
    }
    double x() const {
        return _x;
    }
    double y() const {
        return _y;
    }
    void setX(double x) {
        _x = x; // added 2014/2/28: set the x coordinate (yys)
    }
    void setY(double y) {
        _y = y; // added 2014/2/28: set the y coordinate (yys)
    }
    // Set both coordinates at once; always returns true (kept for API
    // compatibility). Added 2018/4/14 (wayne).
    bool fill(double x, double y) {
        _x = x;
        _y = y;
        return true;
    }
    // Euclidean distance to p.
    double dist(const CGeoPoint& p) const {
        return CVector(p - CGeoPoint(_x, _y)).mod();
    }
    // Squared distance to p (avoids the sqrt).
    double dist2(const CGeoPoint& p) const {
        return CVector(p - CGeoPoint(_x, _y)).mod2();
    }
    CGeoPoint operator+(const CVector& v) const {
        return CGeoPoint(_x + v.x(), _y + v.y());
    }
    CGeoPoint operator*(const double& a) const {
        return CGeoPoint(_x * a, _y * a);
    }
    // Displacement vector from p to this point.
    CVector operator-(const CGeoPoint& p) const {
        return CVector(_x - p.x(), _y - p.y());
    }
    // Midpoint of the segment between this point and p.
    CGeoPoint midPoint(const CGeoPoint& p) const {
        return CGeoPoint((_x + p.x()) / 2, (_y + p.y()) / 2);
    }
    friend std::ostream& operator <<(std::ostream& os, const CGeoPoint& v) {
        return os << "(" << v.x() << ":" << v.y() << ")";
    }
private:
    double _x, _y;
};
/************************************************************************/
/* CGeoLine */
/************************************************************************/
// Infinite line through two points, also stored in implicit form a*x + b*y + c = 0.
class CGeoLine {
public:
    // Default line is degenerate (both points at the origin); the coefficients
    // are zero-initialized so accessors never read indeterminate values.
    CGeoLine() : _a(0), _b(0), _c(0) {}
    CGeoLine(const CGeoPoint& p1, const CGeoPoint& p2) : _p1(p1), _p2(p2) {
        calABC();
    }
    // Line through p in direction `angle` (radians).
    CGeoLine(const CGeoPoint& p, double angle) : _p1(p), _p2(p.x() + std::cos(angle), p.y() + std::sin(angle)) {
        calABC();
    }
    // Derive the implicit-form coefficients from _p1/_p2.
    void calABC() {
        if(_p1.y() == _p2.y()) {
            // Horizontal line: y = p1.y.
            _a = 0;
            _b = 1;
            _c = -1.0 * _p1.y();
        } else {
            _a = 1;
            _b = -1.0 * (_p1.x() - _p2.x()) / (_p1.y() - _p2.y());
            _c = (_p1.x() * _p2.y() - _p1.y() * _p2.x()) / (_p1.y() - _p2.y());
        }
    }
    // Orthogonal projection of p onto this line.
    CGeoPoint projection(const CGeoPoint& p) const {
        if (_p2.x() == _p1.x()) {
            // Vertical line: the projection keeps p's y coordinate.
            return CGeoPoint(_p1.x(), p.y());
        } else {
            // Slope of the line through p1 and p2.
            double k = (_p2.y() - _p1.y()) / (_p2.x() - _p1.x());
            // This line: y = k*(x - p1.x) + p1.y.
            // Perpendicular through p has slope -1/k: y = (-1/k)*(x - p.x) + p.y.
            // Solving the two equations simultaneously:
            double x = (k * k * _p1.x() + k * (p.y() - _p1.y()) + p.x()) / (k * k + 1);
            double y = k * (x - _p1.x()) + _p1.y();
            return CGeoPoint(x, y);
        }
    }
    CGeoPoint point1() const {
        return _p1;
    }
    CGeoPoint point2() const {
        return _p2;
    }
    // Exact comparison of the defining points; now const-correct.
    bool operator==(const CGeoLine& rhs) const {
        return ((this->point1().x() == rhs.point1().x()) && (this->point1().y() == rhs.point1().y())
                && (this->point2().x() == rhs.point2().x()) && (this->point2().y() == rhs.point2().y()));
    }
    const double& a() const {
        return _a;
    }
    const double& b() const {
        return _b;
    }
    const double& c() const {
        return _c;
    }
private:
    CGeoPoint _p1;
    CGeoPoint _p2;
    // Implicit equation a*x + b*y + c = 0; by convention a >= 0.
    double _a;
    double _b;
    double _c;
};
// Intersection of two infinite lines; the constructor is defined out of line.
class CGeoLineLineIntersection {
public:
    CGeoLineLineIntersection(const CGeoLine& line_1, const CGeoLine& line_2);
    // Presumably true when the two lines intersect (are not parallel) --
    // TODO confirm against the out-of-line constructor.
    bool Intersectant() const {
        return _intersectant;
    }
    // Intersection point; only meaningful when Intersectant() is true.
    const CGeoPoint& IntersectPoint() const {
        return _point;
    }
private:
    bool _intersectant;
    CGeoPoint _point;
};
/************************************************************************/
/* CGeoSegment / 线段 */
/************************************************************************/
class CGeoSegment: public CGeoLine {
public:
CGeoSegment() {}
CGeoSegment(const CGeoPoint& p1, const CGeoPoint& p2) : CGeoLine(p1, p2), _start(p1), _end(p2) {
_compareX = std::abs(p1.x() - p2.x()) > std::abs(p1.y() - p2.y());
_center = CGeoPoint((p1.x() + p2.x()) / 2, (p1.y() + p2.y()) / 2);
}
bool IsPointOnLineOnSegment(const CGeoPoint& p) const { // 直线上的点是否在线段上
if(_compareX) {
return p.x() > (std::min)(_start.x(), _end.x()) && p.x() < (std::max)(_start.x(), _end.x());
}
return p.y() > (std::min)(_start.y(), _end.y()) && p.y() < (std::max)(_start.y(), _end.y());
}
bool IsSegmentsIntersect(const CGeoSegment& p) const {
CGeoLineLineIntersection tmpInter(*this, p);
CGeoPoint interPoint = tmpInter.IntersectPoint();
return (IsPointOnLineOnSegment(interPoint) && p.IsPointOnLineOnSegment(interPoint));
}
CGeoPoint segmentsIntersectPoint(const CGeoSegment& p) const {
CGeoLineLineIntersection tmpInter(*this, p);
CGeoPoint interPoint = tmpInter.IntersectPoint();
if (IsPointOnLineOnSegment(interPoint) && p.IsPointOnLineOnSegment(interPoint))
return interPoint;
else return CGeoPoint(9999, 9999);
}
double dist2Point(const CGeoPoint& p) {
CGeoPoint tmpProj = projection(p);
if (IsPointOnLineOnSegment(tmpProj)) return p.dist(tmpProj);
else return std::min(_start.dist(p), _end.dist(p));
}
double dist2Segment(const CGeoSegment& s) {
if (IsSegmentsIntersect(s)) return 0;
else return std::min(dist2Point(s.point1()), dist2Point(s.point2()));
}
const CGeoPoint& start() const {
return _start;
}
const CGeoPoint& end() const {
return _end;
}
const CGeoPoint& center() {
return _center;
}
private:
CGeoPoint _start;
CGeoPoint _end;
CGeoPoint _center;
bool _compareX;
};
/************************************************************************/
/* CGeoShape */
/************************************************************************/
// Abstract base class for 2D shapes that can answer point-containment queries.
class CGeoShape {
public:
    virtual ~CGeoShape() { }
    // Containment test; boundary semantics are up to the concrete shape
    // (implementations are defined elsewhere).
    virtual bool HasPoint( const CGeoPoint& p) const = 0;
};
/************************************************************************/
/* CGeoRectangle */
/************************************************************************/
class CGeoRectangle : public CGeoShape {
public:
CGeoRectangle() {
calPoint(0, 0, 0, 0);
}
CGeoRectangle( const CGeoPoint& leftTop, const CGeoPoint& rightDown) {
calPoint(leftTop.x(), leftTop.y(), rightDown.x(), rightDown.y());
}
CGeoRectangle( double x1, double y1, double x2, double y2) {
calPoint(x1, y1, x2, y2);
}
void calPoint(double x1, double y1, double x2, double y2) {
_point[0] = CGeoPoint(x1, y1);
_point[1] = CGeoPoint(x1, y2);
_point[2] = CGeoPoint(x2, y2);
_point[3] = CGeoPoint(x2, y1);
}
double dist2Point(const CGeoPoint& p) {
// std::cout << "GEO COMPUTE IS " <<fabs(p.x() - _point[0].x()) << " " << fabs(p.x() - _point[2].x()) << " " << fabs(_point[0].x() - _point[2].x()) << " " <<
// fabs(p.y() - _point[0].y()) << " " << fabs(p.y() - _point[2].y()) << " " << fabs(_point[0].x() - _point[2].y()) << std::endl;
if (fabs(p.x() - _point[0].x()) + fabs(p.x() - _point[2].x()) - fabs(_point[0].x() - _point[2].x()) < 1.0e-5 &&
fabs(p.y() - _point[0].y()) + fabs(p.y() - _point[2].y()) - fabs(_point[0].y() - _point[2].y()) < 1.0e-5) {
return 0; // the point is inside the rectangle
} else {
CGeoSegment s1(_point[0], _point[1]);
CGeoSegment s2(_point[1], _point[2]);
CGeoSegment s3(_point[2], _point[3]);
CGeoSegment s4(_point[3], _point[0]);
return std::min(s1.dist2Point(p), std::min(s2.dist2Point(p), std::min(s3.dist2Point(p), s4.dist2Point(p))));
}
}
virtual bool HasPoint(const CGeoPoint& p) const;
CGeoPoint _point[4];
};
/************************************************************************/
/* CGeoLineRectangleIntersection */
/************************************************************************/
// Intersection of an infinite line with a rectangle; ctor defined out of line.
class CGeoLineRectangleIntersection {
public:
    CGeoLineRectangleIntersection(const CGeoLine& line, const CGeoRectangle& rect);
    // Presumably true when the line crosses the rectangle -- TODO confirm
    // against the out-of-line constructor.
    bool intersectant() const {
        return _intersectant;
    }
    // First intersection point; only meaningful when intersectant() is true.
    const CGeoPoint& point1() const {
        return _point[0];
    }
    // Second intersection point; only meaningful when intersectant() is true.
    const CGeoPoint& point2() const {
        return _point[1];
    }
private:
    bool _intersectant;
    CGeoPoint _point[2];
};
/************************************************************************/
/* CGeoCircle */
/************************************************************************/
// Circle with center and radius.
// NOTE: the class name keeps the historical "Cirlce" spelling for source compatibility.
class CGeoCirlce : public CGeoShape {
public:
    // Default circle: zero radius at the origin; the radius is initialized so
    // Radius()/Radius2() never read an indeterminate value.
    CGeoCirlce() : _radius(0) { }
    CGeoCirlce(const CGeoPoint& c, double r) : _radius(r), _center(c) { }
    virtual bool HasPoint(const CGeoPoint& p) const ;
    CGeoPoint Center() const {
        return _center;
    }
    double Radius() const {
        return _radius;
    }
    // Squared radius, for comparisons without a sqrt.
    double Radius2() const {
        return _radius * _radius;
    }
private:
    double _radius;
    CGeoPoint _center;
};
/************************************************************************/
/* CGeoLineCircleIntersection */
/************************************************************************/
// Intersection of an infinite line with a circle; ctor defined out of line.
class CGeoLineCircleIntersection {
public:
    CGeoLineCircleIntersection(const CGeoLine& line, const CGeoCirlce& circle);
    // Presumably true when the line meets the circle -- TODO confirm against
    // the out-of-line constructor.
    bool intersectant() const {
        return _intersectant;
    }
    // First intersection point; only meaningful when intersectant() is true.
    const CGeoPoint& point1() const {
        return _point1;
    }
    // Second intersection point; only meaningful when intersectant() is true.
    const CGeoPoint& point2() const {
        return _point2;
    }
private:
    bool _intersectant;
    CGeoPoint _point1;
    CGeoPoint _point2;
};
/************************************************************************/
/* CGeoEllipse,此椭圆的轴与坐标轴垂直 ,方程为(x-c.x())^2/m^2+(y-c.y())^2/n^2 =1 */
/************************************************************************/
// Axis-aligned ellipse: (x - c.x())^2/m^2 + (y - c.y())^2/n^2 = 1.
// Inheritance made public for consistency with the other CGeoShape subclasses
// (the original defaulted to private, blocking polymorphic use).
class CGeoEllipse : public CGeoShape {
public:
    // Default ellipse is degenerate; the axes are zero-initialized so the
    // accessors never read indeterminate values.
    CGeoEllipse() : _xaxis(0), _yaxis(0) { }
    // c: center; m: semi-axis along x; n: semi-axis along y.
    CGeoEllipse(CGeoPoint c, double m, double n) : _xaxis(m), _yaxis(n), _center(c) { }
    CGeoPoint Center() const {
        return _center;
    }
    virtual bool HasPoint(const CGeoPoint& p) const ;
    double Xaxis()const {
        return _xaxis;
    }
    double Yaxis()const {
        return _yaxis;
    }
private:
    double _xaxis;
    double _yaxis;
    CGeoPoint _center;
};
/************************************************************************/
/* CGeoLineCircleIntersection */
/************************************************************************/
// Intersection of an infinite line with an ellipse; ctor defined out of line.
// (The parameter is named `circle` but receives an ellipse.)
class CGeoLineEllipseIntersection {
public:
    CGeoLineEllipseIntersection(const CGeoLine& line, const CGeoEllipse& circle);
    // Presumably true when the line meets the ellipse -- TODO confirm against
    // the out-of-line constructor.
    bool intersectant() const {
        return _intersectant;
    }
    // First intersection point; only meaningful when intersectant() is true.
    const CGeoPoint& point1() const {
        return _point1;
    }
    // Second intersection point; only meaningful when intersectant() is true.
    const CGeoPoint& point2() const {
        return _point2;
    }
private:
    bool _intersectant;
    CGeoPoint _point1;
    CGeoPoint _point2;
};
/*********************************************************************/
/* CGeoSegmentCircleIntersection */
/********************************************************************/
// Intersection of a segment with a circle; the constructor is defined out of line.
class CGeoSegmentCircleIntersection {
public:
    CGeoSegmentCircleIntersection(const CGeoSegment& line, const CGeoCirlce& circle);
    // Presumably true when at least one intersection exists -- TODO confirm
    // against the out-of-line constructor.
    bool intersectant() const {
        return _intersectant;
    }
    // First intersection point; only meaningful when intersectant() is true.
    const CGeoPoint& point1() const {
        return _point1;
    }
    // Second intersection point; only meaningful when intersectant() is true.
    const CGeoPoint& point2() const {
        return _point2;
    }
    // Number of intersection points; now const-correct so it can be called
    // through const references.
    int size() const {
        return intersection_size;
    }
private:
    bool _intersectant;
    int intersection_size;
    CGeoPoint _point1;
    CGeoPoint _point2;
};
#endif
|
//
// Created by ooooo on 2020/1/3.
//
#ifndef CPP_0589_SOLUTION1_H
#define CPP_0589_SOLUTION1_H
#include "Node.h"
/**
* recursion
*/
class Solution {
public:
    // Preorder DFS: record the node's value first, then walk each child in order.
    void help(Node *node, vector<int> &vec) {
        if (node == nullptr) {
            return;
        }
        vec.push_back(node->val);
        for (auto &&kid : node->children) {
            help(kid, vec);
        }
    }
    // Returns the preorder traversal of the N-ary tree rooted at `root`.
    vector<int> preorder(Node *root) {
        vector<int> values;
        help(root, values);
        return values;
    }
};
#endif //CPP_0589_SOLUTION1_H
|
Saturday, July 11, 2015
Because you're a reader of Linux Journal, you probably already know that Linux has a rich virtualization ecosystem. KVM is the de facto standard, and VirtualBox is widely used for desktop virtualization. Veterans should remember Xen (it's still in a good shape, by the way), and there is also VMware (which isn't free but runs on Linux as well). Plus, there are many lesser-known hypervisors like the educational lguest or hobbyist Xvisor. In such a crowded landscape, is there a place for a newcomer?
There likely is not much sense in creating yet another Linux-based "versatile" hypervisor (other than doing it just for fun, you know). But, there are some specific use cases that general-purpose solutions just don't address quite well. One such area is real-time virtualization, which is frequently used in industrial automation, medicine, telecommunications and high-performance computing. In these applications, dedicating a whole CPU or its core to the software that runs bare metal (with no underlying OS) is a way to meet strict deadline requirements. Although it is possible to pin a KVM instance to the processor core and pass through PCI devices to guests, tests show the worst-case latency may be above some realistic requirements (see Resources).
As usual with free software, the situation is getting better with time, but there is one other thing—security. Sensitive software systems go through rigorous certifications (like Common Criteria) or even formal verification procedures. If you want them to run virtualized (say, for consolidation purposes), the hypervisor must isolate them from non-certifiable workloads. This implies that the hypervisor itself must be small enough; otherwise, it may end up being larger (and more "suspicious") than the software it segregates, thus devastating the whole idea of isolation.
So, it looks like there is some room for a lightweight (for the real-time camp), small and simple (for security folks) open-source Linux-friendly hypervisor for real-time and certifiable workloads. That's where Jailhouse comes into play.
New Guy on the Block
Jailhouse was born at Siemens and has been developed as a free software project (GPLv2) since November 2013. Last August, Jailhouse 0.1 was released to the general public. Jailhouse is rather young and more of a research project than a ready-to-use tool at this point, but now is a good time to become acquainted with it and be prepared to meet it in production.
From the technical point of view, Jailhouse is a static partitioning hypervisor that runs bare metal but cooperates closely with Linux. This means Jailhouse doesn't emulate resources you don't have. It just splits your hardware into isolated compartments called "cells" that are wholly dedicated to guest software programs called "inmates". One of these cells runs the Linux OS and is known as the "root cell". Other cells borrow CPUs and devices from the root cell as they are created (Figure 1).
Figure 1. A visualization of Linux running-bare metal (a) and under the Jailhouse hypervisor (b) alongside a real-time application. (Image from Yulia Sinitsyna; Tux image from Larry Ewing.)
Besides Linux, Jailhouse supports bare-metal applications, but it can't run general-purpose OSes (like Windows or FreeBSD) unmodified. As mentioned, there are plenty of other options if you need that. One day Jailhouse also may support running KVM in the root cell, thus delivering the best of both worlds.
As mentioned previously, Jailhouse cooperates closely with Linux and relies on it for hardware bootstrapping, hypervisor launch and doing management tasks (like creating new cells). Bootstrapping is really essential here, as it is a rather complex task for modern computers, and implementing it within Jailhouse would make it much more complex. That being said, Jailhouse doesn't meld with the kernel as KVM (which is a kernel module) does. It is loaded as a firmware image (the same way Wi-Fi adapters load their firmware blobs) and resides in a dedicated memory region that you should reserve at Linux boot time. Jailhouse's kernel module (jailhouse.ko, also called "driver") loads the firmware and creates /dev/jailhouse device, which the Jailhouse userspace tool uses, but it doesn't contain any hypervisor logic.
Jailhouse is an example of Asynchronous Multiprocessing (AMP) architecture. Compared to traditional Symmetric Multiprocessing (SMP) systems, CPU cores in Jailhouse are not treated equally. Cores 0 and 1 may run Linux and have access to a SATA hard drive, while core 2 runs a bare-metal application that has access only to a serial port. As most computers Jailhouse can run on have shared L2/L3 caches, this means there is a possibility for cache thrashing. To understand why this happens, consider that Jailhouse maps the same guest physical memory address (GPA) to a different host (or real) physical address for different inmates. If two inmates occasionally have the same GPA (naturally containing diverse data) in the same L2/L3 cache line due to cache associativity, they will interfere with each other's work and degrade the performance. This effect is yet to be measured, and Jailhouse currently has no dedicated means to mitigate it. However, there is a hope that for many applications, this performance loss won't be crucial.
Now that you have enough background to understand what Jailhouse is (and what it isn't), I hope you are interested in learning more. Let's see how to install and run it on your system.
Getting Up to Date
Sometimes you may need the very latest KVM and QEMU to give Jailhouse a try. KVM is part of the kernel, and updating the critical system component just to try some new software probably seems like overkill. Luckily, there is another way.
kvm-kmod is a tool to take KVM modules from one kernel and compile them for another, and it usually is used to build the latest KVM for your current kernel. The build process is detailed in the README, but in a nutshell, you clone the repository, initialize a submodule (it's the source for KVM), and run the configure script followed by make. When the modules are ready, just insmod them instead of what your distribution provides (don't forget to unload those first). If you want the change to be permanent, run make modules_install. kvm-kmod can take the KVM sources from wherever you point to, but the defaults are usually sufficient.
Compiling QEMU is easier but more time consuming. It follows the usual configure && make procedure, and it doesn't need to be installed system-wide (which is package manager-friendly). Just put /path/to/qemu/x86_64-softmmu/qemu-system-x86_64 instead of plain qemu-system-x86_64 in the text's examples.
Building Jailhouse
Despite having a 0.1 release now, Jailhouse still is a young project that is being developed at a quick pace. You are unlikely to find it in your distribution's repositories for the same reasons, so the preferred way to get Jailhouse is to build it from Git.
To run Jailhouse, you'll need a recent multicore VT-x-enabled Intel x86 64-bit CPU and a motherboard with VT-d support. By the time you read this article, 64-bit AMD CPUs and even ARM (v7 or better) could be supported as well. The code is already here (see Resources), but it's not integrated into the mainline yet. At least 1GB of RAM is recommended, and even more is needed for the nested setup I discuss below. On the software side, you'll need the usual developer tools (make, GCC, Git) and headers for your Linux kernel.
Running Jailhouse on real hardware isn't straightforward at this time, so if you just want to play with it, there is a better alternative. Given that you meet CPU requirements, the hypervisor should run well under KVM/QEMU. This is known as a nested setup. Jailhouse relies on some bleeding-edge features, so you'll need at least Linux 3.17 and QEMU 2.1 for everything to work smoothly. Unless you are on a rolling release distribution, this could be a problem, so you may want to compile these tools yourself. See the Getting Up to Date sidebar for more information, and I suggest you have a look at it even if you are lucky enough to have the required versions pre-packaged. Jailhouse evolves and may need yet unreleased features and fixes by the time you read this.
Make sure you have nested mode enabled in KVM. Both kvm-intel and kvm-amd kernel modules accept the nested=1 parameter, which is responsible just for that. You can set it manually, on the modprobe command line (don't forget to unload the previous module's instance first). Alternatively, add options kvm-intel nested=1 (or the similar kvm-amd line) to a new file under /etc/modprobe.d.
You also should reserve memory for Jailhouse and the inmates. To do this, simply add memmap=66M$0x3b000000 to the kernel command line. For one-time usage, do this from the GRUB menu (press e, edit the command line and then press F10). To make the change persistent, edit the GRUB_CMDLINE_LINUX variable in /etc/default/grub on the QEMU guest side and regenerate the configuration with grub-mkconfig.
Now, make a JeOS edition of your favorite distribution. You can produce one with SUSE Studio, ubuntu-vm-builder and similar, or just install a minimal system the ordinary way yourself. It is recommended to have the same kernel on the host and inside QEMU. Now, run the virtual machine as (Intel CPU assumed):
qemu-system-x86_64 -machine q35 -m 1G -enable-kvm -smp 4
↪-cpu kvm64,-kvm_pv_eoi,-kvm_steal_time,-kvm_asyncpf,
↪-kvmclock,+vmx,+x2apic -drive
↪-virtfs local,path=/path/to/jailhouse,
↪-device ide-hd,drive=disk -serial stdio
↪-serial file:com2.txt
Note, I enabled 9p (-virtfs) to access the host filesystem from the QEMU guest side; /path/to/jailhouse is where you are going to compile Jailhouse now. cd to this directory and run:
git clone jailhouse
cd jailhouse
Now, switch to the guest and mount the 9p filesystem (for example, with mount -t 9p host /mnt). Then, cd to /mnt/jailhouse and execute:
sudo make firmware_install
sudo insmod jailhouse.ko
This copies the Jailhouse binary image you've built to /lib/firmware and inserts the Jailhouse driver module. Now you can enable Jailhouse with:
sudo tools/jailhouse enable configs/qemu-vm.cell
As the command returns, type dmesg | tail. If you see "The Jailhouse is opening." message, you've successfully launched the hypervisor, and your Linux guest now runs under Jailhouse (which itself runs under KVM/QEMU). If you get an error, it is an indication that your CPU is missing some required feature. If the guest hangs, this is most likely because your host kernel or QEMU are not up to date enough for Jailhouse, or something is wrong with qemu-vm cell config. Jailhouse sends all its messages to the serial port, and QEMU simply prints them to the terminal where it was started (Figure 2). Look at the messages to see what resource (I/O port, memory and so on) caused the problem, and read on for the details of Jailhouse configuration.
Figure 2. A typical configuration issue: Jailhouse traps "prohibited" operation from the root cell.
Configs and Inmates
Creating Jailhouse configuration files isn't straightforward. As the code base must be kept small, most of the logic that takes place automatically in other hypervisors must be done manually here (albeit with some help from the tools that come with Jailhouse). Compared to libvirt or VirtualBox XML, Jailhouse configuration files are very detailed and rather low-level. The configuration currently is expressed in the form of plain C files (found under configs/ in the sources) compiled into raw binaries; however, another format (like DeviceTree) could be used in future versions.
Most of the time, you wouldn't need to create a cell config from scratch, unless you authored a whole new inmate or want the hypervisor to run on your specific hardware (see the Jailhouse for Real sidebar).
Cell configuration files contain information like hypervisor base address (it should be within the area you reserved with memmap= earlier), a mask of CPUs assigned to the cell (for root cells, it's 0xff or all CPUs in the system), the list of memory regions and the permissions this cell has to them, I/O ports bitmap (0 marks a port as cell-accessible) and the list of PCI devices.
Each Jailhouse cell has its own config file, so you'll have one config for the root cell describing the platform Jailhouse executes on (like qemu-vm.c, as you saw above) and several others for each running cell. It's possible for inmates to share one config file (and thus one cell), but then only one of these inmates will be active at a given time.
In order to launch an inmate, you need to create its cell first:
sudo tools/jailhouse cell create configs/apic-demo.cell
apic-demo.cell is the cell configuration file that comes with Jailhouse (I also assume you still use the QEMU setup described earlier). This cell doesn't use any PCI devices, but in more complex cases, it is recommended to unload Linux drivers before moving devices to the cell with this command.
Now, the inmate image can be loaded into memory:
sudo tools/jailhouse cell load apic-demo
↪inmates/demos/x86/apic-demo.bin -a 0xf0000
Jailhouse treats all inmates as opaque binaries, and although it provides a small framework to develop them faster, the only thing it needs to know about the inmate image is its base address. Jailhouse expects an inmate entry point at 0xffff0 (which is different from the x86 reset vector). apic-demo.bin is a standard demo inmate that comes with Jailhouse, and the inmate's framework linker script ensures that if the binary is mapped at 0xf0000, the entry point will be at the right address. apic-demo is just a name; it can be almost anything you want.
Finally, start the cell with:
sudo tools/jailhouse cell start apic-demo Now, switch back to the terminal from which you run QEMU. You'll see that lines like this are being sent to the serial port:
Calibrated APIC frequency: 1000008 kHz Timer fired, jitter: 38400 ns, min: 38400 ns, max: 38400 ns ... apic-demo is purely a demonstrational inmate. It programs the APIC timer (found on each contemporary CPU's core) to fire at 10Hz and measures the actual time between the events happening. Jitter is the difference between the expected and actual time (the latency), and the smaller it is, the less visible (in terms of performance) the hypervisor is. Although this test isn't quite comprehensive, it is important, as Jailhouse targets real-time inmates and needs to be as lightweight as possible.
Jailhouse also provides some means for getting cell statistics. At the most basic level, there is the sysfs interface under /sys/devices/jailhouse. Several tools exist that pretty-print this data. For instance, you can list cells currently on the system with:
sudo tools/jailhouse cell list The result is shown in Figure 3. "IMB-A180" is the root cell's name. Other cells also are listed, along with their current states and CPUs assigned. The "Failed CPUs" column contains CPU cores that triggered some fatal error (like accessing an unavailable port or unassigned memory region) and were stopped.
Figure 3. Jailhouse cell listing—the same information is available through the sysfs interface.
For more detailed statistics, run:
sudo tools/jailhouse cell stat apic-demo You'll see something akin to Figure 4. The data is updated periodically (as with the top utility) and contains various low-level counters like the number of hypercalls issued or I/O port accesses emulated. The lifetime total and per-second values are given for each entry. It's mainly for developers, but higher numbers mean the inmate causes hypervisor involvement more often, thus degrading the performance. Ideally, these should be close to zero, as jitter in apic-demo. To exit the tool, press Q.
Figure 4. Jailhouse cell statistics give an insight into how cells communicate with the hypervisor.
Tearing It Down
Jailhouse comes with several demo inmates, not only apic-demo. Let's try something different. Stop the inmate with:
sudo tools/jailhouse cell destroy apic-demo JAILHOUSE_CELL_DESTROY: Operation not permitted What's the reason for this? Remember the apic-demo cell had the "running/locked" state in the cell list. Jailhouse introduces a locked state to prevent changes to the configuration. A cell that locks the hypervisor is essentially more important than the root one (think of it as doing some critical job at a power plant while Linux is mostly for management purposes on that system). Luckily, apic-demo is a toy inmate, and it unlocks Jailhouse after the first shutdown attempt, so the second one should succeed. Execute the above command one more time, and apic-demo should disappear from the cell listing.
Now, create tiny-demo cell (which is originally for tiny-demo.bin, also from the Jailhouse demo inmates set), and load 32-bit-demo.bin into it the usual way:
sudo tools/jailhouse cell create configs/tiny-demo.cell sudo tools/jailhouse cell load tiny-demo ↪inmates/demos/x86/32-bit-demo.bin -a 0xf0000 sudo tools/jailhouse cell start tiny-demo Look at com2.txt in the host (the same directory you started QEMU from). Not only does this show that cells can be re-used by the inmates provided that they have compatible resource requirements, it also proves that Jailhouse can run 32-bit inmates (the hypervisor itself and the root cell always run in 64-bit mode).
When you are done with Jailhouse, you can disable it with:
sudo tools/jailhouse disable For this to succeed, there must be no cells in "running/locked" state.
This is the end of our short trip to the Jailhouse. I hope you enjoyed your stay. For now, Jailhouse is not a ready-to-consume product, so you may not see an immediate use of it. However, it's actively developed and somewhat unique to the Linux ecosystem, and if you have a need for real-time application virtualization, it makes sense to keep a close eye on its progress.
Jailhouse for Real
QEMU is great for giving Jailhouse a try, but it's also possible to test it on real hardware. However, you never should do this on your PC. With a low-level tool like Jailhouse, you easily can hang your root cell where Linux runs, which may result in filesystem and data corruption.
Jailhouse comes with a helper tool to generate cell configs, but usually you still need to tweak the resultant file. The tool depends on Python; if you don't have it on your testing board, Jailhouse lets you collect required data and generate the configuration on your main Linux PC (it's safe):
sudo tools/jailhouse config collect data.tar # Copy data.tar to your PC or notebook and untar tools/jailhouse config create -r path/to/untarred/data ↪configs/myboard.c The configuration tool reads many files under /proc and /sys (either collected or directly), analyzes them and generates memory regions, a PCI devices list and other things required for Jailhouse to run.
Post-processing the generated config is mostly a trial-and-error process. You enable Jailhouse and try to do something. If the system locks up, you analyze the serial output and decide if you need to grant access. If you are trying to run Jailhouse on a memory-constrained system (less than 1GB of RAM), be careful with the hypervisor memory area, as the configuration tool currently can get it wrong. Don't forget to reserve memory for Jailhouse via the kernel command line the same way you did in QEMU. On some AMD-based systems, you may need to adjust the Memory Mapped I/O (MMIO) regions, because Jailhouse doesn't support AMD IOMMU technology yet, although the configuration tool implies it does.
To capture Jailhouse serial output, you'll likely need a serial-to-USB adapter and null modem cable. Many modern motherboards come with no COM ports, but they have headers you can connect a socket to (the cabling is shown in Figure a). Once you connect your board to the main Linux PC, run minicom or similar to see the output (remember to set the port's baud rate to 115200 in the program's settings).
Figure a. A must-have toolkit to run Jailhouse bare metal: serial-to-USB converter, null modem cable (attached) and mountable COM port. (Image from Yulia Sinitsyna.)
Static System Partitioning and KVM (KVM Forum 2013 Slides):
Jailhouse AMD64 Port:
Jailhouse ARM Port:
No comments:
Post a Comment
|
The diagnosis of Trichotillomania (TM) is synonymous with the act of recurrently pulling one’s own body hair resulting in noticeable thinning or baldness. (American Psychiatric Association, Diagnostic and statistical manual of mental disorders, 2000, p. 674) Sites of hair pulling can include any area of the body in which hair is found, but the most common sites are the scalp, eyelashes, eyebrows, and the pubis area. (Kraemer, 1999, p. 298) The disorder itself is categorized in the DSM-IV-TR as an “Impulse Control Disorder Not Elsewhere Classified” along with disorders like Pathological Gambling, Pyromania, Kleptomania, and Intermittent Explosive Disorder. Although TM was previously considered to be a rare disorder, more recent research indicates that prevalence rates of TM may be as high as 2% of the general population. (Kraemer, 1999, p. 298) This prevalence rate is significantly higher than the lifetime prevalence rate of .6% that is cited as a potential baseline among college students the DSM-IV-TR. (4th ed., text rev.; DSM-IV-TR; American Psychiatric Association, 2000, p. 676) The condition appears to be more common among women and the period of onset is typically in childhood or adolescence. (Kraemer, 1999, p. 298) As is customary with most DSM-IV-TR diagnoses, the act of hair pulling cannot be better accounted for by another mental disorder (like delusions, for example) or a general medical condition. Like every disorder in the DSM-IV-TR, the disturbance must cause significant distress or impairment in functioning. (4th ed., text rev.; DSM-IV-TR; American Psychiatric Association, 2000, p. 675)
Alopecia is a key concept that must be understood in order to complete the differential diagnosis of TM. Alopecia is a condition of baldness in the most general sense. (Shiel, Jr. & Stoppler, 2008, p. 14) Other medically related causes of alopecia should be considered in the differential diagnosis of TM, especially when working with an individual who deny pulling their hair. The common suspects include male-pattern baldness, Discoid Lupus Erythematosus (DLE), Lichen Planopilaris (also known as Acuminatus), Folliculitis Decalvans, Pseudopelade of Brocq, and Alopecia Mucinosa (Follicular Mucinosis). (4th ed., text rev.; DSM-IV-TR; American Psychiatric Association, 2000, p. 676) Comprehensive coverage of these medical conditions is beyond the scope of this article – all of the aforementioned confounding variables can be eliminated by a general practitioner.
There are a number of idiosyncratic features associated with TM that bear mentioning. Although the constellation of features covered here is not sufficient to warrant a diagnosis in isolation, they can aid in the differential diagnosis process. Alopecia, regardless of the cause, has been known to lead sufferers to tremendous feats of avoidance so that the hair loss remains undetected. Simply avoiding social functions or other events where the individual (and their attendant hair loss) might be uncovered is a common occurrence. In cases where individual’s focus of attention is on the head or scalp, it is not uncommon for affected individuals to attempt to hide hair loss by adopting complimentary hair styles or wearing other headwear (e.g., hats, wigs, etc). These avoidance behaviors will be the target of exposure and response prevention later in this article.
In addition to avoidant behavior and elaborate attempts to “cover it up,” individuals with TM frequently present with clinically significant difficulty in areas such as self-esteem and mood. Comorbidity, or the presence of one or more disorders in the addition to a primary diagnosis, is the rule not the exception in the stereotypical presentation of TM. Mood disorders (like depression) are the most common (65%) – anxiety (57%), chemical use (22%), and eating disorders (20%) round out the top four mostly likely candidates for comorbidity. (Kraemer, 1999, p. 298) These comorbidity rates are not overly surprising since they parallel prevalence rates across the wider population – perhaps with the notable exception of the high rate of comorbid eating disorders. We can speculate about the source of comorbidity – one possible hypothesis is that a few people who suffer TM also suffer from a persistent cognitive dissonance associated with having happy-go-lucky personality trait which leads them “let the chips fall where they may.” They are individuals prone to impulsivity, but they are subdued and controlled the shame, guilt, frustration, fear, rage, and helplessness associated with the social limitations placed on them by the disorder. (Ingram, 2012, p. 269) On the topic of personality, surprisingly enough, research suggests that personality disorders do not share significant overlap with TM. This includes Borderline Personality Disorder (BPD) despite the fact that BPD is often associated with self-harming behavior. (Kraemer, 1999, p. 299)
Differentiating TM from Obsessive-Compulsive Disorder (OCD) can be challenging in some cases. TM is similar to OCD because there is a “sense of gratification” or “relief” when pulling the hair out. Unlike individuals with OCD, individuals with TM do not perform their compulsions in direct response to an obsession and/or according to rules that must be rigidly adhered to. (4th ed., text rev.; DSM-IV-TR; American Psychiatric Association, 2000, p. 676) There are, however, observed similarities between OCD and TM regarding phenomenology, neurological test performance, response to SSRI’s, and contributing elements of familial and/or genetic factors. (Kraemer, 1999, p. 299) Due to the large genetic component contributions of both disorders, obtaining a family history (vis-à-vis a detailed genogram) is highly recommended. The comprehensive genogram covering all mental illness can be helpful in the discovery the comorbid conditions identified above as well.
There is some suggestion that knowledge of events associated with onset is “intriguing, but unnecessary for successful treatment.” (Kraemer, 1999, p. 299) I call shenanigans. There is a significant connection between the onset of TM and the patient enduring loss, perceived loss, and/or trauma. Time is well spent exploring the specific environmental stressors that precipitated the disorder. Although ignoring circumstances surrounding onset might be prudent when employing strict behavioral treatment paradigms, it seems like a terrible waste of time to endure suffering without identifying some underlying meaning or purpose that would otherwise be missed if we overlook onset specifics. “Everything can be taken from a man but one thing: the last of human freedoms – to choose one’s attitude in any given set of circumstances, to choose one’s own way.” (Frankl, 1997, p. 86) If we acknowledge that all behavior is purposeful, then we must know and understand the circumstances around onset if we will ever understand the purpose of said behavior. I liken this to a difference in professional opinion and personal preference because either position can be reasonably justified, but in the end the patient should make the ultimate decision about whether or not to explore onset contributions vis-à-vis “imagery dialogue” or a similar technique. (Young, Klosko, & Weishaar, 2003, p. 123) If such imagery techniques are unsuccessful or undesired by the client, a psychodynamic conversation between “internal parts of oneself” can add clarity to the persistent inability of the client to delay gratification. (Ingram, 2012, p. 292) Such explorations are likely to be time consuming, comparatively speaking, and should not be explored with patients who are bound by strict EAP requirements or managed care restrictions on the type and length of treatment. Comorbid developmental disabilities and cognitive deficits may preclude this existential exploration. 
I employ the exploration of existential issues of origin in the interest of increasing treatment motivation, promoting adherence, enhancing the therapeutic milieu, and thwarting subsequent lapses by anchoring cognitive dissonance to a concrete event.
TM represents a behavioral manifestation of a fixed action patterns (FAPs) that is rigid, consistent, and predicable. FAPs are generally thought to have evolved from our most primal instincts as animals – they are believed to contain fundamental behavioral ‘switches’ that enhance the survivability of the human species. (Lambert & Kinsley, 2011, p. 232) The nature of FAPs that leads some researchers to draw parallels to TM is that FAPs appear to be qualitatively “ballistic.” It’s an “all or nothing” reaction that is comparable to an action potential traveling down the axon of a neuron. Once they are triggered they are very difficult to suppress and may have a tendency to “kindle” other effects. (Lambert & Kinsley, 2011, p. 233)
There are some unique considerations when it comes to assessing a new patient with TM. Because chewing on or ingesting the hair is reported in nearly half of TM cases, the attending clinician should always inquire about oral manipulation and associated gastrointestinal pain associated with a connected hair mass in the stomach or bowel (trichobezoar). Motivation for change should be assessed and measured because behavioral interventions inherently require a great deal of effort. Family and social systems should not be ignored since family dynamics can exacerbate symptomatlogy vis-à-vis pressure to change (negative reinforcement), excessive attention (positive reinforcement), or both. (Kraemer, 1999, p. 299)
What remains to be seen is the role of stress in the process of “triggering” a TM episode. Some individuals experience an “itch like” sensation as a physical antecedent that remits once the hair is pulled. This “itch like” sensation is far from universal. Some clinicians and researchers believe that the abnormal grooming behavior found in TM is “elicited in response to stress” with the necessary but not sufficient condition of “limited options for motoric behavior and tension release.” (Kraemer, 1999, p. 299) Although this stress hypothesis may materialize as a tenable hypothesis in some cases, it’s by no means typical. Most people diagnosed with TM report that the act of pulling typically occurs during affective states of relaxation and distraction. Most individuals whom suffer from TM do not report clinically significant levels of anxiety as the “trigger” of bouts of hair pulling. We could attribute this to an absence of insight regarding anxiety related triggers or, perhaps anxiety simply does not play a significant role in the onset and maintenance of hair pulling episodes. Regardless of the factors that trigger episodes, a comprehensive biopsychosocial assessment that includes environmental stressors (past, present and anticipated) should be explored.
The options for treatment of TM are limited at best. SSRIs have demonstrated some potential in the treatment of TM, but more research is needed before we can consider SSRIs as a legitimate first-line treatment. SSRIs are worth a shot as an adjunct treatment in cases of chronic, refractory, or treatment resistant TM. I would consider recommending a referral to a psychiatrist (not a general practitioner) for a medication review due in part to the favorable risk profile of the most recent round of SSRIs. Given the high rate of comorbidity with mood and anxiety disorders – if either is anxiety or depression are comorbid, SSRIs will likely be recommended regardless. Killing two birds with one stone is the order of the day, but be mindful that some medication can interfere with certain treatment techniques like imaginal or in vivo exposure. (Ledley, Marx, & Heimberg, 2010, p. 141) Additional research is needed before anxiolytic medications can be recommended in the absence of comorbid anxiety disorders (especially with children). Hypnosis and hypnotic suggestion in combination with other behavioral interventions may be helpful for some individuals, but I don’t know enough about it at this time to recommend it. Call me skeptical, or ignorant, but I prefer to save the parlor tricks for the circus…
Habit reversal is no parlor trick. My goal isn’t to heal the patient; that would create a level of dependence I am not comfortable with… my goal is to teach clients how to heal themselves. Okay, but how? The combination of Competing Response Training, Awareness/Mindfulness Training, Relaxation Training, Contingency Management, Cognitive Restructuring, and Generalization Training is the best hope for someone who seeks some relief from TM. Collectively I will refer to this collection of techniques as Habit Reversal.
Competing Response Training is employed in direct response to hair pulling or in situations where hair pulling might be likely. In the absence of “internal restraints to impulsive behavior,” artificial circumstances are created by identifying substitute behaviors that are totally incompatible with pulling hair. (Ingram, 2012, p. 292) Just like a compulsive gambling addict isn’t in any danger if spends all his money on rent, someone with TM is much less likely to pull hair if they are doing something else with their hands.
Antecedents, or triggers, are sometimes referred to as discriminative stimuli. (Ingram, 2012, p. 230) “We sense objects in a certain way because of our application of priori intuitions…” (Pirsig, 1999, p. 133) Altering the underlying assumptions entrenched in maladaptive priori intuitions is the core purpose of Awareness and Mindfulness Training. “There is a lack of constructive self-talk mediating between the trigger event and the behavior. The therapist helps the client build intervening self-messages: Slow down and think it over; think about the consequences.” (Ingram, 2012, p. 221) The connection to contingency management should be self evident. Utilizing a customized self-monitoring record, the patient begins to acquire the necessary insight to “spot” maladaptive self talk. “Spotting” is not a new or novel concept – it is central component of Abraham Low’s revolutionary self help system Recovery International. (Abraham Low Self-Help Systems, n.d.) The customized self-monitoring record should invariably include various data elements such as precursors, length of episode, number of hairs pulled, and a subjective unit of distress representing the level of “urge” or desire to pull hair. (Kraemer, 1999) The act of recording behavior (even in the absence of other techniques) is likely to produce significant reductions in TM symptomatlogy. (Persons, 2008, p. 182-201) Perhaps more importantly, associated activities, thoughts, and emotions that may be contributing to the urge to pull should be codified. (Kraemer, 1999, p. 300) In session, this record can be reviewed and subsequently tied to “high risk circumstances” and “priori intuitions” involving constructs such as anger, frustration, depression, and boredom.
Relaxation training is a critical component if we subscribe to the “kindling” hypothesis explained previously. Relaxation is intended to reduce the urges that inevitably trigger the habit. Examples abound, but diaphragmatic breathing, progressive relaxation, and visualization are all techniques that can be employed in isolation or in conjunction with each other.
Contingency Management is inexorably tied to the existential anchor of cognitive dissonance described above. My emphasis on this element is where my approach might differ from some other clinicians. “You are free to do whatever you want, but you are responsible for the consequences of everything that you do.” (Ingram, 2012, p. 270) This might include the client writing down sources of embarrassment, advantages of controlling the symptomatlogy of TM, etc. (Kraemer, 1999) The moment someone with pyromania decides that no fire worth being imprisoned, they will stop starting fires. The same holds true with someone who acknowledges the consequences of pulling their hair.
How do we define success? Once habit reversal is successfully accomplished in one setting or situation, the client needs to be taught how to generalize that skill to other contexts. A hierarchical ranking of anxiety provoking situations can be helpful in this process since self-paced graduated exposure is likely to increase tolerability for the anxious client. (Ingram, 2012, p. 240) If skills are acquired, and generalization occurs, we can reasonably expect a significant reduction in TM symptomatlogy. The challenges are significant, cognitive behavioral therapy is much easier said than done. High levels of treatment motivation are required for the behavioral elements, and moderate to high levels of insight are exceptionally helpful for the cognitive elements. In addition, this is an impulse control disorder… impulsivity leads to treatment noncompliance and termination. The combination of all the above, in addition to the fact that TM is generally acknowledged as one of the more persistent and difficult to treat disorders, prevents me from providing any prognosis other than “this treatment will work as well as the client allows it to work.”
Abraham Low Self-Help Systems. (n.d.). Recovery international terms and definitions. Retrieved August 2, 2012, from http://www.lowselfhelpsystems.org/system/recovery-international-language.asp
American Psychiatric Association. (2000). Diagnostic and statistical manual of mental disorders (4th ed., text rev.). Washington, DC: Author.
Frankl, V. E. (1997). Man’s search for meaning (rev. ed.). New York, NY: Pocket Books.
Ingram, B. L. (2012). Clinical case formulations: Matching the integrative treatment plan to the client (2nd ed.). Hoboken, NJ: John Wiley & Sons.
Kraemer, P. A. (1999). The application of habit reversal in treating trichotillomania. Psychotherapy: Theory, Research, Practice, Training, 36(3), 298-304. doi: 10.1037/h0092314
Lambert, K. G., & Kinsley, C. H. (2011). Clinical neuroscience: Psychopathology and the brain (2nd ed.). New York: Oxford University Press.
Ledley, D. R., Marx, B. P., & Heimberg, R. G. (2010). Making cognitive-behavioral therapy work: Clinical process for new practitioners (2nd ed.). New York, NY: Guilford Press.
Persons, J. B. (2008). The case formulation approach to cognitive-behavior therapy. New York, NY: Guilford Press.
Pirsig, R. M. (1999). Zen and the art of motorcycle maintenance: An inquiry into values (25th Anniversary ed.). New York: Quill.
Shiel, W. C., Jr., & Stoppler, M. C. (Eds.). (2008). Webster’s new world medical dictionary (3rd ed.). Hoboken, NJ: Wiley Publishing.
Young, J. E., Klosko, J. S., & Weishaar, M. E. (2003). Schema therapy: A practitioner’s guide. New York: Guilford Press.
|
// Author: b1tank
// Email: [email protected]
//=================================
/*
154_find-minimum-in-rotated-sorted-array-ii LeetCode
Solution:
- l + (r - l) / 2 ---- to avoid int overflow !!!
- be careful with different left and right boundary change patterns
- move less aggressively with the right pointer to tackle duplicates
*/
#include <iostream>
#include <vector>
#include <string>
using namespace std;
class Solution {
public:
    /// Returns the minimum element of a rotated sorted array that may
    /// contain duplicates (LeetCode 154).
    ///
    /// Binary search comparing nums[mid] against nums[r]:
    ///  - nums[mid] < nums[r]: the minimum lies in [l, mid], so move r.
    ///  - nums[mid] > nums[r]: the minimum lies in (mid, r], so move l.
    ///  - nums[mid] == nums[r]: we cannot tell which half holds the
    ///    minimum, so conservatively shrink only the right boundary by
    ///    one. Shrinking the left boundary instead would skip the answer
    ///    for inputs such as [1, 2, 2].
    ///
    /// Complexity: O(log n) on average, O(n) worst case (all duplicates).
    int findMin(std::vector<int>& nums) {
        int l = 0;
        int r = static_cast<int>(nums.size()) - 1;  // explicit narrowing from size_t
        while (l < r) {
            int mid = l + (r - l) / 2;  // avoids overflow of (l + r)
            if (nums[mid] < nums[r]) {
                r = mid;
            } else if (nums[mid] > nums[r]) {
                l = mid + 1;
            } else {
                --r;  // duplicate of nums[r]: safe to discard one element
            }
        }
        return nums[l];
    }
};
|
Introduction to principles of chemistry and fundamentals of inorganic and biochemistry. Structure and chemistry of carbohydrates, lipids, proteins, biochemistry of enzymes, metabolism, body fluids and radiation effects. On-line materials include the course syllabus, copies of the lecture slides and animations, interactive Periodic Table, chapter summaries and practice exams. This course is targeted towards Health Science Majors.
Introduction to principles of chemistry. This course is targeted towards Chemistry Majors.
Laboratory experiments to develop techniques in organic chemistry and illustrate principles. On-line materials include step-by-step prelabs for many of the experiments that students will be conducting.
Theoretical principles of quantitative and instrumental analysis. Emphasis is placed on newer analytical tools and equipment.
Intermediate level course. Includes a discussion of the structure, function and metabolism of proteins, carbohydrates and lipids. In addition, there is a review of enzymes, DNA and RNA.
This course stresses theory and application of modern chromatographic methods. On-line materials include the course syllabus, copies of course lecture slides and animations.
A 'short course' covering the use of a mass spectrometer as a GC detector. Basic instrumentation, data treatment and spectral interpretation methods will be discussed. On-line materials include copies of course lecture slides and tables to assist in the interpretation of mass spectra.
Coverage of statistical methods in Analytical Chemistry. Course includes basic statistics, experimental design, modeling, exploratory data analysis and other multivariate techniques. On-line materials include the course syllabus, homework problems and copies of the lecture slides.
A survey of the basic equipment, data and methodology of Analytical methods that rely on radioisotopic materials. On-line materials include the course syllabus, homework problems, copies of the lecture slides and animations.
Why I missed the exam
|
#ifndef INFERENCE_H
#define INFERENCE_H
/*
 * InferPairModel - estimates parameters of a maximum-entropy pair model
 * for the given sequence alignment.
 *
 *   ali     - alignment to fit (project type; contents defined elsewhere)
 *   options - inference options (project type; presumably controls the
 *             L-BFGS optimization, given the lbfgsfloatval_t return --
 *             TODO confirm against the implementation)
 *
 * Returns a pointer to an array of fitted parameters.
 * NOTE(review): ownership of the returned buffer is not visible here --
 * confirm whether the caller must free it.
 */
lbfgsfloatval_t *InferPairModel(alignment_t *ali, options_t *options);
#endif /* INFERENCE_H */
|
Now that we’ve said a lot about individual operators on vector spaces, I want to go back and consider some other sorts of structures we can put on the space itself. Foremost among these is the idea of a bilinear form. This is really nothing but a bilinear function to the base field: . Of course, this means that it’s equivalent to a linear function from the tensor square: .
Instead of writing this as a function, we will often use a slightly different notation. We write a bracket , or sometimes , if we need to specify which of multiple different inner products under consideration.
Another viewpoint comes from recognizing that we’ve got a duality for vector spaces. This lets us rewrite our bilinear form as a linear transformation . We can view this as saying that once we pick one of the vectors , the bilinear form reduces to a linear functional , which is a vector in the dual space . Or we could focus on the other slot and define .
We know that the dual space of a finite-dimensional vector space has the same dimension as the space itself, which raises the possibility that or is an isomorphism from to . If either one is, then both are, and we say that the bilinear form is nondegenerate.
We can also note that there is a symmetry on the category of vector spaces. That is, we have a linear transformation defined by . This makes it natural to ask what effect this has on our form. Two obvious possibilities are that and that . In the first case we’ll call the bilinear form “symmetric”, and in the second we’ll call it “antisymmetric”. In terms of the maps and , we see that composing with the symmetry swaps the roles of these two functions. For symmetric bilinear forms, , while for antisymmetric bilinear forms we have .
This leads us to consider nondegenerate bilinear forms a little more. If is an isomorphism it has an inverse . Then we can form the composite . If is symmetric then this composition is the identity transformation on . On the other hand, if is antisymmetric then this composition is the negative of the identity transformation. Thus, the composite transformation measures how much the bilinear transformation diverges from symmetry. Accordingly, we call it the asymmetry of the form .
Finally, if we’re working over a finite-dimensional vector space we can pick a basis for , and get a matrix for . We define the matrix entry . Then if we have vectors and we can calculate
In terms of this basis and its dual basis , we find the image of the linear transformation . That is, the matrix also can be used to represent the partial maps and . If is symmetric, then the matrix is symmetric , while if it’s antisymmetric then .
|
The Gram-Schmidt Process
Now that we have a real or complex inner product, we have notions of length and angle. This lets us define what it means for a collection of vectors to be “orthonormal”: each pair of distinct vectors is perpendicular, and each vector has unit length. In formulas, we say that the collection is orthonormal if . These can be useful things to have, but how do we get our hands on them?
It turns out that if we have a linearly independent collection of vectors then we can come up with an orthonormal collection spanning the same subspace of . Even better, we can pick it so that the first vectors span the same subspace as . The method goes back to Laplace and Cauchy, but gets its name from Jørgen Gram and Erhard Schmidt.
We proceed by induction on the number of vectors in the collection. If , then we simply set
This “normalizes” the vector to have unit length, but doesn’t change its direction. It spans the same one-dimensional subspace, and since it’s alone it forms an orthonormal collection.
Now, lets assume the procedure works for collections of size and start out with a linearly independent collection of vectors. First, we can orthonormalize the first vectors using our inductive hypothesis. This gives a collection which spans the same subspace as (and so on down, as noted above). But isn’t in the subspace spanned by the first vectors (or else the original collection wouldn’t have been linearly independent). So it points at least somewhat in a new direction.
To find this new direction, we define
This vector will be orthogonal to all the vectors from to , since for any such we can check
where we use the orthonormality of the collection to show that most of these inner products come out to be zero.
So we’ve got a vector orthogonal to all the ones we collected so far, but it might not have unit length. So we normalize it:
and we’re done.
|
On August 9, 2011, the Canadian Ice Service (CIS) reported that the Petermann Ice Island-A (PII-A) appeared to be grounded off the east coast of Newfoundland, east of the city of St. Anthony.
The Moderate Resolution Imaging Spectroradiometer (MODIS) on NASA’s Terra satellite captured this natural-color image of the ice island and its surroundings on August 14, 2011. Clouds hide much of the region, and white lines delineate coasts and borders.
PII-A appears as an irregularly shaped white body east of St. Anthony. What look like small fragments of ice appear immediately west and north of the ice island. The CIS had reported for weeks that the ice island was losing mass due to melting and calving, so a continued loss of ice is consistent with CIS reports.
PII-A is a remnant of a much larger ice island that calved off the Petermann Glacier in northwestern Greenland on August 5, 2010. Over the course of the following year, that ice island fragmented into smaller pieces, which continued drifting. Other fragments of the original ice island were in Baffin Bay and Lancaster Sound as of August 9, according to the CIS.
- Canadian Ice Service (2011, August 9). Petermann Ice Island Updates. Accessed August 15, 2011.
|
Dragon Eggs are not as common as they used to be but they are still around. Jenna found Spit Fyre's egg in the tunnels beneath Aunt Zelda's cottage and gave it to Septimus, neither knowing its true identity then.
Dragon Eggs are impossibly smooth and oval shaped. They resemble large stones. They are also said by Septimus to give off a slight luminescent sheen in the light. They can only be hatched in a specific order of unique steps.
Steps to Hatch a Dragon Egg
1. Sustain heat surrounding the dragon egg at a minimum of eighty degrees for at least twenty-four hours
2. Keep the dragon egg supplied with constant warmth for at least a year and a day (It is recommended that you sleep with the egg under your pillow), providing the sensation of movement for at least eight hours a day.
3. After the year and a day, the dragon must receive a sharp tap on its shell to wake it up. (Dropping it onto a stone surface from head height should do the trick.)
4. Finally, provide the dragon egg with a touch of Darknesse, possible methods include: Leaving the egg outside a Darke Witch Coven's house overnight, or wrapping the egg in a Darke spell at midnight
5. Your dragon is now ready to hatch!
Ad blocker interference detected!
|
The press release doesn’t contain any pictures, and really doesn’t do this new web tool justice, so I’ve added some screencaps. In a nutshell, the new iSWA site lets you arrange graphical packages of solar images and plots onscreen for simultaneous evaluation. Stuff that had been scattered over several solar related websites is now in one interface. Pretty cool. – Anthony
When NASA’s satellite operators need accurate, real-time space-weather information, they turn to the Community Coordinated Modeling Center (CCMC) of the Space Weather Laboratory at NASA’s Goddard Space Flight Center in Greenbelt, Md. The CCMC’s newest and most advanced space-weather science tool is the Integrated Space Weather Analysis (iSWA) system.
The iSWA is a robust, integrated system that provides information about space weather conditions past, present, and future and, unlike many other programs currently in use, has an interface that the user can customize to suit a unique set of data requirements.
“The iSWA space-weather data analysis system offers a unique level of customization and flexibility to maintain, modify, and add new tools and data products as they become available,” says Marlo Maddox, iSWA system chief developer at NASA Goddard.
iSWA draws together information about conditions from the sun to the boundary of the sun’s influence, known as the heliosphere. The iSWA systems digests information from spacecraft including the National Oceanic and Atmospheric Administration’s (NOAA) Geostationary Operational Environmental Satellites (GOES), NASA’s Solar Terrestrial Relations Observatory (STEREO), the joint European Space Agency and NASA mission Solar and Heliospheric Observatory (SOHO), and NASA’s Advanced Composition Explorer (ACE).
Citizen scientists and science enthusiasts can also use the data, models, and tools of the iSWA system. Similar to the way in which armchair astronomers have used SOHO data to discover comets, enthusiasts will find the iSWA system a wonderful resource for increasing their familiarity with the concept of space weather.
“We are continuously evolving the iSWA system, and we hope that it will benefit not only NASA satellite operators, but also that it may also help space-weather forecasting at other agencies such as the Air Force Weather Agency and NOAA,” says Michael Hesse, chief of the Space Weather Laboratory at NASA Goddard.
Space-weather information tends to be scattered over various Web sites. NASA Goddard space physicist Antti Pulkkinen says the iSWA system represents “the most comprehensive single interface for general space-weather-related information,” providing data on past and current space-weather events. The system allows the user to configure or design custom displays of the information.
The system compiles data about conditions on the sun, in Earth’s magnetosphere — the protective magnetic field that envelops our planet — and down to Earth’s surface. It provides a user interface to provide NASA’s satellite operators with a real-time view of space weather. In addition to NASA, the iSWA system is used by the Air Force Weather agency.
Access to space-weather information that combines data from state-of-the-art space-weather models with concurrent observations of the space environment provides a powerful tool for users to obtain a personalized “quick look” at space-weather information, detailed insight into space-weather conditions, as well as tools for historical analysis of the space-weather’s impact.
Development of the iSWA system has been a joint activity between the Office of the Chief Engineer at NASA Headquarters and the Applied Engineering and Technology Directorate and the Science and Exploration Directorate at NASA Goddard. The iSWA system is located at NASA Goddard.
The Community Coordinated Modeling Center is funded by the Heliophysics Division in the Science Mission Directorate at NASA Headquarters, and the National Science Foundation.
Layout selector tool:
|
/*++
Copyright (c) 1999-2000 Microsoft Corporation
Module Name:
srapi.h
Abstract:
This module defines the public System Restore interface for nt.
Author:
Paul McDaniel (paulmcd) 24-Feb-2000
Revision History:
Paul McDaniel (paulmcd) 18-Apr-2000 completely new version
--*/
#ifndef _SRAPI_H_
#define _SRAPI_H_
#ifdef __cplusplus
extern "C" {
#endif
/***************************************************************************++
Routine Description:
SrCreateControlHandle is used to retrieve a HANDLE that can be used
to perform control operations on the driver.
Arguments:
pControlHandle - receives the newly created HANDLE. The controlling
application must call CloseHandle when it is done.
Options - one of the below options.
Return Value:
ULONG - Completion status.
--***************************************************************************/
#define SR_OPTION_OVERLAPPED 0x00000001 // for async
#define SR_OPTION_VALID 0x00000001 //
ULONG
WINAPI
SrCreateControlHandle (
IN ULONG Options,
OUT PHANDLE pControlHandle
);
/***************************************************************************++
Routine Description:
SrCreateRestorePoint is called by the controlling application to declare
a new restore point. The driver will create a local restore directory
and then return a unique sequence number to the controlling app.
Arguments:
ControlHandle - the control HANDLE.
pNewRestoreNumber - holds the new restore number on return. example: if
the new restore point directory is \_restore\rp5 this will return
the number 5
Return Value:
ULONG - Completion status.
--***************************************************************************/
ULONG
WINAPI
SrCreateRestorePoint (
IN HANDLE ControlHandle,
OUT PULONG pNewRestoreNumber
);
/***************************************************************************++
Routine Description:
SrGetNextSequenceNum is called by the application to get the next
available sequence number from the driver.
Arguments:
ControlHandle - the control HANDLE.
pNextSequenceNum - holds the next available sequence number on return.
Return Value:
ULONG - Completion status.
--***************************************************************************/
ULONG
WINAPI
SrGetNextSequenceNum(
IN HANDLE ControlHandle,
OUT PINT64 pNextSequenceNum
);
/***************************************************************************++
Routine Description:
SrReloadConfiguration causes the driver to reload its configuration
from its configuration file that resides in a preassigned location.
A controlling service can update this file, then alert the driver to
reload it.
this file is %systemdrive%\_restore\_exclude.cfg .
Arguments:
ControlHandle - the control HANDLE.
Return Value:
ULONG - Completion status.
--***************************************************************************/
ULONG
WINAPI
SrReloadConfiguration (
IN HANDLE ControlHandle
);
/***************************************************************************++
Routine Description:
SrStopMonitoring will cause the driver to stop monitoring file changes.
The default state of the driver on startup is to monitor file changes.
Arguments:
ControlHandle - the control HANDLE.
Return Value:
ULONG - Completion status.
--***************************************************************************/
ULONG
WINAPI
SrStopMonitoring (
IN HANDLE ControlHandle
);
/***************************************************************************++
Routine Description:
SrStartMonitoring will cause the driver to start monitoring file changes.
The default state of the driver on startup is to monitor file changes.
This api is only needed in the case that the controlling application has
called SrStopMonitoring and wishes to restart it.
Arguments:
ControlHandle - the control HANDLE.
Return Value:
ULONG - Completion status.
--***************************************************************************/
ULONG
WINAPI
SrStartMonitoring (
IN HANDLE ControlHandle
);
//
// these are the interesting types of events that can happen.
//
typedef enum _SR_EVENT_TYPE
{
// NOTE: values covered by SrEventLogMask identify the event itself;
// the high bits (0x00010000 and up, under "flags" below) are modifiers
// that may be combined with an event value.
SrEventInvalid = 0, // no action has been set
SrEventStreamChange = 0x01, // data is being changed in a stream
SrEventAclChange = 0x02, // an acl on a file or directory is changing
SrEventAttribChange = 0x04, // an attribute on a file or directory is changing
SrEventStreamOverwrite = 0x08, // a stream is being opened for overwrite
SrEventFileDelete = 0x10, // a file is being opened for delete
SrEventFileCreate = 0x20, // a file is newly created, not overwriting anything
SrEventFileRename = 0x40, // a file is renamed (within monitored space)
SrEventDirectoryCreate = 0x80, // a dir is created
SrEventDirectoryRename = 0x100, // a dir is renamed (within monitored space)
SrEventDirectoryDelete = 0x200, // an empty dir is deleted
SrEventMountCreate = 0x400, // a mount point was created
SrEventMountDelete = 0x800, // a mount point was deleted
SrEventVolumeError = 0x1000, // a non-recoverable error occurred on the volume
SrEventMaximum = 0x1000,
SrEventStreamCreate = 0x2000, // a stream has been created. This will never
// be logged, but is used to make sure that
// we handle stream creations correctly.
SrEventLogMask = 0xffff,
//
// flags
//
SrEventNoOptimization = 0x00010000, // this flag on means no optimizations are to be performed
SrEventIsDirectory = 0x00020000, // this event happened on a directory
SrEventIsNotDirectory = 0x00040000, // this event happened on a non-directory (file)
SrEventSimulatedDelete = 0x00080000, // when set this is a simulated DELETE operation --
// the file is not really being deleted, but to
// SR it looks like a delete.
SrEventInPreCreate = 0x00100000, // when set, the create has not yet been succeeded by the filesystem
SrEventOpenById = 0x00200000 // when set, the create has not yet been succeeded by the filesystem
// and this file is being opened by ID.
} SR_EVENT_TYPE;
//
// this structure represents a notification from kernel mode
// to user mode. This is because of interesting volume activity
//
typedef enum _SR_NOTIFICATION_TYPE
{
SrNotificationInvalid = 0, // no action has been set
SrNotificationVolumeFirstWrite, // The first write on a volume occurred
SrNotificationVolume25MbWritten,// 25 MB has been written to the volume (see SR_NOTIFY_BYTE_COUNT)
SrNotificationVolumeError, // A backup just failed, Context holds the win32 code.
SrNotificationMaximum
} SR_NOTIFICATION_TYPE, * PSR_NOTIFICATION_TYPE;
#define SR_NOTIFY_BYTE_COUNT 25 * (1024 * 1024)
//
// this is the largest nt path that sr chooses to monitor. paths larger than
// this will be silently ignored and passed down to the file system
// unmonitored.
//
// NOTE: This length INCLUDES the terminating NULL at the end of the
// filename string.
//
#define SR_MAX_FILENAME_LENGTH 1000
//
// Restore needs to prepend the volume guid in addition to the filepath --
// so the maximum filepath length relative to the volume that can be supported
// is 1000 - strlen(guid) = 952 characters
// restore also appends suffixes like (2) to these names in cases of locked or
// conflicting files, so to be really safe, we choose an even smaller number
//
#define SR_MAX_FILENAME_PATH 940
#define MAKE_TAG(tag) ( (ULONG)(tag) )
#define SR_NOTIFICATION_RECORD_TAG MAKE_TAG( 'RNrS' )
#define IS_VALID_NOTIFICATION_RECORD(pObject) \
(((pObject) != NULL) && ((pObject)->Signature == SR_NOTIFICATION_RECORD_TAG))
typedef struct _SR_NOTIFICATION_RECORD
{
//
// SR_NOTIFICATION_RECORD_TAG
//
ULONG Signature;
//
// reserved
//
LIST_ENTRY ListEntry;
//
// the type of notification
//
SR_NOTIFICATION_TYPE NotificationType;
//
// the name of the volume being notified for
//
UNICODE_STRING VolumeName;
//
// a context/parameter
//
ULONG Context;
} SR_NOTIFICATION_RECORD, * PSR_NOTIFICATION_RECORD;
/***************************************************************************++
Routine Description:
SrWaitForNotification is used to receive volume activity notifications
from the driver. This includes new volume, delete volume, and out of disk
space for a volume.
Arguments:
ControlHandle - the HANDLE from SrCreateControlHandle.
pNotification - the buffer to hold the NOTIFICATION_RECORD.
NotificationLength - the length in bytes of pNotification
pOverlapped - an OVERLAPPED structure if async io is enabled on the
HANDLE.
Return Value:
ULONG - Completion status.
--***************************************************************************/
ULONG
WINAPI
SrWaitForNotification (
IN HANDLE ControlHandle,
OUT PSR_NOTIFICATION_RECORD pNotification,
IN ULONG NotificationLength,
IN LPOVERLAPPED pOverlapped OPTIONAL
);
/***************************************************************************++
Routine Description:
SrSwitchAllLogs is used to cause the filter to close all of the open
log files on all volumes, and use new log files. this is used so that
another process can parse these files without worrying about the filter
writing to them. use this to get a consistent view of the restore point.
Arguments:
ControlHandle - the HANDLE from SrCreateControlHandle.
Return Value:
ULONG - Completion status.
--***************************************************************************/
ULONG
WINAPI
SrSwitchAllLogs (
IN HANDLE ControlHandle
);
/***************************************************************************++
Routine Description:
SrDisableVolume is used to temporarily disable monitoring on the
specified volume. this is reset by a call to SrReloadConfiguration.
There is no EnableVolume.
Arguments:
ControlHandle - the HANDLE from SrCreateControlHandle.
pVolumeName - the name of the volume to disable, in the nt format of
\Device\HarddiskDmVolumes\PhysicalDmVolumes\BlockVolume3.
Return Value:
ULONG - Completion status.
--***************************************************************************/
ULONG
WINAPI
SrDisableVolume (
IN HANDLE ControlHandle,
IN PWSTR pVolumeName
);
#define _SR_REQUEST(ioctl) \
((((ULONG)(ioctl)) >> 2) & 0x03FF)
#define SR_CREATE_RESTORE_POINT 0
#define SR_RELOAD_CONFIG 1
#define SR_START_MONITORING 2
#define SR_STOP_MONITORING 3
#define SR_WAIT_FOR_NOTIFICATION 4
#define SR_SWITCH_LOG 5
#define SR_DISABLE_VOLUME 6
#define SR_GET_NEXT_SEQUENCE_NUM 7
#define SR_NUM_IOCTLS 8
#define IOCTL_SR_CREATE_RESTORE_POINT CTL_CODE( FILE_DEVICE_UNKNOWN, SR_CREATE_RESTORE_POINT, METHOD_BUFFERED, FILE_WRITE_ACCESS )
#define IOCTL_SR_RELOAD_CONFIG CTL_CODE( FILE_DEVICE_UNKNOWN, SR_RELOAD_CONFIG, METHOD_NEITHER, FILE_WRITE_ACCESS )
#define IOCTL_SR_START_MONITORING CTL_CODE( FILE_DEVICE_UNKNOWN, SR_START_MONITORING, METHOD_NEITHER, FILE_WRITE_ACCESS )
#define IOCTL_SR_STOP_MONITORING CTL_CODE( FILE_DEVICE_UNKNOWN, SR_STOP_MONITORING, METHOD_NEITHER, FILE_WRITE_ACCESS )
#define IOCTL_SR_WAIT_FOR_NOTIFICATION CTL_CODE( FILE_DEVICE_UNKNOWN, SR_WAIT_FOR_NOTIFICATION, METHOD_OUT_DIRECT, FILE_READ_ACCESS )
#define IOCTL_SR_SWITCH_LOG CTL_CODE( FILE_DEVICE_UNKNOWN, SR_SWITCH_LOG, METHOD_NEITHER, FILE_WRITE_ACCESS )
#define IOCTL_SR_DISABLE_VOLUME CTL_CODE( FILE_DEVICE_UNKNOWN, SR_DISABLE_VOLUME, METHOD_BUFFERED, FILE_WRITE_ACCESS )
#define IOCTL_SR_GET_NEXT_SEQUENCE_NUM CTL_CODE( FILE_DEVICE_UNKNOWN, SR_GET_NEXT_SEQUENCE_NUM,METHOD_BUFFERED, FILE_WRITE_ACCESS )
//
// Names of the object directory, devices, driver, and service.
//
#define SR_CONTROL_DEVICE_NAME L"\\FileSystem\\Filters\\SystemRestore"
#define SR_DRIVER_NAME L"SR.SYS"
#define SR_SERVICE_NAME L"SR"
//
// The current interface version number. This version number must be
// updated after any significant changes to the interface (especially
// structure changes).
//
#define SR_INTERFACE_VERSION_MAJOR 0x0000
#define SR_INTERFACE_VERSION_MINOR 0x0005
//
// The name of the EA (Extended Attribute) passed to NtCreateFile(). This
// allows us to pass version information at the time the driver is opened,
// allowing SR.SYS to immediately fail open requests with invalid version
// numbers.
//
// N.B. The EA name (including the terminator) must be a multiple of eight
// to ensure natural alignment of the SR_OPEN_PACKET structure used as
// the EA value.
//
// 7654321076543210
#define SR_OPEN_PACKET_NAME "SrOpenPacket000"
#define SR_OPEN_PACKET_NAME_LENGTH (sizeof(SR_OPEN_PACKET_NAME) - 1)
C_ASSERT( ((SR_OPEN_PACKET_NAME_LENGTH + 1) & 7) == 0 );
//
// The following structure is used as the value for the EA named above.
//
typedef struct SR_OPEN_PACKET
{
USHORT MajorVersion; // caller's SR_INTERFACE_VERSION_MAJOR; mismatches fail the open
USHORT MinorVersion; // caller's SR_INTERFACE_VERSION_MINOR
} SR_OPEN_PACKET, *PSR_OPEN_PACKET;
//
// Registry paths.
//
#define REGISTRY_PARAMETERS L"\\Parameters"
#define REGISTRY_DEBUG_CONTROL L"DebugControl"
#define REGISTRY_PROCNAME_OFFSET L"ProcessNameOffset"
#define REGISTRY_STARTDISABLED L"FirstRun"
#define REGISTRY_DONTBACKUP L"DontBackup"
#define REGISTRY_MACHINE_GUID L"MachineGuid"
#define REGISTRY_SRSERVICE L"\\SRService"
#define REGISTRY_SRSERVICE_START L"Start"
//
// directory and file paths
//
#define SYSTEM_VOLUME_INFORMATION L"\\System Volume Information"
#define RESTORE_LOCATION SYSTEM_VOLUME_INFORMATION L"\\_restore%ws"
#define GENERAL_RESTORE_LOCATION SYSTEM_VOLUME_INFORMATION L"\\_restore"
#define RESTORE_FILELIST_LOCATION RESTORE_LOCATION L"\\_filelst.cfg"
//
// used as a prefix for restore point subdirs (e.g. \_restore\rp5)
//
#define RESTORE_POINT_PREFIX L"RP"
//
// used as a prefix for the backup files in a restore point subdir
// (e.g. \_restore\rp5\A0000025.dll)
//
#define RESTORE_FILE_PREFIX L"A"
#ifdef __cplusplus
}
#endif
#endif // _SRAPI_H_
|
class Solution {
public:
    /// Counts the primes strictly less than n using the Sieve of
    /// Eratosthenes.
    ///
    /// @param n  exclusive upper bound; any n <= 2 yields 0.
    /// @return   the number of primes p with p < n.
    int countPrimes(int n) {
        if (n <= 2)
            return 0;
        // std::vector replaces the original VLA `bool res[n]`: variable-
        // length arrays are not standard C++, and a large n would overflow
        // the stack. vector<char> sidesteps vector<bool>'s bit-proxy.
        std::vector<char> isPrime(n, 1);
        int nonPrimes = 2; // 0 and 1 are not prime
        for (int i = 2; i * i < n; ++i) {
            if (!isPrime[i])
                continue;
            // For odd primes i, i*i is odd and stepping by 2*i keeps j odd;
            // the even composites were already marked by the i == 2 pass.
            const int step = (i > 2) ? 2 * i : 2;
            for (int j = i * i; j < n; j += step) {
                if (isPrime[j]) {
                    isPrime[j] = 0; // first time this composite is seen
                    ++nonPrimes;
                }
            }
        }
        return n - nonPrimes;
    }
};
|
/* Copyright (c) MediaArea.net SARL. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license that can
* be found in the License.html file in the root of the source tree.
*/
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//
// Core functions
//
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//---------------------------------------------------------------------------
#ifndef FileRegisteredH
#define FileRegisteredH
//---------------------------------------------------------------------------
#include <string>
#include <vector>
namespace MediaConch {
//***************************************************************************
// Struct FileRegistered
//***************************************************************************
// Per-file bookkeeping record: identity, analysis progress, and the
// policy/display/verbosity settings attached to a registered file.
class FileRegistered
{
public:
    // Default state: nothing analyzed yet, all ids/settings unset (-1),
    // and the record flagged as needing an update.
    // FIX: `index` was previously left uninitialized; it is now zeroed.
    FileRegistered() : analyze_percent(0), file_id(-1), policy(-1), display(-1), verbosity(-1), report_kind(0),
                       index(0),
                       analyzed(false), implementation_valid(false), policy_valid(false), create_policy(false), need_update(true)
    {
    }

    // Copy constructor. All fields are copied verbatim except `options`:
    // any "File_TryToFix" entry is deliberately dropped on copy.
    // NOTE(review): the filtering is intentional in the original; the
    // reason is not visible from this file — confirm against callers.
    FileRegistered(const FileRegistered& f)
    {
        if (&f == this)
            return;
        this->filename = f.filename;
        this->filepath = f.filepath;
        this->file_id = f.file_id;
        this->policy = f.policy;
        this->display = f.display;
        this->verbosity = f.verbosity;
        this->analyze_percent = f.analyze_percent;
        this->analyzed = f.analyzed;
        this->implementation_valid = f.implementation_valid;
        this->policy_valid = f.policy_valid;
        this->create_policy = f.create_policy;
        this->index = f.index;
        this->report_kind = f.report_kind;
        this->need_update = f.need_update;
        this->generated_id = f.generated_id; // plain element-wise copy
        for (size_t x = 0; x < f.options.size(); ++x)
        {
            if (f.options[x] != "File_TryToFix")
                this->options.push_back(f.options[x]);
        }
    }

    std::string filename;              // display name of the file
    std::string filepath;              // directory/path portion
    std::vector<long> generated_id;    // ids of reports generated for this file
    std::vector<std::string> options;  // per-file option strings
    double analyze_percent;            // analysis progress, 0..100
    long file_id;                      // -1 while unassigned
    int policy;                        // selected policy id, -1 = none
    int display;                       // selected display id, -1 = none
    int verbosity;                     // -1 = default verbosity
    int report_kind;
    unsigned int index;                // position in the owning collection
    bool analyzed;                     // analysis finished
    bool implementation_valid;
    bool policy_valid;
    bool create_policy;
    bool need_update;                  // record changed since last refresh
};
}
#endif
|
Tuesday, November 26, 2013
Building a Book: Part 4, The End
So here we are, at the very end. The last 25% of your novel. Congrats to everyone who's made it this far, whether it was for NaNoWriMo, or has been years in the making. So, in the first three parts of this little series so far, we've gone over the introduction, and we've split the middle into the first half and the second half. This part is going over the very end.
The nice thing about reaching this point, is that compared to the rest, this should be easy. At this point there are three main sections to fill out.
First is the last burst of rising action. By now, your characters know what they're facing, have prepared for it, and barring a few attempts to slow them down by the malign force, they're on their way to confront the villain. In Fantasy, this is usually where the last big battle with the monster army starts. The heroes have to fight their way through all the small fry to get to the mastermind. Or maybe they have to storm the castle-base. You get the idea. There's no reason to jump right to the climax right at the 75% mark.
We've been over rising action and escalation in the previous two posts, so I think you know the routine by now. Top what happened last, but make sure you're not overshadowing the climax itself. Easy-peasy.
Is everybody here? Let's get this over with, then.
Now we get to the final battle. The inexorable question. Who wins, and who loses. Now, in some genres it's almost a foregone conclusion, but I write horror, so it does apply. The hero doesn't have to win every time, you know. There is one major problem that occasionally happens though with the grand climaxes in a good novel.
Yeah. It can sometimes be over that quick.
All that work leading up to the grand finale, the final confrontation, the apocalyptic battle. And suddenly, it's over in one page, maybe two. Sometimes it happens. I mean, in Fantasy it happens that along the journey, the heroes discover the villain's weakness and embark on a quest to obtain the one item which will defeat them. Then, at the end, all they have to do is use it. The villains powers are nullified, he becomes mortal, and off with his head. Of course, things don't necessarily have to be, or generally are that easy, but it does happen. When it does, don't panic. After all, this is your book, and it all comes down to what you want to happen. Maybe the item they quested for doesn't actually work, and the heroes have to try to win the hard way. Maybe the quest to get the item was a ruse all along and the point was to make the heroes stronger on their own to overcome the villain. Maybe the heroes or the villain makes an escape, setting up a sequel. You're a writer, you're creative. YOU figure it out.
Now, I'm not actually against plotting, per se, but I greatly prefer letting the characters lead the way. When this happens, you may not reach the climax exactly how you envisioned it to begin with. If you had previously envisioned your finale working out a certain way, it can cause havoc when your characters arrive in better or worse shape than you had planned. In my current work "Hannah", I envisioned the beginning of the climactic scene, where the beast returns to the family's house in the middle of the night. I honestly have no clue what happens next. Already the story has surprised me with the additions of characters I hadn't planned on and twists that weren't in my list of scenes to write in. It also allows me to enjoy writing it more, as I get to find out what happens as I write, instead of having hoops set up and knowing who does what, when, and how effective it is.
In my opinion, the climax should come somewhere between the 80 to 90% of the book. After all, people generally wouldn't appreciate it if you chopped off the monster's head and then had "The End" as the next two words. After all that time getting to know the characters, we want to know how things work out. How do they deal with those lost along the way? Do their relationships remain strong? Does Uncle Benny move to Alaska? That kind of stuff. People want to know if there is the final, happy ending.
There are two types of endings, really. The immediate, and the Epilogue. The immediate ending is the conversation between the characters about what happens now, as they walk through the castle back out the front door and travel home again. This is the ending which follows the climax without a break and is mostly telling the reader what the character's plans for the future are now that this villain has been vanquished. It commonly includes taking home their treasure, and settling down.
The other ending is the epilogue. Usually, with one of these, there is also a short immediate ending, to let you know that, yes, the main story is over and there's no more big monsters in the way. The meat of the epilogue happens a fair bit of time afterward. Anywhere from weeks, to months, or even years later. It shows the lives the characters are living now, along with commonly having them looking back on what has happened since as well as plans for the future, and it is generally a more satisfying ending.
Now, at this point, I have to face reality and realize that there's no way I'm finishing "Hannah" on time for the end of the month. If I can get up the gumption to get back to work on it, I could probably get up to 40,000 by November 30th, but I'm not really all that concerned. Over half a book in one month is still a heck of an accomplishment and I should easily get the first draft done by the end of the year. So, expect this series to continue once I get to that point.
Anyway, my fellow writers, and anyone who likes this month's posts. Add in your email in the top right to get a notice and a link for when I do a new post. So keep writing, and Happy Thanksgiving to all!
~ Shaun
Sunday, November 24, 2013
Personal Rant #3: Support
In Stephen King's book On Writing, he talks about the early days, when he was still a struggling English teacher and his most lucrative work was a short story sold to Playboy Magazine. He talks about his wife, who also worked full-time and the glamorous trailer, dying car, and pile of bills they shared. He says at one point "If she had said to me, Steve, it's time to put the writing aside and take a position that can support your family, I would've put away the typewriter right then and there. But that statement never came." Of course, one thing that can be proof of is that if you love someone, then nothing else really matters as long as the bare necessities are covered. The other thing that is proof of, is how important it is to have people that approve of and support you in what you want to do.
I'm not talking about financially, although at the worst times, it can come down to the choice between keeping a roof overhead and food on table or chasing a dream. After all, unless you're a pokemon, you can't eat dreams. I'm talking about support emotionally and mentally.
Specifically, I'm talking about writing, but this applies to anything you want to do. A lot of things can be done solely on your own back, but a lot of things aren't that clear-cut. Writing is a very solitary business. Sitting at the computer, typing away for hours every day, sacrificing time that could be spent with friends, family, relaxing, or even at another job. For weeks, then months, it can drag on. And despite that it's not particularly physically demanding, it can be a very draining experience. People aren't meant to be solitary creatures and when doing something like that, they need the support of those around them. They need to be assured that the sacrifices will be worth it and that the parts of the life being sacrificed stand behind you to help you do what you want to do.
When you don't have support, it makes things that much harder.
When you turn down hanging out with friends to write. When you sit at home instead of going out to ensure you can afford the cover, editing, and marketing that your work will need to be professional. When you take your work with you to family gatherings to do. When you do all those things and people tell you that you need to put it all away and get a "real job", it is an incredible weight on its own.
Now, I'm not talking about constructive criticism. Sometimes, people will want to do things they just aren't good at, and nor will they ever be. At some point, someone who is aware of the sacrifices made and that has objectively looked at the end result should probably sit down and talk to them about it, but honestly, and because it's true. Not because they just don't believe in them.
But when people who haven't looked at your work, who either aren't aware of or care about the sacrifices already made are saying things like "Get a real job" and "It's nice, but how long until you start making money?", it's more of a burden than having people say nothing at all. They might mean well enough, but all they're doing is making an already difficult task even harder with their own ignorance.
Three guesses which end of that spectrum I'm at, and the first two don't count.
My family isn't exactly the best at being supportive. Often they do it at the wrong times for the wrong reasons, and then don't when it is actually appropriate. Those comments I listed above: "Get a real job" and "You need to make some money" are both things I've been told multiple times, by people who have no idea how much effort, money, or time has gone into this. Nor have any of them actually read any of my work, despite a few of them buying the books. I can't even ASK them to support me, by something as simple as sharing posts to let their friends know my latest book is out or offer my book cards at their meetings or what-have-you. I tried that twice. Once, I was told sure they would and I gave them a small stack of cards. That stack was sitting untouched exactly where I had put it a month later. The second time, I got the full eye-roll.
So for those of you that are so oblivious, I suggest the next time you're going to whine about someone following their dream, do it in a mirror first. See how you look when you say it, and if you can, try to imagine what that might feel like to be on that other end.
For those of you who have to deal with family and friends that aren't supportive, remember, you're not writing for them. You're writing for yourself. You're writing for the people that actually READ your works. And, you're writing because for one reason or another, you just have to. Power on through it regardless, and all on your own if that's what it comes down to. The only person you should care about letting down in this situation, is yourself.
I'm willing to bet more people fail to achieve their dreams because the people that should have been supporting them turned their backs on them instead, more than any other reason.
Tuesday, November 19, 2013
Building a Book: Part 3
Congratulations, if you're working on a book for NaNoWriMo or just as you read along with my series here, you should be at the halfway point, or just past by now. To remind you of where we are and how a book generally goes, I'm going to start with this image again.
Now, as we discussed last time, the middle of the book is usually the hardest part to write. The first 25 - 50% being the worst of the section. Now that we're getting into the meat of the story, things get a little bit easier. From 50 to 75% is where things start to get fun and hell starts breaking loose.
Now, to start, part of this is a continuation of what you did in the first half, with rising action, and worldly and character reactions. However, a lot of what came before should be done with unless you're aiming for some unexpected twist.
Characters that we first met in the introduction should be fully fleshed out by this point, with backgrounds explained, as well as character flaws, hopes, dreams, and partially why they find themselves in such a predicament as the story suggests. Minor characters that we met later on, can have a bit of time devoted to them to help explain their impact and so that we care when something happens to them, but that should be kept to a minimum for the most part. After all, characters we're meeting this late are likely to be victim fodder or there to give us an insight into how the main characters are starting to appear to the outside world. Things akin to "Oh my, Anne said something living under the Rhododendron bushes ate her dog. I think she's going crazy."
The main focus at this point is the rising action and the escalation. It's generally about this point that the main characters start putting two and two together as things get more and more out of hand and the story almost changes to a race to reach the climax. Now, while, depending on the story, this can be fairly smooth sailing at this point compared to the first half of the book, it can also get confusing and often, things fall short of what we expected. When it happens that scenes don't seem to stretch as far as we want, it can be easy to get lost and not know what else to add. Also, escalation can become an issue when you have between several to over a dozen separate scenes to add suspense and move things along.
Pictured: One form of escalation. (Ok, break's over.)
Now, for a suggestion of how to tackle these issues. I'm going to talk about how I tackled the issues for my work for the month. Before I even started to write, I made out a list of a dozen scenes and ideas I could incorporate into the work. I then organized that list into the order I thought worked the best in terms of escalation (and there were a few that were pretty close in terms of the suspense and fear they provoke), and then had a few friends look it over and put them in the order they thought the list should go in. Now, granted, most of them were looking at the list with no idea what the characters were like and had little to go on other than the very basic ideas on the list, but most of them came out the same I had envisioned, so I'm pretty sure I got the order right.
You'll commonly find though, that things change as you write, and that tends to be a good thing. It's one thing to have a plot and pre-set events, but forcing characters along from one to the next just to jump through the hoop usually leads to characters making unbelievable choices given what we know about them and how much they're aware of the situation. (Really, is there anyone out there who hasn't watched a horror movie and screamed "Don't go in there!!" at some point?) I've found in the writing that while the order of some things are intact, some have been switched up as the story progresses more organically. It's also something to keep in mind that the story usually lends itself to suspense when done right, and it's not just an issue of "How suspenseful is this scene?" but an issue of "How suspenseful is this scene in the current context of the story?". When things take off on their own, it can occasionally happen that by the time you reach a certain scene, the suspense has already been ratcheted up so high that the scene doesn't add anything more to it. When that happens, you need to look at it objectively and ask if you can change things realistically so that it works, or whether the story is better off without it. For NaNoWriMo though, we're focused on quantity over quality, and cuts like that are made for revisions anyway, so for now, add it all in, figure out what works and what doesn't later.
Then there's the climax. The ultimate high point you've been reaching for this whole time. You have to make sure every scene escalates, and reaches for it, but that nothing eclipses it before you get there, or gives away a final plot twist. While some would include the climax at the tail end of this section, I think if your story maxes out at 75% or less, you're probably moving too quickly. Despite the graph above, the falling action and resolution shouldn't take up another 25% of your book. If things need that much explaining after the final confrontation/reveal, it's another hint that you might need to go back and take another look. That's more a topic for next time though.
In the meantime, keep writing, let Hell slowly break loose in your world, and enjoy the ride, because if you aren't enjoying it while you write it, chances are readers aren't going to enjoy it as they read it either.
~ Shaun
Tuesday, November 12, 2013
Building a Book: Part 2
So you've got your characters, you've got your ideas, and you've done introductions all around.
Hi. My name is Jared, and I'm the antagonist. Although none of you know that yet.
This is where things start to get tricky. That funny little space between the beginning and the end commonly known as, The Middle.
Now, for the purposes of this discussion, (and to make this little series last the whole month.), we're going to focus on the front half of the middle. Let's say, from 25% to 50%. A normal story, works like you see below. You have the exposition, or the introduction, where you introduce the characters, setting, and occasionally the beginning of the conflict. In a novel, you have a lot more space to work with, so you'll generally go about, allowing the readers to get a feel for the characters, while still trying to drop a hint here or there to keep things interested and to provide a little bit of foreshadowing. In current novels, this introduction section should never be more than 25% of the book. Older novels might stretch that to 35 or even 50%, but that's asking a lot from today's audience who are used to 15 second commercials, 24 hour delivery, and 2 gigabyte download speeds.
After the introduction, is the rising action part of the story. In a novel, this section can easily stretch upwards of 25,000 words by itself. This can also be the hardest part of a novel to write. After all, you've introduced the characters, the setting, maybe the start of the conflict, but it's nowhere near time for the bad stuff to happen and you've got a long ways to go to get to that point. So what do you fill the time with so that you can hold your reader's attention and lead them along without getting bored?
Well, as the graph says, this is the start of the rising action. It's not like you can't do things here and there to keep things moving. In a horror movie, this is the point where you get the little things, like lights flicking on and off, maybe a door opens and closes when nobody's watching. A little further up the hill, the characters start to notice things themselves, but either nobody believes them, or they don't connect what happens to any impending sense of peril.
This is also the part where you fill in the world around your characters, and you delve into any needed back-story. Things which are important to understanding the characters, but which weren't necessarily Introduction material. After all, on a first date, you wouldn't try to impress someone with stories of your weird uncle who keeps over 100 named cockroaches as pets in his bedroom, or that your ex got a two-year prison sentence for assault just under two years ago.
So you've got relevant back-story, a few creaks and groans in the night, that isn't enough by itself to fill the space, so what else? Well, actually, that should do it. Remember, you're not just showing how the characters are reacting to what happens to them, you also need to establish the world around them and how the world reacts to the characters reactions. This goes a long way to making the story more believable, allowing it to better draw emotions out of the reader. For example, zombies are slowly making their way into a small town. The main character finds and kills one outside a local store. Obviously there are going to be witnesses, as well as no small amount of blood on the character's hands. The question is, if the rest of the town isn't aware of the zombies, how would they react to this otherwise bloody murder that just happened in front of them? Assume there was a good reason and go back to their business like nothing happened, or are they likely to call the cops? If they don't call the cops, either because they knew it was a zombie or some other reason, it better be explained and believable, and not something like; "Oh, the guy was a prick anyway, he had it coming."
The last bit I want to go over is escalation. Remember, this is rising action. Things need to be progressive. If you have a massive first scene followed by a long period of quiet, it better be explained and for a good reason. Having things escalate helps to create a sense that things are getting worse, as opposed to getting better. There's a reason you see movies like Paranormal Activity start with rattling pots and pans, move up to doors slamming, and then we see the characters getting flung through the air. If it went in the opposite order, it would be calming down to nothing, and there would be no final climax to worry about.
Sorry about the mix-up, I'll be back later, say, around 3 AM.
With all that, you shouldn't have any real trouble keeping things interesting for the second quarter of your book. And, if done right, you'll have characters that are fully fleshed out, believable, and that the readers care about by the mid-point of your story. At that point, as the author, you should be ready for all hell to break loose.
~ Shaun
Tuesday, November 5, 2013
Building a Book
It's November, which means National Novel Writing Month, or NaNoWriMo. To jump right to more information on what that means, click here.
The jist of it is this. You have the month of November, or 30 days, to write a minimum 50,000 word novel. It means writing every day and has become a fairly widely known challenge that amateur writer and professional novelists alike step up to. I signed up last year, but was distracted by too many other things to really take part, so I'm giving it a go this year and through my blog here, I'm inviting you all to follow along. Hopefully this won't get too boring and will eventually go through the entire process of writing and publishing a novel, as I go through the writing, revising, editing, cover art, formatting, and finally, uploading and publishing to Kindle, Createspace, and/or Smashwords.
So, where else to start but the beginning?
When you sit down to start a book, it's a good idea to have a few things lined up already.
1. An idea. This seems obvious, but it is probably the most important thing. If you don't even have an idea, you probably don't have any business sitting down and starting a story. A basic idea doesn't really count either. You need something that will create conflict, and that will actually last and be entertaining for the length of story you want it to be. While it is possible to stretch a short story into a novel or compress a novel into a short story, the works will usually suffer for it, by focusing on things that don't matter, or by not giving people enough time to care about the characters, and thus, the story. Now, that doesn't mean your idea can't be simple. Lots of simple ideas have plenty of depth to them. For instance, the dead rising and attacking the living. It's a simple idea, but once you get into the real connotations of it, and what it means, you find there is so much more going on. The idea of the dead rising up and attacking the living is a simple idea, but in the writing of a story about that, you get into such themes as how people react to that in general, how they react to seeing dead family members, how they protect themselves and how they stop/survive/or die in the ensuing chaos.
2. Characters. Obviously, if you have an idea, you need people that that idea happens to. The more fleshed out those characters are, the better. Even in short stories, caring about the characters means caring about what happens to them, which equals caring about the story, and that is how a lot of the best stories are made. So whether you have one character throughout the entire piece, a family, or even the population of an entire town, you need to show that these are real people, worthy of compassion. They need to have strengths and weaknesses, flaws, pasts, and hopes for the future. One point I want to make, when you have multiple characters introduced, you need to make sure they are all included in the story. If you have a family that all lives together, you can't get away with focusing on one member of the family and have everyone else walking around like everything is normal. Even if they aren't affected directly by whatever is influencing the main character, they will react to the changes in the main character, despite the way the world is sometimes shown these days; most people will not just accept the statement that nothing is wrong from someone they care about when there is obviously a change in their behavior, demeanor, or look. So even when a story is focused around one character, keep in mind there are people around him, reacting to what he does and however he expresses what's happening to him.
The Beginning
The first chapter is one of the most important. First impressions matter, and in telling a story, it's no different. You need to establish the quality of your writing as something worthy of the readers time, in addition to introducing the major characters, what they look like, a sense of who they are and setting up the story to come. Then there is the Hook.
The Hook is what gets people to read past the first few pages or the first chapter. It is the very beginning of the story, told in a way that makes people want to read more. It is the hint that things are about to go very, very wrong for the people you've just introduced. (Or, at the very least, that things are about to change for them, if you're not writing horror or some kind of action/thriller story.) This is important, people are used to instant gratification these days and books which take more than 25% of their length to really get in gear are going to lose a lot of readers before anything good starts.
So, how is my progress coming along?
My idea is basically Cujo meets The Exorcist (Horror, surprising, I know). Before November 1st, I sat down and hashed out a general outline, with a dozen plot points to hit through the story, in addition to the order I wanted them in. My main characters have been named and described, as well as some of their good points and their flaws. My hook is in place, and while it hasn't been set, I am comfortable the bait on it so far will tempt more people to bite than to not. This is still only the 5th, though, so there is still a long way to go. Hopefully you'll all keep up with me.
~ Shaun
|
Protecting your skin and checking it for changes are keys to preventing another melanoma or catching one in an early, treatable stage.
Exposure to ultraviolet (UV) rays produced by the sun increases your risk of melanoma. Here’s how to protect your skin from the sun’s UV rays:
- Cover your skin with clothing, including a shirt and a hat with a broad brim.
- When outside, try to sit in shady areas.
- Avoid exposing your skin to the sun between 10:00 a.m. and 2:00 p.m. standard time or 11:00 a.m. and 3:00 p.m. daylight saving time.
- Use sunscreens with a sun protection factor (SPF) of 15 or more on skin that will be exposed to the sun.
- Wear sunglasses with 99% or 100% UV absorption to protect your eyes.
- Don't use sun lamps or tanning booths.
Check your skin regularly and have someone help you check areas you can’t see, such as your back and buttocks, scalp, underneath the breasts of women, and the backs of the legs. If you notice a new, changing or an irregular-looking mole, show it to a doctor experienced in recognizing skin cancers, such as a dermatologist. This may include large, irregular shape with a border that is not smooth and even, more than one color, or irregular texture. Your doctor may monitor the mole or recommend removing it
Contact your doctor if you discover a mole that is new, has changed, or looks suspicious: large or of irregular shape, color, or texture.
- Reviewer: Brian Randall, MD
- Review Date: 04/2013 -
- Update Date: 04/09/2013 -
|
It’s All About Customers!
Your success depends on finding enough customers before you run out of investment money.
Every decision a growing company makes should be measured as to if it will help you get enough customers in the limited time you have to succeed.
How much you spend on equipment, what training you invest in, and how you spend your time must be justified by how these concerns will produce customers.
First time owners instead get hung up on being the best cleaner. What good will it do if you are the best when no one knows you exist? Yes, it sure helps to know what you are doing. But again, if your phone isn’t ringing, it will all be wasted effort. Finding the balance is critical.
Not that hard
It’s actually not that complicated to succeed. Almost anyone can do it. The reason so few companies actually make it though, is that most have never been trained to start a business. New owners often think they can learn as they go. They think, “How hard can it be?” What they don’t understand is that the simple steps to success are buried amongst thousands of wasted things to do. Without someone to point out which tasks should and shouldn’t be done, it will be a random guessing game. The price for failing to succeed is just too high to leave it up to chance.
Opening a four digit bicycle lock is easy for a child who knows the combination; but that same lock will stop potential bike thieves in their tracks because it is too difficult to open if the combination is unknown.
A guide is needed
carpet-cleaning-successAn experienced guide holds the simple solution for building a business. Getting the profitable customers depends on knowing what to do and, what not to do. Having someone show you the most direct path is what makes starting and growing a business easy and fast.
Critical decisions dealing with operations and strategy, to administration and marketing, must be made. The better those decisions are, the greater speed and chance of success. Every poor choice made consumes limited resources and time.
How do you know how much should be spent on equipment? What is needed for a website, is SEO needed, does social media produce jobs? How much should be charged, charge by the room or square foot, which advertising sources should be used?
A good guide knows how to make the most of your time, energy, and money. There should never be a doubt as to what are the most important projects you should be working on.
Benefits of using a guide
There are only a limited number of building blocks that go into creating a profitable cleaning company. If you know what these are, it takes very little time until you can be making an income and profit from it. This is similar to the child being able to easily open a bicycle lock since he knows the correct combination. It makes no sense to try to randomly attempt to start from scratch and try to figure it all out on your own.
The do-it-yourself approach takes longer because you need to research all of the options and then decide which one is best for your situation. On your own, you will have to do this for each and every decision you make. With an experienced guide, you can jump directly to the best answer.
Single Truck Success Master
Single Truck Success MasterSteve Marsh is the undisputed industry master at taking companies from startup to net- ting $100,000 annually and beyond (Take home income for the owner).
• Unmatched technical expertise: For over 15 years while he was running his company, Steve was the most certified and experienced hands-on carpet cleaner in the country. As a Senior Carpet Inspector, instructor (20 years), Master Cleaner, and recognized as the leading on-location upholstery cleaning expert and trainer in the country, there is no one more experienced to guide you through the technical learning process of cleaning.
• Industry's Yelp and Angie's List expert: Steve has written many articles on this subject for multiple magazines. He has personally helped dozens of companies shape their company profiles and determined how to get the most for both free and paid services from these consumer review sites.
Two Step Process
Business success requires two steps. The first is to create a continuous flow of profitable customers. Then the second step is to transform those customers into far more profitable repeat and referral clients. Most people think this can be done in one step and struggle attempting to do it that way.
The first step is what this program is all about. Everything you need to know to build your business to the point of a full schedule of profitable customers is included, along with guidance to help you make the best decisions and keep you growing at the pace you choose.
The second step is the Be Competition Free program which is a complete and turnkey system which transforms your customer base into a profitable repeat and referral clientele.
You get to decide how fast you want to grow your business. It is possible to have help and encouragement on a daily basis.
Program Details and Pricing
Check out the two programs below to find out more.
|
Archaeological Site of Rehman Dheri
Department of Archaeology and Museums
Property names are listed in the language in which they have been submitted by the State Party.
The archaeological site of Rehman Dheri consists of a rectangular shaped mound covering some twenty two hectares and standing 4.5 metres above the surrounding field. The final occupational phase of the site is clearly visible on the surface of the mound by eye and also through air photographs. It consisted of a large walled rectangular area with a grid iron network of streets and lanes dividing the settlement into regular blocks. Walls delineating individual buildings and street frontages are clearly visible in the early morning dew or after rain and it is also possible to identify the location of a number of small-scale industrial areas within the site marked, as they are, by eroding kilns and scatters of slag. The surface of the mound is littered with thousands of sherds and artefacts, slowly eroding out of room fills.
The archaeological sequence at the site of Rehman Dheri is over 4.5 metres deep, and covers a sequence of over 1,400 years beginning at c.3,300 BC. The site represents the following periods:
I c.3300-2850 BC
II c.2850-2500 BC
III c.2500-1900 BC
It is generally accepted that the settlement received its formal plan in its earliest phases and that subsequent phases replicated the plan over time. Although its excavators have cut a number of deep trenches or soundings into the lower levels, the areas exposed have been too limited to undertake a study of change in layout and the spatial distribution of craft activities. It was abandoned at the beginning of the mature Indus phase by the middle of the third millennium BC and subsequent activities, greatly reduced, are only recorded on the neighbouring archaeological mound, Hisam Dheri. The plan of the Early Harappan settlement is therefore undisturbed by later developments and, as such, represents the most exceptionally preserved example of the beginning of urbanisation in South Asia.
|
/*---------------------------------------------------------------------------*
| PDFlib - A library for generating PDF on the fly |
+---------------------------------------------------------------------------+
| Copyright (c) 1997-2004 Thomas Merz and PDFlib GmbH. All rights reserved. |
+---------------------------------------------------------------------------+
| |
| This software is subject to the PDFlib license. It is NOT in the |
| public domain. Extended versions and commercial licenses are |
| available, please check http://www.pdflib.com. |
| |
*---------------------------------------------------------------------------*/
/* $Id: p_annots.c 14574 2005-10-29 16:27:43Z bonefish $
*
* PDFlib routines for annnotations
*
*/
#include "p_intern.h"
/* Annotation types */
/* Annotation types supported by this module; selects the /Subtype
 * (and per-type keys) emitted in pdf_write_page_annots(). */
typedef enum {
    ann_text, ann_locallink,
    ann_pdflink, ann_weblink,
    ann_launchlink, ann_attach
} pdf_annot_type;
/* icons for file attachments and text annotations */
/* Icons for file attachments and text annotations.
 * NOTE: enumerator order must match pdf_icon_names[] below, since the
 * enum value is used directly as an index into that array. */
typedef enum {
    icon_file_graph, icon_file_paperclip,
    icon_file_pushpin, icon_file_tag,
    icon_text_comment, icon_text_insert,
    icon_text_note, icon_text_paragraph,
    icon_text_newparagraph, icon_text_key,
    icon_text_help
} pdf_icon;
/* Annotations */
/* One annotation on the current page; annotations form a singly linked
 * list hanging off p->annots. Which members are meaningful depends on
 * `type` (see the per-field notes); unused string members stay NULL so
 * pdf_cleanup_page_annots() can free them unconditionally per type. */
struct pdf_annot_s {
    pdf_annot_type type;        /* used for all annotation types */
    pdc_rectangle rect;         /* used for all annotation types */
    pdc_id obj_id;              /* used for all annotation types */
    pdf_annot *next;            /* used for all annotation types */

    pdf_icon icon;              /* attach and text */
    char *filename;             /* attach, launchlink, pdflink,weblink*/
    char *contents;             /* text, attach, pdflink */
    char *mimetype;             /* attach */
    char *parameters;           /* launchlink */
    char *operation;            /* launchlink */
    char *defaultdir;           /* launchlink */

    char *title;                /* text */
    int open;                   /* text */
    pdf_dest dest;              /* locallink, pdflink */

    /* -------------- annotation border style and color --------------
     * Snapshot of the PDF object's border state, taken when the
     * annotation is created (see pdf_add_annot()). */
    pdf_border_style border_style;
    float border_width;
    float border_red;
    float border_green;
    float border_blue;
    float border_dash1;
    float border_dash2;
};
/* PDF /S (border style) names; indexed by the pdf_border_style enum,
 * so the order here must match that enum's declaration. */
static const char *pdf_border_style_names[] = {
    "S",        /* solid border */
    "D",        /* dashed border */
    "B",        /* beveled (three-dimensional) border */
    "I",        /* inset border */
    "U"         /* underlined border */
};
/* PDF /Name values for annotation icons; indexed by pdf_icon, so the
 * order must match that enum (file icons first, then text icons). */
static const char *pdf_icon_names[] = {
    /* embedded file icon names */
    "Graph", "Paperclip", "Pushpin", "Tag",
    /* text annotation icon names */
    "Comment", "Insert", "Note", "Paragraph", "NewParagraph", "Key", "Help"
};
/* flags for annotation properties */
/* Bit flags for the annotation /F (flags) entry; values follow the
 * PDF annotation flag bit assignments and may be OR-ed together. */
typedef enum {
    pdf_ann_flag_invisible = 1,
    pdf_ann_flag_hidden = 2,
    pdf_ann_flag_print = 4,
    pdf_ann_flag_nozoom = 8,
    pdf_ann_flag_norotate = 16,
    pdf_ann_flag_noview = 32,
    pdf_ann_flag_readonly = 64
} pdf_ann_flag;
/* Initialize the document-level annotation state in p:
 * default border appearance plus the auxiliary launchlink strings. */
void
pdf_init_annots(PDF *p)
{
    /* no auxiliary launchlink parameters recorded yet */
    p->launchlink_parameters = NULL;
    p->launchlink_operation = NULL;
    p->launchlink_defaultdir = NULL;

    /* default border: solid, one unit wide, black */
    p->border_style = border_solid;
    p->border_width = (float) 1.0;
    p->border_red = (float) 0.0;
    p->border_green = (float) 0.0;
    p->border_blue = (float) 0.0;

    /* default dash pattern (only used with a dashed border style) */
    p->border_dash1 = (float) 3.0;
    p->border_dash2 = (float) 3.0;
}
/* Free the auxiliary launchlink strings held in p, resetting each
 * pointer to NULL so the cleanup is safe to call more than once. */
void
pdf_cleanup_annots(PDF *p)
{
    char **auxfields[3];
    int i;

    auxfields[0] = &p->launchlink_parameters;
    auxfields[1] = &p->launchlink_operation;
    auxfields[2] = &p->launchlink_defaultdir;

    for (i = 0; i < 3; i++) {
        if (*auxfields[i] != NULL) {
            pdc_free(p->pdc, *auxfields[i]);
            *auxfields[i] = NULL;
        }
    }
}
/* Reset all string/pointer members of a freshly allocated annotation
 * to NULL so the type-specific cleanup code can free them blindly.
 * Fix: the original assigned ann->mimetype = NULL twice (harmless but
 * a copy/paste slip); the duplicate is removed. */
static void
pdf_init_annot(PDF *p, pdf_annot *ann)
{
    (void) p;

    ann->next = NULL;
    ann->filename = NULL;
    ann->contents = NULL;
    ann->mimetype = NULL;
    ann->parameters = NULL;
    ann->operation = NULL;
    ann->defaultdir = NULL;
    ann->title = NULL;
}
/* Write annotation border style and color.
 * Emits the modern /BS dictionary (required by Acrobat 6), the
 * old-style PDF 1.1 /Border array for backwards compatibility, and the
 * /C color entry. The exact float comparisons below are intentional:
 * they detect the untouched defaults set in pdf_init_annots(), in
 * which case nothing needs to be written. */
static void
pdf_write_border_style(PDF *p, pdf_annot *ann)
{
    /* don't write the default values */
    if (ann->border_style == border_solid &&
        ann->border_width == (float) 1.0 &&
        ann->border_red == (float) 0.0 &&
        ann->border_green == (float) 0.0 &&
        ann->border_blue == (float) 0.0 &&
        ann->border_dash1 == (float) 3.0 &&
        ann->border_dash2 == (float) 3.0)
        return;

    /* attachments get only the color entry, no border dictionaries */
    if (ann->type != ann_attach) {
        pdc_puts(p->out, "/BS");
        pdc_begin_dict(p->out);                 /* BS dict */
        pdc_puts(p->out, "/Type/Border\n");

        /* Acrobat 6 requires this entry, and does not use /S/S as default */
        pdc_printf(p->out, "/S/%s\n",
            pdf_border_style_names[ann->border_style]);

        /* Acrobat 6 requires this entry */
        pdc_printf(p->out, "/W %f\n", ann->border_width);

        if (ann->border_style == border_dashed)
            pdc_printf(p->out, "/D[%f %f]\n",
                ann->border_dash1, ann->border_dash2);

        pdc_end_dict(p->out);                   /* BS dict */

        /* Write the Border key in old-style PDF 1.1 format */
        pdc_printf(p->out, "/Border[0 0 %f", ann->border_width);

        if (ann->border_style == border_dashed &&
            (ann->border_dash1 != (float) 0.0 || ann->border_dash2 !=
            (float) 0.0))
            /* set dashed border */
            pdc_printf(p->out, "[%f %f]", ann->border_dash1, ann->border_dash2);

        pdc_puts(p->out, "]\n");
    }

    /* write annotation color */
    pdc_printf(p->out, "/C[%f %f %f]\n",
        ann->border_red, ann->border_green, ann->border_blue);
}
/* Emit the page's /Annots array. An object id is allocated here for
 * each annotation in the chain; the annotation objects themselves are
 * written out later by pdf_write_page_annots(). */
void
pdf_write_annots_root(PDF *p)
{
    pdf_annot *ann = p->annots;

    if (ann == NULL)
        return;

    pdc_puts(p->out, "/Annots[");
    while (ann != NULL) {
        ann->obj_id = pdc_alloc_id(p->out);
        pdc_printf(p->out, "%ld 0 R ", ann->obj_id);
        ann = ann->next;
    }
    pdc_puts(p->out, "]\n");
}
/* Write the annotation objects for the current page, using the object
 * ids preallocated in pdf_write_annots_root(), then write the actual
 * embedded-file streams for any file-attachment annotations.
 * The per-type cases differ only in the /Subtype and its specific
 * keys; /Rect and the border style are written for every type. */
void
pdf_write_page_annots(PDF *p)
{
    pdf_annot *ann;

    /* first pass: one annotation object per chain entry */
    for (ann = p->annots; ann != NULL; ann = ann->next) {
        pdc_begin_obj(p->out, ann->obj_id);     /* Annotation object */
        pdc_begin_dict(p->out);                 /* Annotation dict */

        pdc_puts(p->out, "/Type/Annot\n");
        switch (ann->type) {
            case ann_text:
                pdc_puts(p->out, "/Subtype/Text\n");
                pdc_printf(p->out, "/Rect[%f %f %f %f]\n",
                    ann->rect.llx, ann->rect.lly, ann->rect.urx, ann->rect.ury);

                pdf_write_border_style(p, ann);

                if (ann->open)
                    pdc_puts(p->out, "/Open true\n");

                if (ann->icon != icon_text_note)        /* note is default */
                    pdc_printf(p->out, "/Name/%s\n", pdf_icon_names[ann->icon]);

                /* Contents key is required, but may be empty */
                pdc_puts(p->out, "/Contents");

                if (ann->contents) {
                    pdc_put_pdfunistring(p->out, ann->contents);
                    pdc_puts(p->out, "\n");
                } else
                    pdc_puts(p->out, "()\n");   /* empty contents is OK */

                /* title is optional */
                if (ann->title) {
                    pdc_puts(p->out, "/T");
                    pdc_put_pdfunistring(p->out, ann->title);
                    pdc_puts(p->out, "\n");
                }

                break;

            case ann_locallink:
                pdc_puts(p->out, "/Subtype/Link\n");
                pdc_printf(p->out, "/Rect[%f %f %f %f]\n",
                    ann->rect.llx, ann->rect.lly, ann->rect.urx, ann->rect.ury);

                pdf_write_border_style(p, ann);

                /* preallocate page object id for a later page */
                if (ann->dest.page > p->current_page) {
                    while (ann->dest.page >= p->pages_capacity)
                        pdf_grow_pages(p);

                    /* if this page has already been used as a link target
                     * it will already have an object id.
                     */
                    if (p->pages[ann->dest.page] == PDC_BAD_ID)
                        p->pages[ann->dest.page] = pdc_alloc_id(p->out);
                }

                pdc_puts(p->out, "/Dest");
                pdf_write_destination(p, &ann->dest);
                pdc_puts(p->out, "\n");

                break;

            case ann_pdflink:
                pdc_puts(p->out, "/Subtype/Link\n");
                pdc_printf(p->out, "/Rect[%f %f %f %f]\n",
                    ann->rect.llx, ann->rect.lly, ann->rect.urx, ann->rect.ury);

                pdf_write_border_style(p, ann);

                /* GoToR action: destination in an external PDF file */
                pdc_puts(p->out, "/A");
                pdc_begin_dict(p->out);                 /* A dict */
                pdc_puts(p->out, "/Type/Action/S/GoToR\n");

                pdc_puts(p->out, "/D");
                pdf_write_destination(p, &ann->dest);
                pdc_puts(p->out, "\n");

                pdc_puts(p->out, "/F");
                pdc_begin_dict(p->out);                 /* F dict */
                pdc_puts(p->out, "/Type/Filespec\n");
                pdc_puts(p->out, "/F");
                pdc_put_pdfstring(p->out, ann->filename,
                    (int)strlen(ann->filename));
                pdc_puts(p->out, "\n");
                pdc_end_dict(p->out);                   /* F dict */

                pdc_end_dict(p->out);                   /* A dict */

                break;

            case ann_launchlink:
                pdc_puts(p->out, "/Subtype/Link\n");
                pdc_printf(p->out, "/Rect[%f %f %f %f]\n",
                    ann->rect.llx, ann->rect.lly, ann->rect.urx, ann->rect.ury);

                pdf_write_border_style(p, ann);

                pdc_puts(p->out, "/A");
                pdc_begin_dict(p->out);                 /* A dict */
                pdc_puts(p->out, "/Type/Action/S/Launch\n");

                /* Windows-specific launch parameters force a /Win dict;
                 * the strings are freed here since this is their last use */
                if (ann->parameters || ann->operation || ann->defaultdir) {
                    pdc_puts(p->out, "/Win");
                    pdc_begin_dict(p->out);             /* Win dict */
                    pdc_printf(p->out, "/F");
                    pdc_put_pdfstring(p->out, ann->filename,
                        (int)strlen(ann->filename));
                    pdc_puts(p->out, "\n");
                    if (ann->parameters) {
                        pdc_printf(p->out, "/P");
                        pdc_put_pdfstring(p->out, ann->parameters,
                            (int)strlen(ann->parameters));
                        pdc_puts(p->out, "\n");
                        pdc_free(p->pdc, ann->parameters);
                        ann->parameters = NULL;
                    }
                    if (ann->operation) {
                        pdc_printf(p->out, "/O");
                        pdc_put_pdfstring(p->out, ann->operation,
                            (int)strlen(ann->operation));
                        pdc_puts(p->out, "\n");
                        pdc_free(p->pdc, ann->operation);
                        ann->operation = NULL;
                    }
                    if (ann->defaultdir) {
                        pdc_printf(p->out, "/D");
                        pdc_put_pdfstring(p->out, ann->defaultdir,
                            (int)strlen(ann->defaultdir));
                        pdc_puts(p->out, "\n");
                        pdc_free(p->pdc, ann->defaultdir);
                        ann->defaultdir = NULL;
                    }
                    pdc_end_dict(p->out);               /* Win dict */
                } else {
                    pdc_puts(p->out, "/F");
                    pdc_begin_dict(p->out);             /* F dict */
                    pdc_puts(p->out, "/Type/Filespec\n");
                    pdc_printf(p->out, "/F");
                    pdc_put_pdfstring(p->out, ann->filename,
                        (int)strlen(ann->filename));
                    pdc_puts(p->out, "\n");
                    pdc_end_dict(p->out);               /* F dict */
                }

                pdc_end_dict(p->out);                   /* A dict */

                break;

            case ann_weblink:
                pdc_puts(p->out, "/Subtype/Link\n");
                pdc_printf(p->out, "/Rect[%f %f %f %f]\n",
                    ann->rect.llx, ann->rect.lly, ann->rect.urx, ann->rect.ury);

                pdf_write_border_style(p, ann);

                /* URI action; the URL is stored in ann->filename */
                pdc_puts(p->out, "/A<</S/URI/URI");
                pdc_put_pdfstring(p->out, ann->filename,
                    (int)strlen(ann->filename));
                pdc_puts(p->out, ">>\n");

                break;

            case ann_attach:
                pdc_puts(p->out, "/Subtype/FileAttachment\n");
                pdc_printf(p->out, "/Rect[%f %f %f %f]\n",
                    ann->rect.llx, ann->rect.lly, ann->rect.urx, ann->rect.ury);

                pdf_write_border_style(p, ann);

                if (ann->icon != icon_file_pushpin)     /* pushpin is default */
                    pdc_printf(p->out, "/Name/%s\n",
                        pdf_icon_names[ann->icon]);

                if (ann->title) {
                    pdc_puts(p->out, "/T");
                    pdc_put_pdfunistring(p->out, ann->title);
                    pdc_puts(p->out, "\n");
                }

                if (ann->contents) {
                    pdc_puts(p->out, "/Contents");
                    pdc_put_pdfunistring(p->out, ann->contents);
                    pdc_puts(p->out, "\n");
                }

                /* the icon is too small without these flags (=28) */
                pdc_printf(p->out, "/F %d\n",
                    pdf_ann_flag_print |
                    pdf_ann_flag_nozoom |
                    pdf_ann_flag_norotate);

                pdc_puts(p->out, "/FS");
                pdc_begin_dict(p->out);                 /* FS dict */
                pdc_puts(p->out, "/Type/Filespec\n");

                pdc_puts(p->out, "/F");
                pdc_put_pdfstring(p->out, ann->filename,
                    (int)strlen(ann->filename));
                pdc_puts(p->out, "\n");

                /* alloc id for the actual embedded file stream; note
                 * that obj_id is reused — the annotation object's own
                 * id has already been written at this point */
                ann->obj_id = pdc_alloc_id(p->out);

                pdc_printf(p->out, "/EF<</F %ld 0 R>>\n", ann->obj_id);
                pdc_end_dict(p->out);                   /* FS dict */

                break;

            default:
                pdc_error(p->pdc, PDF_E_INT_BADANNOT,
                    pdc_errprintf(p->pdc, "%d", ann->type), 0, 0, 0);
        }

        pdc_end_dict(p->out);           /* Annotation dict */
        pdc_end_obj(p->out);            /* Annotation object */
    }

    /* Write the actual embedded files with preallocated ids */
    for (ann = p->annots; ann != NULL; ann = ann->next) {
        pdc_id length_id;
        PDF_data_source src;

        if (ann->type != ann_attach)
            continue;

        pdc_begin_obj(p->out, ann->obj_id);     /* EmbeddedFile */
        pdc_puts(p->out, "<</Type/EmbeddedFile\n");

        if (ann->mimetype) {
            pdc_puts(p->out, "/Subtype");
            pdc_put_pdfname(p->out, ann->mimetype, strlen(ann->mimetype));
            pdc_puts(p->out, "\n");
        }

        if (pdc_get_compresslevel(p->out))
            pdc_puts(p->out, "/Filter/FlateDecode\n");

        /* stream length is not known yet; reference a forward object */
        length_id = pdc_alloc_id(p->out);
        pdc_printf(p->out, "/Length %ld 0 R\n", length_id);

        pdc_end_dict(p->out);           /* EmbeddedFile dict */

        /* write the file in the PDF */
        src.private_data = (void *) ann->filename;
        src.init = pdf_data_source_file_init;
        src.fill = pdf_data_source_file_fill;
        src.terminate = pdf_data_source_file_terminate;
        src.length = (long) 0;
        src.offset = (long) 0;

        pdf_copy_stream(p, &src, pdc_true);     /* embedded file stream */

        pdc_end_obj(p->out);            /* EmbeddedFile object */

        pdc_put_pdfstreamlength(p->out, length_id);

        if (p->flush & pdf_flush_content)
            pdc_flush_stream(p->out);
    }
}
/* Start a new page with an empty annotation chain. */
void
pdf_init_page_annots(PDF *p)
{
    p->annots = NULL;
}
/* Free the page's annotation chain. Which members must be released
 * depends on the annotation type (see the member/type table in
 * struct pdf_annot_s); optional strings are NULL-checked, mandatory
 * ones (e.g. filename for the link types) are freed unconditionally. */
void
pdf_cleanup_page_annots(PDF *p)
{
    pdf_annot *ann, *old;

    for (ann = p->annots; ann != (pdf_annot *) NULL; /* */ ) {
        switch (ann->type) {
            case ann_text:
                if (ann->contents)
                    pdc_free(p->pdc, ann->contents);
                if (ann->title)
                    pdc_free(p->pdc, ann->title);
                break;

            case ann_locallink:
                pdf_cleanup_destination(p, &ann->dest);
                break;

            case ann_launchlink:
                /* parameters/operation/defaultdir were already freed
                 * (and NULLed) while writing the /Win dict */
                pdc_free(p->pdc, ann->filename);
                break;

            case ann_pdflink:
                pdf_cleanup_destination(p, &ann->dest);
                pdc_free(p->pdc, ann->filename);
                break;

            case ann_weblink:
                pdc_free(p->pdc, ann->filename);
                break;

            case ann_attach:
                /* release the virtual-file lock taken in pdf__attach_file */
                pdf_unlock_pvf(p, ann->filename);
                pdc_free(p->pdc, ann->filename);
                if (ann->contents)
                    pdc_free(p->pdc, ann->contents);
                if (ann->title)
                    pdc_free(p->pdc, ann->title);
                if (ann->mimetype)
                    pdc_free(p->pdc, ann->mimetype);
                break;

            default:
                pdc_error(p->pdc, PDF_E_INT_BADANNOT,
                    pdc_errprintf(p->pdc, "%d", ann->type), 0, 0, 0);
        }

        /* advance before freeing the node itself */
        old = ann;
        ann = old->next;
        pdc_free(p->pdc, old);
    }

    p->annots = NULL;
}
/* Append a new annotation to the end of the page's annots chain,
 * stamping it with the document's current border settings. */
static void
pdf_add_annot(PDF *p, pdf_annot *ann)
{
    pdf_annot **tail;

    /* snapshot the current border state from p into the annotation */
    ann->border_style = p->border_style;
    ann->border_width = p->border_width;
    ann->border_red = p->border_red;
    ann->border_green = p->border_green;
    ann->border_blue = p->border_blue;
    ann->border_dash1 = p->border_dash1;
    ann->border_dash2 = p->border_dash2;

    ann->next = NULL;

    /* walk to the chain's terminating next-link and hook in the
     * new annotation; also covers the empty-chain case */
    for (tail = &p->annots; *tail != NULL; tail = &(*tail)->next)
        /* empty */ ;

    *tail = ann;
}
/* Store the annotation rectangle, transforming it from user
 * coordinates to default coordinates when the "usercoordinates"
 * parameter is in effect. */
static void
pdf_init_rectangle(PDF *p, pdf_annot *ann,
    float llx, float lly, float urx, float ury)
{
    pdc_rect_init(&ann->rect, llx, lly, urx, ury);

    if (p->usercoordinates == pdc_true)
        pdc_rect_transform(&p->gstate[p->sl].ctm, &ann->rect, &ann->rect);
}
/* Attach an arbitrary file to the PDF. Note that the actual
* embedding takes place in PDF_end_page().
* description, author, and mimetype may be NULL.
*/
/* Keyword-to-enum mapping for file-attachment icon names accepted by
 * the public API; terminated by a NULL sentinel entry. */
static const pdc_keyconn pdf_icon_attach_keylist[] =
{
    {"graph",     icon_file_graph},
    {"paperclip", icon_file_paperclip},
    {"pushpin",   icon_file_pushpin},
    {"tag",       icon_file_tag},
    {NULL, 0}
};
/* Internal worker for PDF_attach_file{,2}: create a file-attachment
 * annotation. The file contents are not read here — the actual
 * embedding happens when the page is written (pdf_write_page_annots).
 * description, author, and mimetype may be NULL; icon may be NULL or
 * empty (defaults to "pushpin").
 * NOTE(review): pdc_error appears to abort control flow (no code path
 * continues after it) — confirm it longjmps, otherwise the NULL
 * returns below would be dereferenced. */
static void
pdf__attach_file(
    PDF *p,
    float llx,
    float lly,
    float urx,
    float ury,
    const char *filename,
    const char *description,
    int len_descr,
    const char *author,
    int len_auth,
    const char *mimetype,
    const char *icon)
{
    static const char fn[] = "pdf__attach_file";
    pdf_annot *ann;
    pdc_file *attfile;

    if (filename == NULL || *filename == '\0')
        pdc_error(p->pdc, PDC_E_ILLARG_EMPTY, "filename", 0, 0, 0);

    /* probe that the file is readable now; it is only re-read later
     * when the embedded stream is written */
    if ((attfile = pdf_fopen(p, filename, "attachment ", 0)) == NULL)
        pdc_error(p->pdc, -1, 0, 0, 0, 0);

    /* keep a virtual (in-memory) file alive until the page is written;
     * unlocked in pdf_cleanup_page_annots() */
    pdf_lock_pvf(p, filename);
    pdc_fclose(attfile);

    ann = (pdf_annot *) pdc_malloc(p->pdc, sizeof(pdf_annot), fn);

    pdf_init_annot(p, ann);

    ann->type = ann_attach;
    pdf_init_rectangle(p, ann, llx, lly, urx, ury);

    if (icon == NULL || !*icon)
        icon = "pushpin";
    ann->icon = (pdf_icon) pdc_get_keycode(icon, pdf_icon_attach_keylist);
    if (ann->icon == PDC_KEY_NOTFOUND)
        pdc_error(p->pdc, PDC_E_ILLARG_STRING, "icon", icon, 0, 0);

    ann->filename = (char *) pdc_strdup(p->pdc, filename);

    /* convert to the PDF hypertext encoding; NULL input yields NULL */
    ann->contents = pdf_convert_hypertext(p, description, len_descr);
    ann->title = pdf_convert_hypertext(p, author, len_auth);

    if (mimetype != NULL) {
        ann->mimetype = (char *) pdc_strdup(p->pdc, mimetype);
    }

    pdf_add_annot(p, ann);
}
/* Public API: attach a file to the current page using NUL-terminated
 * strings; thin wrapper that computes the string lengths and defers to
 * pdf__attach_file(). */
PDFLIB_API void PDFLIB_CALL
PDF_attach_file(
    PDF *p,
    float llx,
    float lly,
    float urx,
    float ury,
    const char *filename,
    const char *description,
    const char *author,
    const char *mimetype,
    const char *icon)
{
    static const char fn[] = "PDF_attach_file";

    if (pdf_enter_api(p, fn, pdf_state_page,
        "(p[%p], %g, %g, %g, %g, \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")\n",
        (void *) p, llx, lly, urx, ury, filename,
        pdc_strprint(p->pdc, description, 0),
        pdc_strprint(p->pdc, author, 0), mimetype, icon))
    {
        int len_descr = description ? (int) pdc_strlen(description) : 0;
        int len_auth = author ? (int) pdc_strlen(author) : 0;

        pdf__attach_file(p, llx, lly, urx, ury, filename,
            description, len_descr, author, len_auth, mimetype, icon) ;
    }
}
/* Public API: attach a file, with explicit lengths for description and
 * author (supports non-NUL-terminated/Unicode strings). The "reserved"
 * parameter is unused here. */
PDFLIB_API void PDFLIB_CALL
PDF_attach_file2(
    PDF *p,
    float llx,
    float lly,
    float urx,
    float ury,
    const char *filename,
    int reserved,
    const char *description,
    int len_descr,
    const char *author,
    int len_auth,
    const char *mimetype,
    const char *icon)
{
    static const char fn[] = "PDF_attach_file2";

    if (pdf_enter_api(p, fn, pdf_state_page,
        "(p[%p], %g, %g, %g, %g, \"%s\", %d, \"%s\", %d, "
        "\"%s\", %d, \"%s\", \"%s\")\n",
        (void *) p, llx, lly, urx, ury, filename, reserved,
        pdc_strprint(p->pdc, description, len_descr), len_descr,
        pdc_strprint(p->pdc, author, len_auth), len_auth, mimetype, icon))
    {
        pdf__attach_file(p, llx, lly, urx, ury, filename,
            description, len_descr, author, len_auth, mimetype, icon) ;
    }
}
/* Keyword-to-enum mapping for text-annotation icon names accepted by
 * the public API; terminated by a NULL sentinel entry. */
static const pdc_keyconn pdf_icon_note_keylist[] =
{
    {"comment",      icon_text_comment},
    {"insert",       icon_text_insert},
    {"note",         icon_text_note},
    {"paragraph",    icon_text_paragraph},
    {"newparagraph", icon_text_newparagraph},
    {"key",          icon_text_key},
    {"help",         icon_text_help},
    {NULL, 0}
};
/* Internal worker for PDF_add_note{,2}: create a text annotation.
 * contents and title may be NULL; icon may be NULL or empty (defaults
 * to "note"); open selects the initially-open display state. */
static void
pdf__add_note(
    PDF *p,
    float llx,
    float lly,
    float urx,
    float ury,
    const char *contents,
    int len_cont,
    const char *title,
    int len_title,
    const char *icon,
    int open)
{
    static const char fn[] = "pdf__add_note";
    pdf_annot *ann;

    ann = (pdf_annot *) pdc_malloc(p->pdc, sizeof(pdf_annot), fn);

    pdf_init_annot(p, ann);

    ann->type = ann_text;
    ann->open = open;
    pdf_init_rectangle(p, ann, llx, lly, urx, ury);

    if (icon == NULL || !*icon)
        icon = "note";
    ann->icon = (pdf_icon) pdc_get_keycode(icon, pdf_icon_note_keylist);
    if (ann->icon == PDC_KEY_NOTFOUND)
        pdc_error(p->pdc, PDC_E_ILLARG_STRING, "icon", icon, 0, 0);

    /* title may be NULL */
    ann->title = pdf_convert_hypertext(p, title, len_title);

    /* It is legal to create an empty text annnotation */
    ann->contents = pdf_convert_hypertext(p, contents, len_cont);

    pdf_add_annot(p, ann);
}
/* Public API: add a text annotation using NUL-terminated strings;
 * computes the string lengths and defers to pdf__add_note(). */
PDFLIB_API void PDFLIB_CALL
PDF_add_note(
    PDF *p,
    float llx,
    float lly,
    float urx,
    float ury,
    const char *contents,
    const char *title,
    const char *icon,
    int open)
{
    static const char fn[] = "PDF_add_note";

    if (pdf_enter_api(p, fn, pdf_state_page,
        "(p[%p], %g, %g, %g, %g, \"%s\", \"%s\", \"%s\", %d)\n",
        (void *) p, llx, lly, urx, ury,
        pdc_strprint(p->pdc, contents, 0),
        pdc_strprint(p->pdc, title, 0), icon, open))
    {
        int len_cont = contents ? (int) pdc_strlen(contents) : 0;
        int len_title = title ? (int) pdc_strlen(title) : 0;

        pdf__add_note(p, llx, lly, urx, ury, contents, len_cont,
            title, len_title, icon, open);
    }
}
/* Public API: add a text annotation with explicit string lengths
 * (supports non-NUL-terminated/Unicode strings). */
PDFLIB_API void PDFLIB_CALL
PDF_add_note2(
    PDF *p,
    float llx,
    float lly,
    float urx,
    float ury,
    const char *contents,
    int len_cont,
    const char *title,
    int len_title,
    const char *icon,
    int open)
{
    static const char fn[] = "PDF_add_note2";

    if (pdf_enter_api(p, fn, pdf_state_page,
        "(p[%p], %g, %g, %g, %g, \"%s\", %d, \"%s\", %d, \"%s\", %d)\n",
        (void *) p, llx, lly, urx, ury,
        pdc_strprint(p->pdc, contents, len_cont), len_cont,
        pdc_strprint(p->pdc, title, len_title), len_title,
        icon, open))
    {
        pdf__add_note(p, llx, lly, urx, ury, contents, len_cont,
            title, len_title, icon, open);
    }
}
/* Add a link to another PDF file */
/* Public API: add a link to a destination in another PDF file.
 * Fix: also reject an empty filename, consistent with the other
 * annotation entry points (pdf__attach_file, PDF_add_weblink) — an
 * empty /F filespec would produce a broken GoToR action. */
PDFLIB_API void PDFLIB_CALL
PDF_add_pdflink(
    PDF *p,
    float llx,
    float lly,
    float urx,
    float ury,
    const char *filename,
    int page,
    const char *optlist)
{
    static const char fn[] = "PDF_add_pdflink";
    pdf_annot *ann;

    if (!pdf_enter_api(p, fn, pdf_state_page,
        "(p[%p], %g, %g, %g, %g, \"%s\", %d, \"%s\")\n",
        (void *) p, llx, lly, urx, ury, filename, page, optlist))
    {
        return;
    }

    if (filename == NULL || *filename == '\0')
        pdc_error(p->pdc, PDC_E_ILLARG_EMPTY, "filename", 0, 0, 0);

    ann = (pdf_annot *) pdc_malloc(p->pdc, sizeof(pdf_annot), fn);

    pdf_init_annot(p, ann);

    ann->filename = pdc_strdup(p->pdc, filename);
    ann->type = ann_pdflink;

    pdf_parse_destination_optlist(p, optlist, &ann->dest, page, pdf_remotelink);

    pdf_init_rectangle(p, ann, llx, lly, urx, ury);

    pdf_add_annot(p, ann);
}
/* Add a link to another file of an arbitrary type */
/* Public API: add a link that launches an arbitrary file. Ownership of
 * any pending launchlink parameter strings in p is transferred to the
 * annotation (the pointers in p are reset to NULL).
 * Fix: also reject an empty filename, consistent with the other
 * annotation entry points (pdf__attach_file, PDF_add_weblink). */
PDFLIB_API void PDFLIB_CALL
PDF_add_launchlink(
    PDF *p,
    float llx,
    float lly,
    float urx,
    float ury,
    const char *filename)
{
    static const char fn[] = "PDF_add_launchlink";
    pdf_annot *ann;

    if (!pdf_enter_api(p, fn, pdf_state_page,
        "(p[%p], %g, %g, %g, %g, \"%s\")\n",
        (void *)p, llx, lly, urx, ury, filename))
    {
        return;
    }

    if (filename == NULL || *filename == '\0')
        pdc_error(p->pdc, PDC_E_ILLARG_EMPTY, "filename", 0, 0, 0);

    ann = (pdf_annot *) pdc_malloc(p->pdc, sizeof(pdf_annot), fn);

    pdf_init_annot(p, ann);

    ann->filename = pdc_strdup(p->pdc, filename);
    ann->type = ann_launchlink;

    /* take over the optional Windows launch parameters */
    if (p->launchlink_parameters) {
        ann->parameters = p->launchlink_parameters;
        p->launchlink_parameters = NULL;
    }

    if (p->launchlink_operation) {
        ann->operation = p->launchlink_operation;
        p->launchlink_operation = NULL;
    }

    if (p->launchlink_defaultdir) {
        ann->defaultdir = p->launchlink_defaultdir;
        p->launchlink_defaultdir = NULL;
    }

    pdf_init_rectangle(p, ann, llx, lly, urx, ury);

    pdf_add_annot(p, ann);
}
/* Add a link to a destination in the current PDF file */
/* Public API: add a link to a destination (page/optlist) within the
 * current document. */
PDFLIB_API void PDFLIB_CALL
PDF_add_locallink(
    PDF *p,
    float llx,
    float lly,
    float urx,
    float ury,
    int page,
    const char *optlist)
{
    static const char fn[] = "PDF_add_locallink";
    pdf_annot *ann;

    if (!pdf_enter_api(p, fn, pdf_state_page,
        "(p[%p], %g, %g, %g, %g, %d, \"%s\")\n",
        (void *) p, llx, lly, urx, ury, page, optlist))
    {
        return;
    }

    ann = (pdf_annot *) pdc_malloc(p->pdc, sizeof(pdf_annot), fn);

    pdf_init_annot(p, ann);

    ann->type = ann_locallink;

    pdf_parse_destination_optlist(p, optlist, &ann->dest, page, pdf_locallink);

    pdf_init_rectangle(p, ann, llx, lly, urx, ury);

    pdf_add_annot(p, ann);
}
/* Add a link to an arbitrary Internet resource (URL) */
/* Public API: add a link to an arbitrary Internet resource (URL).
 * The URL is stored in the annotation's filename member and written
 * later as a /URI action. */
PDFLIB_API void PDFLIB_CALL
PDF_add_weblink(
    PDF *p,
    float llx,
    float lly,
    float urx,
    float ury,
    const char *url)
{
    static const char fn[] = "PDF_add_weblink";
    pdf_annot *ann;

    if (!pdf_enter_api(p, fn, pdf_state_page,
        "(p[%p], %g, %g, %g, %g, \"%s\")\n",
        (void *) p, llx, lly, urx, ury, url))
    {
        return;
    }

    if (url == NULL || *url == '\0')
        pdc_error(p->pdc, PDC_E_ILLARG_EMPTY, "url", 0, 0, 0);

    ann = (pdf_annot *) pdc_malloc(p->pdc, sizeof(pdf_annot), fn);

    pdf_init_annot(p, ann);

    ann->filename = pdc_strdup(p->pdc, url);
    ann->type = ann_weblink;
    pdf_init_rectangle(p, ann, llx, lly, urx, ury);

    pdf_add_annot(p, ann);
}
/* Public API: set the border style for subsequently created
 * annotations. style may be NULL or "solid" (default) or "dashed";
 * width must be non-negative.
 * Fix: the original assigned p->border_style before validating width,
 * so an invalid width left a half-applied state. Both arguments are
 * now validated before any state is committed. */
PDFLIB_API void PDFLIB_CALL
PDF_set_border_style(PDF *p, const char *style, float width)
{
    static const char fn[] = "PDF_set_border_style";
    pdf_border_style new_style = border_solid;

    if (!pdf_enter_api(p, fn,
        (pdf_state) (pdf_state_document | pdf_state_page),
        "(p[%p], \"%s\", %g)\n", (void *) p, style, width))
    {
        return;
    }

    /* validate both arguments first */
    if (style == NULL || !strcmp(style, "solid"))
        new_style = border_solid;
    else if (!strcmp(style, "dashed"))
        new_style = border_dashed;
    else
        pdc_error(p->pdc, PDC_E_ILLARG_STRING, "style", style, 0, 0);

    if (width < 0.0)
        pdc_error(p->pdc, PDC_E_ILLARG_FLOAT,
            "width", pdc_errprintf(p->pdc, "%f", width), 0, 0);

    /* commit only after all validation passed */
    p->border_style = new_style;
    p->border_width = width;
}
/* Public API: set the RGB border color for subsequently created
 * annotations; each component must lie in [0, 1]. */
PDFLIB_API void PDFLIB_CALL
PDF_set_border_color(PDF *p, float red, float green, float blue)
{
    static const char fn[] = "PDF_set_border_color";

    if (!pdf_enter_api(p, fn,
        (pdf_state) (pdf_state_document | pdf_state_page),
        "(p[%p], %g, %g, %g)\n", (void *) p, red, green, blue))
    {
        return;
    }

    if (red < 0.0 || red > 1.0)
        pdc_error(p->pdc, PDC_E_ILLARG_FLOAT,
            "red", pdc_errprintf(p->pdc, "%f", red), 0, 0);
    if (green < 0.0 || green > 1.0)
        pdc_error(p->pdc, PDC_E_ILLARG_FLOAT,
            "green", pdc_errprintf(p->pdc, "%f", green), 0, 0);
    if (blue < 0.0 || blue > 1.0)
        pdc_error(p->pdc, PDC_E_ILLARG_FLOAT,
            "blue", pdc_errprintf(p->pdc, "%f", blue), 0, 0);

    p->border_red = red;
    p->border_green = green;
    p->border_blue = blue;
}
/* Public API: set the dash pattern for dashed annotation borders;
 * b is the length of the dashes, w the length of the gaps (both in
 * points, both must be non-negative). Only used when the border style
 * is "dashed". */
PDFLIB_API void PDFLIB_CALL
PDF_set_border_dash(PDF *p, float b, float w)
{
    static const char fn[] = "PDF_set_border_dash";

    if (!pdf_enter_api(p, fn,
        (pdf_state) (pdf_state_document | pdf_state_page),
        "(p[%p], %g, %g)\n", (void *) p, b, w))
    {
        return;
    }

    if (b < 0.0)
        pdc_error(p->pdc, PDC_E_ILLARG_FLOAT,
            "b", pdc_errprintf(p->pdc, "%f", b), 0, 0);
    if (w < 0.0)
        pdc_error(p->pdc, PDC_E_ILLARG_FLOAT,
            "w", pdc_errprintf(p->pdc, "%f", w), 0, 0);

    p->border_dash1 = b;
    p->border_dash2 = w;
}
|
Click the picture above
to see three larger pictures
Show birthplace location
|Previous||(Alphabetically)||Next||Biographies index |
|Version for printing|
Lipman Bers, always known as Lipa, was born into a Jewish family. His parents Isaac Bers and Bertha Weinberg were teachers, his mother being head at an elementary school in Riga where teaching was in Yiddish while his father was head at the Yiddish high school in Riga. Born in 1914, Lipa's early years were much affected by the political and military events taking place in Russia. Latvia had been under Russian imperial rule since the 18th century so World War I meant that there were evacuations from Riga. The Russian Revolution which began in October 1917 caused fighting between the Red Army and the White Army and for the next couple of years various parts of Russia came first under the control of one faction then of the other. Lipa's family went to Petrograd, the name that St Petersburg had been given in 1914 when there was strong anti-German feeling in Russia, but Lipa was too young to understand the difficulties that his parents went through at this time.
At the end of World War I in 1918, Latvia regained its independence although this was to be short-lived. Lipa spent some time back in Riga, but he also spent time in Berlin. His mother took him to Berlin while she was training at the Psychoanalytic Institute. During his schooling mathematics became his favourite subject and he decided that it was the subject he wanted to study at university. He studied at the University of Zurich, then returned to Riga and studied at the university there.
At this time Europe was a place of extreme politics and, in 1934, Latvia became ruled by a dictator. Lipa was a political activist, a social democrat who argued strongly for human rights. He was at this time a soap-box orator putting his views across strongly both in speeches and in writing for an underground newspaper. Strongly opposed to dictators and strongly advocating democracy it was clear that his criticism of the Latvian dictator could not be ignored by the authorities. A warrant was issued for his arrest and, just in time, he escaped to Prague. His girl friend Mary Kagan followed him to Prague where they married on 15 May 1938.
There were a number of reasons why Bers chose to go to Prague at this time. Firstly he had to escape from Latvia, secondly Prague was in a democratic country, and thirdly his aunt lived there so he could obtain permission to study at the Charles University without having to find a job to support himself. One should also not underestimate the fact that by this stage his mathematical preferences were very much in place and Karl Loewner in Prague looked the ideal supervisor.
Indeed Bers did obtain his doctorate which was awarded in 1938 from the Charles University of Prague where he wrote a thesis on potential theory under Karl Loewner's supervision. At the time Bers was rather unhappy with Loewner :-
Lipa spoke of feeling neglected, perhaps even not encouraged, by Loewner and said that only in retrospect did he understand Loewner's teaching method. He gave to each of his students the amount of support needed ... It is obvious that Lipa did not appear too needy to Loewner.
In 1938 Czechoslovakia became an impossible country for someone of Jewish background. Equally dangerous was the fact that Bers had no homeland since he was a wanted man in Latvia, and was a left wing academic. With little choice but to escape again, Bers fled to Paris where his daughter Ruth was born. However, the war followed him and soon the Nazi armies began occupying France. Bers applied for a visa to the USA and, while waiting to obtain permission, he wrote two papers on Green's functions and integral representations. Just days before Paris surrendered to the advancing armies, Bers and his family moved from Paris to a part of France not yet under attack from the advancing German armies. At last he received the news that he was waiting for, the issue of American visas for his family.
In 1940 Bers and his family arrived in the United States and joined his mother who was already in New York. There was of course a flood of well qualified academics arriving in the United States fleeing from the Nazis and there was a great scarcity of posts, even for the most brilliant, so he was unemployed until 1942, living with other unemployed refugees in New York. During this time he continued his mathematical researches. After this he was appointed Research Instructor at Brown University where, as part of work relevant to the war effort, he studied two-dimensional subsonic fluid flow. This was important at that time since aircraft wings were being designed for planes with jet engines capable of high speeds.
Between 1945 and 1949 Bers worked at Syracuse University, first as Assistant Professor, later as Associate Professor. Gelbart wanted to build up the department at Syracuse and attracting both Bers and Loewner was an excellent move. Here Bers began work on the problem of removability of singularities of non-linear elliptic equations. His major results in this area were announced by him at the International Congress of Mathematicians in 1950 and his paper Isolated singularities of minimal surfaces was published in the Annals of Mathematics in 1951. Courant writes:-
The nonparametric differential equation of minimal surfaces may be considered the most accessible significant example revealing typical qualities of solutions of non-linear partial differential equations. With a view to such a general objective, [Bers] has studied singularities, branch-points and behaviour in the large of minimal surfaces.
Abikoff writes that this paper is:-
... a magnificent synthesis of complex analytic techniques which relate the different parameterisations of minimal surfaces to the representations of the potential function for subsonic flow and thereby achieves the extension across the singularity.
Bers then became a member of the Institute for Advanced Study at Princeton where he began work on Teichmüller theory, pseudoanalytic functions, quasiconformal mappings and Kleinian groups. He was set in the right direction by an inequality he found in a paper of Lavrentev who attributed the inequality to Ahlfors. In a lecture he gave in 1986 Bers explained what happened next:-
I was in Princeton at the time. Ahlfors came to Princeton and announced a talk on quasiconformal mappings. He spoke at the University so I went there and sure enough, he proved this theorem. So I came up to him after the talk and asked him "Where did you publish it?", and he said "I didn't". "So why did Lavrentev credit you with it?" Ahlfors said "He probably thought I must know it and was too lazy to look it up in the literature".
When Bers met Lavrentev three years later he asked him the same questions and, indeed, Ahlfors had been correct in guessing why Lavrentev had credited him. Bers continued in his 1986 lecture:-
I immediately decided that, first of all, if quasiconformal mappings lead to such powerful and beautiful results and, secondly, if it is done in this gentlemanly spirit - where you don't fight over priority - this is something that I should spend the rest of my life studying.
It is ironic, given Bers strong political views on human rights, that he should find that Teichmüller, a fervent Nazi, had already made stunning contributions. In one of his papers on Teichmüller theory, Bers quotes Plutarch:-
It does not of necessity follow that, if the work delights you with its grace, the one who wrought it is worthy of your esteem.
In 1951 Bers went to the Courant Institute in New York, where he was a full professor, and remained there for 13 years. During this time he wrote a number of important books and surveys on his work. He published Theory of pseudo-analytic functions in 1953 which Protter, in a review, described as follows:-
The theory of pseudo-analytic functions was first announced by [Bers] in two notes. These lecture notes not only contain proofs and extensions of the results previously announced but give a self-contained and comprehensive treatment of the subject.
The author sets as his goal the development of a function theory for solutions of linear, elliptic, second order partial differential equations in two independent variables (or systems of two first-order equations). One of the chief stumbling blocks in such a task is the fact that the notion of derivative is a hereditary property for analytic functions while this is clearly not the case for solutions of general second order elliptic equations.
Another classic text was Mathematical aspects of subsonic and transonic gas dynamics published in 1958:-
It should be said, even though this is taken for granted by everybody in the case of Professor Bers, that the survey is masterly in its elegance and clarity.
In 1958 Bers address the International Congress of Mathematicians in Edinburgh, Scotland, where he lectured on Spaces of Riemann surfaces and announced a new proof of the measurable Riemann mapping theorem. In his talk Bers summarised recent work on the classical problem of moduli for compact Riemann surfaces and sketched a proof of the Teichmüller theorem characterizing extremal quasiconformal mappings. He showed that the Teichmüller space for surfaces of genus g is a (6g-6)-cell, and showed how to construct the natural complex analytic structure for the Teichmüller space.
Bers was a Guggenheim Fellow in 1959-60, and a Fulbright Fellow in the same academic year. From 1959 until he left the Courant Institute in 1964, Bers was Chairman of the Graduate Department of Mathematics.
In 1964 Bers went to Columbia University where he was to remain until he retired in 1984. He was chairman of the department from 1972 to 1975. He was appointed Davies Professor of Mathematics in 1972, becoming Emeritus Davies Professor of Mathematics in 1982. During this period Bers was Visiting Miller Research Professor at the University of California at Berkeley in 1968.
Tilla Weinstein describes Bers as a lecturer:-
Lipa's courses were irresistible. He laced his lectures with humorous asides and tasty tidbits of mathematical gossip. He presented intricate proofs with impeccable clarity, pausing dramatically at the few most critical steps, giving us a chance to think for ourselves and to worry that he might not know what to do next. Then, just as the silence got uncomfortable, he would describe the single most elegant way to complete the argument.
Jane Gilman describes Bers' character:-
Underneath the force of Bers' personality and vivacity was the force of his mathematics. His mathematics had a clarity and beauty that went beyond the actual results. He had a special gift for conceptualising things and placing them in the larger context.
Bers' life is summed up by Abikoff as follows:-
Lipa possessed a joy of life and an optimism that is difficult to find at this time and that is sorely missed. Those of us who experienced it directly have felt an obligation to pass it on. That, in addition to the beauty of his own work, is Lipa's enduring gift to us.
We have yet to say something about Bers' great passion for human rights. In fact this was anything but a sideline in his life and one could consider that he devoted himself full-time to both his mathematical work and to his work as a social reformer. Perhaps his views are most clearly expressed by quoting from an address he gave in 1984 when awarded an honorary degree by the State University of New York at Stony Brook:-
By becoming a human rights activist ... you do take upon yourself certain difficult obligations. ... I believe that only a truly even-handed approach can lead to an honest, morally convincing, and effective human rights policy. A human rights activist who hates and fears communism must also care about the human rights of Latin American leftists. A human rights activist who sympathises with the revolutionary movement in Latin America must also be concerned about human rights abuses in Cuba and Nicaragua. A devout Muslim must also care about human rights of the Bahai in Iran and of the small Jewish community in Syria, while a Jew devoted to Israel must also worry about the human rights of Palestinian Arabs. And we American citizens must be particularly sensitive to human rights violations for which our government is directly or indirectly responsible, as well as to the human rights violations that occur in our own country, as they do.
Bers received many honours for his contributions in addition to those we have mentioned above. He was elected to the American Academy of Arts and Sciences, to the Finnish Academy of Sciences, and to the American Philosophical Society. He served the American Mathematical Society in several capacities, particularly as Vice-President (1963-65) and as President (1975-77). The American Mathematical Society awarded him their Steele Prize in 1975. He received the New York Mayor's award in Science and Technology in 1985. He was an honorary life member of the New York Academy of Sciences, and of the London Mathematical Society.
Article by: J J O'Connor and E F Robertson
Click on this link to see a list of the Glossary entries for this page
List of References (5 books/articles)|
|Some Quotations (3)|
|Mathematicians born in the same country|
|Honours awarded to Lipman Bers|
(Click below for those honoured in this way)
|AMS Colloquium Lecturer||1971|
|AMS Steele Prize||1975|
|American Maths Society President||1975 - 1976|
|LMS Honorary Member||1984|
Other Web sites
|Previous||(Alphabetically)||Next||Biographies index |
|History Topics || Societies, honours, etc.||Famous curves |
|Time lines||Birthplace maps||Chronology||Search Form |
|Glossary index||Quotations index||Poster index |
|Mathematicians of the day||Anniversaries for the year|
JOC/EFR © April 2002 |
School of Mathematics and Statistics|
University of St Andrews, Scotland
The URL of this page is:|
|
/*
* Copyright (c) 2015 - 2022, Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "config.h"
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <iostream>
#include <iomanip>
#include "ProfileTracerImp.hpp"
#include "geopm/PlatformIO.hpp"
#include "geopm/PlatformTopo.hpp"
#include "geopm/Helper.hpp"
#include "geopm_hint.h"
#include "Environment.hpp"
#include "geopm/Exception.hpp"
#include "CSV.hpp"
#include "geopm_debug.hpp"
#include "ApplicationSampler.hpp"
#include "record.hpp"
namespace geopm
{
// Convenience constructor: delegates to the full constructor with a
// fixed 1 MiB buffer, the trace-enable flag and output file name taken
// from the process environment, and the local host name.
ProfileTracerImp::ProfileTracerImp(const std::string &start_time)
    : ProfileTracerImp(start_time,
                       1024 * 1024,
                       environment().do_trace_profile(),
                       environment().trace_profile(),
                       hostname())
{
}
// Full constructor: when tracing is enabled, creates the CSV writer
// and declares the four output columns in the order update() fills
// them; otherwise it is a no-op beyond recording the disabled flag.
ProfileTracerImp::ProfileTracerImp(const std::string &start_time,
                                   size_t buffer_size,
                                   bool is_trace_enabled,
                                   const std::string &file_name,
                                   const std::string &host_name)
    : m_is_trace_enabled(is_trace_enabled)
{
    if (!m_is_trace_enabled) {
        return;
    }
    m_csv = geopm::make_unique<CSVImp>(file_name, host_name, start_time, buffer_size);
    m_csv->add_column("TIME", "double");
    m_csv->add_column("PROCESS", "integer");
    // EVENT and SIGNAL share the stateful event_format formatter,
    // which alternates between the two columns on successive calls.
    m_csv->add_column("EVENT", event_format);
    m_csv->add_column("SIGNAL", event_format);
    m_csv->activate();
}
// Defaulted: the owned CSV writer is released via unique_ptr RAII.
ProfileTracerImp::~ProfileTracerImp() = default;
// Format one cell of the EVENT/SIGNAL column pair.
//
// The CSV layer calls this formatter twice per record: the first call
// receives the event code (formatted via event_name()), and the second
// receives the signal value, whose formatting depends on the event
// type remembered from the first call.  The alternation is tracked in
// function-local static state, so calls must strictly alternate
// EVENT, SIGNAL, EVENT, ...
//
// NOTE(review): the static is_signal/event_type state is shared across
// all callers — this cannot be safe from multiple threads; confirm the
// CSV writer invokes formatters from a single thread.
std::string ProfileTracerImp::event_format(double value)
{
    static bool is_signal = false;
    static int event_type;
    std::string result;
    if (!is_signal) {
        // This is a call to format the event column
        // Store the event type for the next call
        // (implicit narrowing double -> int: event codes are small ints)
        event_type = value;
        result = event_name((int)value);
        // The next call will format the signal column
        is_signal = true;
    }
    else {
        // This is a call to format the signal column
        switch (event_type) {
            case EVENT_REGION_ENTRY:
            case EVENT_REGION_EXIT:
                // Region entry/exit signals are region hashes
                result = string_format_hex(value);
                break;
            case EVENT_EPOCH_COUNT:
                result = string_format_integer(value);
                break;
            default:
                result = "INVALID";
                GEOPM_DEBUG_ASSERT(false, "ProfileTracer::event_format(): event out of range");
                break;
        }
        // The next call will be to format the event column
        is_signal = false;
    }
    return result;
}
// Append one CSV row per record; records are silently dropped when
// tracing is disabled.
void ProfileTracerImp::update(const std::vector<record_s> &records)
{
    if (!m_is_trace_enabled) {
        return;
    }
    std::vector<double> row(M_NUM_COLUMN);
    for (const auto &rec : records) {
        row[M_COLUMN_TIME] = rec.time;
        row[M_COLUMN_PROCESS] = rec.process;
        row[M_COLUMN_EVENT] = rec.event;
        row[M_COLUMN_SIGNAL] = rec.signal;
        m_csv->update(row);
    }
}
// Factory for the public ProfileTracer interface; forwards to the
// environment-configured ProfileTracerImp constructor.
std::unique_ptr<ProfileTracer> ProfileTracer::make_unique(const std::string &start_time)
{
    return geopm::make_unique<ProfileTracerImp>(start_time);
}
}
|
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/dax/model/NodeTypeSpecificValue.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace DAX
{
namespace Model
{
// Default constructor: both fields start unset, so Jsonize() emits an
// empty object until they are assigned.
NodeTypeSpecificValue::NodeTypeSpecificValue() :
    m_nodeTypeHasBeenSet(false),
    m_valueHasBeenSet(false)
{
}
// JSON constructor: initializes the "has been set" flags to false and
// delegates field extraction to operator=.
NodeTypeSpecificValue::NodeTypeSpecificValue(JsonView jsonValue) :
    m_nodeTypeHasBeenSet(false),
    m_valueHasBeenSet(false)
{
    *this = jsonValue;
}
// Assign from a JSON document: copy each optional field only when its
// key is present, flagging it as set so Jsonize() round-trips it.
NodeTypeSpecificValue& NodeTypeSpecificValue::operator =(JsonView jsonValue)
{
    if (jsonValue.ValueExists("NodeType")) {
        m_nodeType = jsonValue.GetString("NodeType");
        m_nodeTypeHasBeenSet = true;
    }

    if (jsonValue.ValueExists("Value")) {
        m_value = jsonValue.GetString("Value");
        m_valueHasBeenSet = true;
    }

    return *this;
}
// Serialize to JSON, emitting only the fields that were explicitly set.
JsonValue NodeTypeSpecificValue::Jsonize() const
{
    JsonValue payload;

    if (m_nodeTypeHasBeenSet) {
        payload.WithString("NodeType", m_nodeType);
    }

    if (m_valueHasBeenSet) {
        payload.WithString("Value", m_value);
    }

    return payload;
}
} // namespace Model
} // namespace DAX
} // namespace Aws
|
The video player below has an interactive transcript player integrated into it. This feature allows the viewer to quickly scan through the text, locate a point of interest and click on a word.
The video immediately jumps to that point and begins playing. An interactive Table of Contents and Interactive Headings can also be added.
The look and feel of the player and the interactive transcript can be altered by changing the HTML code and css file.
Well Being Feeling Better
Well Being Program 1
Well Being Program 1: Feeling Better
Interactive Transcript
Click on a word to move video to that point
Well being is something we all desire. But staying healthy and feeling good can be quite a challenge, especially with the stresses that pervade every aspect of our lives today.
Everyone is aware that
regular exercise and eating healthily are two essential steps to well being.
In this
series, you'll learn about a third step, relaxation. Not just sitting around but some specific activities that trigger your body's relaxation response. You'll learn about the importance of deep relaxation for reducing the harmful effects of stress, along with a series of easy to do relaxation skills.
through our weekly questionnaire, you'll gain an awareness of how stress may be affecting you.
So get a paper and pencil ready and join us as we
take the next step to well being.
[music] [music] [music] [music] [music] [music] [music] [music] [music] [music] [music]
Few of us are aware of how
stress erodes our sense of well being. In this program, you'll find out what causes stress. This week's questionnaire will help you recognize how stress is affecting you and we'll show you what you can do about it.
Eli Bay,
Director of the Relaxation Response Institute in Toronto is a pioneer in the teaching of relaxation skills for stress management.
Eli: Stress is
not all bad. In fact, we need a certain amount of stress to get out of bed in the morning. Without stress, we wouldn't be alive. Stress is the fuel that gets things done. It motivates us to live and to work in the world efficiently.
Too much stress,
however, excessive stress becomes a problem. And it really affects every aspect of our life including our health right through to our relationships.
term stress was first coined by the late Dr. Hans Selye from the University of Montreal. And Selye analyzed 1000s of research studies that sort of looked at how the body reacts to injury, to excessive stimulation, to unusual work demands. And he discovered that our bodies react to all of these different demands in much the same way. Like most animals, humans have a built-in stress alarm system called a fight or flight reaction. And its purpose is to help us to react quickly to life threatening events. It's automatic. It mobilizes our body to protect itself by fighting or running away from the threat.
In either case, your blood pressure rises, your heart beat speeds up, your muscles contract. It's a whole arousal state. And when this arousal state is turned on, that medically is what stress is.
Deepak: So what does this stress response
have to do with our daily lives? How often do we face life threatening situations where we have to take flight or fight for our lives.
Dr. Posen: Well it's an interesting thing because
I've actually heard the phrase that the stress response is dumb. You get the same reaction no matter what the trigger or stimulus.
So it's like a computer switch,
it's either on or its off. And the body in a sense only has one way of responding. It can be a severe reaction or a mild one. But the reaction is the same every time.
The interesting
thing is, when we know that anything that is life threatening, this physical danger, will result in a stress reaction. But the same thing happens when you feel your self esteem to be threatened. So if somebody yells at you or criticizes you or if you have an argument, and you start to feel not good about yourself or insecure with that person, that threat to your self esteem or identity will actually for some people be as threatening as if it were life threatening in a physical way.
And then the other thing is,
when people have an on-going stress reaction, whether it's -- and most things aren't life threatening -- your body is in a state of arousal, ready to fight or run away from danger. In most cases, fighting and running away are not appropriate responses.
So what happens
is the stress response, the state of arousal continues. And if it goes on for minutes or hours, it can result in actually physical or psychological symptoms. It's almost like if a muscle is tense long enough, it will become sore and stiff. And that's what happens throughout the body in a stress reaction, but not only to the muscles, but in terms of internal organs and digestive upsets and so on.
Deepak: Many of us are
so bombarded by stressful situations that by the end of the day, our bodies are reacting as if we had just been through a life threatening situation. Even though our brains know that our lives are not literally in danger, our bodies apparently don't.
But why
does this stress continue to build up? If the body is smart enough to turn the fight or flight response on, isn't it smart enough to turn it off at the end of a stressful day?
Eli: We would
almost be better off if the stressful events were life threatening. If the source of stress is identifiable, then it's obvious when the danger has passed and the body can naturally relax.
Our problem in this technological age is that stress
comes from many sources. The source of our stress is so undefined, so ambiguous, so prolonged, that our bodies never know when its over. So our stress mechanisms don't turn off.
As a result, most of us are in a state of
chronic stress and we are not even aware of it.
Sheila: About 6 months ago if you had told me
that I was stressed, I wouldn't even had known it. I would've said, "No that's just me. That's the way that I respond to things."
But now I can
actually feel the difference. I know when my body is relaxed. I know when my mind is relaxed and I know when it's not.
Dr. Posen: Raising people's awareness
and consciousness of how stress shows up for them has several important benefits. First of all, they become aware of their stress where often they weren't. In other cases, it helps them monitor their stress. Because once you know how it is showing up for you, you can know when it is getting better or worse if you know what to watch for.
But another thing that is very interesting
that blocks people from dealing with stress is the problem of denial. A lot of people find it very difficult to admit when they are experiencing stress, and they just won't admit it, they will deny it. And it's the sub-conscious process. And to get through that barrier, having people actually look at check-lists, sometimes brings to their awareness things that are impossible for them to ignore. If they said yes to 14 questions, then it's pretty hard to say, "But I don't have stress."
Eli: Actually the very best way to determine how
well you are dealing with stress is to listen to your own body. But that's a fairly sophisticated awareness that takes some time to develop.
There are other ways.
There are in fact research scientists over the past several decades that have been exploring and trying to figure out ways to help us develop understanding about the stress in our own lives. And over these 8 programs, we are going to be looking at a number of different stress tests or questionnaires or evaluations, and you'll have an opportunity to test yourself and see how well you are dealing with stress.
Each test will spotlight a different aspect of stress.
Deepak: Ahh, so we'll be able to evaluate ourselves?
Eli: Yes,
that's the objective -- to be able to get an understanding, a self-evaluation of various aspects of stresses in your life.
In today's test,
it's a test that's going to look at just life change and not looking at the macro changes, the changes in the culture of the technology. But just life changes, day to day things that we all experience in various times in our life.
Drs. Holmes and Rahe, a couple of medical researchers at
the University of Washington Medical School developed this test roughly 25 years ago. And they've tested it on 10,000's of people. And they found it to be an extremely accurate predictor of illness. That the more change that one has in a short period of time, the higher the statistical probability that one will develop an illness as a result of those changes.
Deepak: So by taking
this test, we're going to find out whether or not we'll get sick?
Eli: Well, no. We are going to look at statistical
probabilities. The more change, the higher the probability.
Trend is
not destiny. It's not a question of you becoming ill as a result of this test. Although it's an indicator and certainly one should be aware that if you score highly on this test, there is really a high probability of becoming ill.
We'll talk about that once we've done the scoring.
Deepak: OK, so let's take the test. Can you tell us how it works?
Eli: Yes, you are going to be presented with a list of
life events that have been weighted according to the amount of adjustment or change that is required to deal with that event.
For example,
if your spouse died within the last 12 months, you would get 100 change units. The researchers have discovered that universally, cross culturally, that the death of a spouse is the most stressful event.
In fact, researchers have discovered that from up to
two years after a spouse dies, the surviving spouse's immune system is about half the strength of what it should be.
So if your spouse
died, write down 100 on the piece of paper. If your spouse didn't die, then you don't write anything down.
If you had a close family
member that died within the last 12 months, you get 63 change points. If you had two close family members that had died, you'd get 63 * 2.
Deepak: It
sounds like we are going to be adding these numbers up.
Eli: Yes, so as each event occurs
for you, write down the number so that you can total it up at the end. And don't worry if you don't have a pencil and paper handy. Just pay attention to the event so that you get a sense of the life changes that have occurred in your life in the last year, to give you some sense of the kind of stresses that you are experiencing. Some of which may come as a real surprise to you.
this test is contained in the home study package, so you can refer to that in more detail.
The first
question, death of a spouse. If your spouse died within the last 12 months, give yourself 100 change points. Write it down.
If you had a close family member that passed away
within the past 12 months, give yourself 63 change points.
If you had a friend that died within the last year,
37 change points.
Now, let's look at change in marital
status. If you were divorced within the last year, 73 change points -- it's a major, stressful event.
If you were separated within the
last year, 65 change points.
If you were
married within the last year, you get 50 change points.
Now marriage seems an anomaly here.
You can certainly recognize the other events as being stressful. Marriage is perceived as a positive, at least in the beginning. And yet marriage here is rated as an extremely stressful event because of change.
Again, if you understand
nothing else but the equation change equals stress, you'll go a long way towards understanding both the nature and the problem of stress in our time.
So even a positive event like getting married
can be an extremely stressful event.
If you had a marital reconciliation in the last year,
give yourself 45 change units.
If you had
sexual difficulties, 39 change units.
The next category is the change in
family relationships. If you were pregnant within the last year, you get 40 change units.
you gained a new family member in the last year, 39 change units.
Deepak: Eli, I've got a problem
with this one. Because my daughter is 12.5 months old and I'm not sure whether to count this one or not.
Eli: Well Deepak, that's a very good
question and the answer I'm going to give you applies to all of the questions. We aren't talking hard and defined lines, it's roughly. You are looking at the amount of adaptations that have occurred in roughly the last year. So you can apply that to all of these.
Deepak: So
she is in!
Eli: She's in.
So the next
one, son or daughter leaves home -- 29 change units.
If you've had trouble with your
in-laws in the last year, 29 change points.
If you've had a change in the number of arguments with your spouse, that could be
fewer arguments as well as more, 35 chance points.
is another area, encounter with the legal system. If you had a jail term within the last year, you get 63 change units.
If you had a minor
violation of the law such as a speeding ticket, you get 11 change points. If you had 5 speeding tickets, let me remind you, it's 11 times 5.
If you had a personal injury or illness
in the last year, 53 change units. If you had a change in the health of a family member in the last year, 44 change units.
there is another area here, change in work situation. If you were fired from your job in the last 12 months, give yourself 47 change units. If you had a business readjustment, say you had to lay staff off, 39 change units.
If you changed to a different
line of work, 36 change units.
If you had a change
in your responsibilities at work, 29 change units -- for example if you were promoted.
If you have trouble with your boss, 23 change units.
If you had to change the number of work
hours or conditions, even if you work less, 20 change units.
If you retired within the
last year, 45 change units.
The next
area is major financial changes. If you are carrying a large mortgage, we're looking at 38 change units.
If you had a mortgage
greater than one year's income, then give yourself 31 change points.
If you had a
foreclosure of a mortgage, 30 change points.
If your spouse
began or stopped work in the last year, give yourself 26 change points.
If you are
carrying a small mortgage or a loan, 17 change points.
The next area -- change in routines. If you had a change in living conditions in the last year, 25 change points.
If you changed your personal
habits, say you stopped smoking, give yourself 24 change points.
If you had a change in
residence, if you moved, 20 change points.
If you
changed your school, also 20 change points.
If you had an outstanding personal achievement,
28 change points.
That's another surprise for people. Most people
don't associate something positive like this with stress. But again, we are looking at change and stress being related to one another.
Even in the next area,
change in activities, it's even more dramatic. Change in recreational patterns, 19 change points.
So if you took up tennis in the
last year, we are looking at 19 points.
If you had a change in church activities -- if
you are going to church less than you did a year earlier, we're looking at 19 change points.
If you've had a change
in social activities, you've taken on new friends, 18 change points.
If you've changed
your sleeping patterns, you are commuting now and you have to get up an hour earlier and go to bed an hour later, we're looking at 16 change points.
If you had a change in the number of family
get-togethers in the last year, 15 points.
If you
had a change in your eating habits, so for example if you decided to give up or cut down on your red meat, 15 change points.
Things that you
wouldn't normally consider as being stressful are on this test. They aren't major, but they do impact in total on us.
And this last segment, deals with holidays. If you had a
vacation within the last year, give yourself 13 change units. You may be surprised to say vacation as a cause of stress -- that's how I deal with my stress. And yet if any of you have ever left a nice, cold climate and flown south to a warmer climate and changed your diet, changed your routine, and gotten sick while on holiday or sick soon after returning where that change however positive and well deserved kind of just tips the balance of health and illness.
And even here
the last question is Christmas. If you experienced Christmas within the last year, you get 12 change points. And everybody experiences Christmas.
Deepak: In one way or another I suppose.
Can I add up my scores?
Eli: Let's leave that for later. I'd rather look at the implications.
Researchers have found that the higher
the score, the more serious the illness. And in fact, after working with 10,000s of people over many decades, the research is quite precise. And they found that if you score over 300 change points in a 12 month period, there is an 89% chance that some illness will develop within the next 2 years.
If you score under 300 but more
than 150, sort of the mid-range, there is roughly a 50 - 50 chance that you'll get some illness within the next couple of years.
And if you score under 150, there is
roughly a 37% chance that some illness will emerge.
Deepak: Now that's fascinating.
If I understand the test correctly, the combination of a few things such as a death of someone close to you, the purchase of a new house and the mortgage that goes with it, and a promotion, it could add up to a serious possibility of getting sick in the next couple of years.
Eli: Yes, and these things can creep up on one very quickly.
Research has shown that the higher the score, the more serious illness likely results. And it could be anything from a cold to a flu, right through to heart disease to strokes to arthritis to even cancer.
Deepak: Eli, while you were
talking, I added up my score and came up with 179. Now that puts me in the middle range right here and that means that I have a better than even chance of getting sick in the next two years?
Eli: That's right.
Deepak: How serious is this? Should
I plan for it now?
Eli: No. Trend is not destiny.
What you should be aware of though is your body is undergoing a great deal of strain and stress as a result of these changes. And that there are things that you can do to prevent that stress from building to the point of illness.
Deepak: So what can we do?
Eli: We can learn to turn on the body's natural
anti-stress mechanism, the relaxation response.
The relaxation response
was first identified by Dr. Herbert Benson at Harvard Medical School back in the early 1970s. Benson, one of the world's foremost cardiologists, discovered in the course of his research that built into every single body was a natural anti-stress mechanism that is equal to but opposite of that of the body stress response.
In fact,
the model that Benson identified, I think I can best illustrate very simply -- if you imagine that my torso is the body's autonomic nervous system, we all have an autonomic nervous system. The autonomic nervous system handles the so called automatic functions. You don't have to think about digesting your food or about breathing or having a heart beat. Those are conducted automatically by the autonomic nervous system.
The autonomic nervous system has two branches,
it has the sympathetic branch and a para-sympathetic branch. They are equal but opposite.
When we experience stress, it is the sympathetic branch of the autonomic nervous system that is turned on -- it's an arousal state. It's the same reaction if a cat were standing with its back arched, hair on end, ready to fight or to run.
That same reaction in us is what is literally called the body
stress mechanism. It's the arousal of the sympathetic nervous system.
What we are going to be
learning to turn on is the opposite, the para-sympathetic branch of the autonomic nervous system. It's a measurable and scientifically defined shift in the body -- and when it comes on, it literally shuts down the body stress mechanism. Again, it's measurable, your blood pressure drops, your heart rate slows down, your muscle tension is reduced, your metabolism slows down. Even your brain waves slow down.
And when
you turn on the body's relaxation response, it literally shuts down the body's stress response, right at the level of stress hormones.
Deepak: Now how do I do that?
Eli: There are many ways that you can learn to turn on the relaxation
response and that's what the intention of this whole series is about, to present you a number of practical effective techniques that will enable you to pretty much at will shut down the body stress response so the body can rest and recuperate properly.
I want to show you a very simple technique, a
breathing exercise that will enable you to control the levels of stress in your own system right at the level of stress hormones.
Deepak: Now breathing is something I do every day, how can it
cure all of my ills?
Eli: Well I'm not sure I can say that it will cure all of your
ailments but it will certainly give you a handle on the stresses that you experience in your day to day life.
Breathing is a rhythm that is with us from the moment we are born until the moment that we die. And very few people ever pay attention to their breathing. But it's important to realize that breathing and emotions are intimately connected.
When we
are tense, frightened, angry, our breathing is short and shallow and located in our chest. When we are relaxed, our breathing is easy and deep and located in our abdomen. And just the very act of breathing as if you are relaxed, enables you to become relaxed. It reflects the flow of hormones through your entire endocrine system. And by breathing slowly, deeply into the abdomen, the way a child breathes, you start to become relaxed regardless of the situation.
Deepak: Yeah, that's just like my daughter's breathing --
you can see her tummy rise and fall.
Eli: All children do it like that, we used to breathe
like that. In fact, that's really our birth right.
When we are children, we start to breathe into the
abdomen and as we accumulate stress and tension through our life, the breathing shifts up into the chest.
Most adults tend to be chest breathers unless they've had woodwind training or theater training or voice training. Most adults tend to breathe in their chest which is related to stress.
And one of the
easiest and most practical ways to control the stress in your life is to be able to breathe as if you are relaxed.
Would you like me to show you
an exercise?
Deepak: Please.
Eli: Get
comfortable. And for those of you at home, you get much more out of this program by actually doing the program with us rather than just watching. So follow along with us.
Place one hand on your chest and the other hand on your upper abdomen. Just focus your attention into your breathing and just be aware and see which hand is moving -- is your upper hand moving or your bottom hand moving? Are you breathing into your chest or into your upper abdomen?
So you are breathing into your chest? Most adults
tend to breathe into their chest. And the very simple act of just breathing in through your nose and guiding the air down into your abdomen, letting your abdomen rise and fall with each breath is really all that you need to do. So just in through the nose, letting your belly rise.. letting your belly fall.
You don't want your upper hand to move, you just want
to consciously direct the air into the bottom part of your lungs, so that as you breathe in the abdomen rises, and as you breathe out the abdomen falls.
Deepak: It's
pretty simple.
I've seen this simple exercise transform people's lives.
If you just
remember that when you breathe as if you are relaxed, you start to become relaxed. This breathing exercise affects the flow of hormones through your whole body and literally has an effect on turning on the body's relaxation response which affects every aspect of your body. It affects you physically, mentally, emotionally. It affects every system of your body, digestive system, cardio-vascular system, respiratory system.
And you can do
the same. You can do this travelling on the bus or the subway. You can do this in the car even when you are driving as long as you do it with your eyes open.
You can do it anywhere. It's
unobtrusive, you could stand in front of 100s of people and do it. And I only invite you to try it and do it for 10 or 15 minutes and just observe and see what kinds of changes happen.
Most people are
really truly amazed at how practical and effective this is. If I had time to come and teach people only one exercise out of the 100s that I know, this is the one that I would teach. It is profound in its effects. Don't be put off because it's so simple. Who said it had to be difficult?
Just to be able to breathe in this way all the time is a goal. If people practice this for 15 minutes a day over a period of 4 - 6 weeks, there is literally a re-training of the breathing apparatus so that at the end of a month or month and a half, most people are naturally reconnected with their diaphragmatic or abdominal breathing.
And your whole
baseline of stress changes. When you are breathing as if you are relaxed all the time, you stay at a lower level of stress.
Deepak: Well I'm certainly going to do this. But tell me,
how does this exercise fit in the series? You are going to show us a number of exercises?
Eli: Yes, we are going to look
at a number of different practical techniques that people can carry away with them.
Deepak: Are they all going to be as
simple as this?
Eli: Maybe not quite as simple, but some will be.
We're going to be looking at a range of different approaches. There
are physical techniques, there are mental techniques, there are breathing techniques.
Generally people tend to find that one method
works better for them than the other. Although of all the methods that I'm familiar with, the diaphragmatic or abdominal breathing exercise that we just did is really perhaps the universal most popular exercise because it really works.
Deepak: Well you've convinced me
to make the experiment Eli. I'll breathe through my tummy for a while and report back to you on how I feel.
Eli: You don't have to report back to me. It's
yours. Use it and enjoy it.
Program Length: 26:50 min
About the Program:
Program 1 Feeling Better, looks at the mechanism of stress and introduces deep relaxation as a method of dealing with the changes and challenges in life that stimulate the body's stress reaction.
|
Arctic meltdown not caused by nature
Rapid loss of Arctic sea ice - 80 per cent has disappeared since 1980 - is not caused by natural cycles such as changes in the Earth's orbit around the Sun, says Dr Karl.
The situation is getting rather messy with regard to the ice melting in the Arctic. Now the volume of the ice varies throughout the year, rising to its peak after midwinter, and falling to its minimum after midsummer, usually in the month of September.
Over most of the last 1,400 years, the volume of ice remaining each September has stayed pretty constant. But since 1980, we have lost 80 per cent of that ice.
Now one thing to appreciate is that over the last 4.7 billion years, there have been many natural cycles in the climate — both heating and cooling. What's happening today in the Arctic is not a cycle caused by nature, but something that we humans did by burning fossil fuels and dumping slightly over one trillion tonnes of carbon into the atmosphere over the last century.
So what are these natural cycles? There are many many of them, but let's just look at the Milankovitch cycles. These cycles relate to the Earth and its orbit around the Sun. There are three main Milankovitch cycles. They each affect how much solar radiation lands on the Earth, and whether it lands on ice, land or water, and when it lands.
The first Milankovitch cycle is that the orbit of the Earth changes from mostly circular to slightly elliptical. It does this on a predominantly 100,000-year cycle. When the Earth is close to the Sun it receives more heat energy, and when it is further away it gets less. At the moment the orbit of the Earth is about halfway between "nearly circular" and "slightly elliptical". So the change in the distance to the Sun in each calendar year is currently about 5.1 million kilometres, which translates to about 6.8 per cent difference in incoming solar radiation. But when the orbit of the Earth is at its most elliptical, there will be a 23 per cent difference in how much solar radiation lands on the Earth.
The second Milankovitch cycle affecting the solar radiation landing on our planet is the tilt of the north-south spin axis compared to the plane of the orbit of the Earth around the Sun. This tilt rocks gently between 22.1 degrees and 24.5 degrees from the vertical. This cycle has a period of about 41,000 years. At the moment we are roughly halfway in the middle — we're about 23.44 degrees from the vertical and heading down to 22.1 degrees. As we head to the minimum around the year 11,800, the trend is that the summers in each hemisphere will get less solar radiation, while the winters will get more, and there will be a slight overall cooling.
The third Milankovitch cycle that affects how much solar radiation lands on our planet is a little more tricky to understand. It's called 'precession'. As our Earth orbits the Sun, the north-south spin axis does more than just rock gently between 22.1 degrees and 24.5 degrees. It also — very slowly, just like a giant spinning top — sweeps out a complete 360 degrees circle, and it takes about 26,000 years to do this. So on January 4, when the Earth is at its closest to the Sun, it's the South Pole (yep, the Antarctic) that points towards the Sun.
So at the moment, everything else being equal, it's the southern hemisphere that has a warmer summer because it's getting more solar radiation, but six months later it will have a colder winter. And correspondingly, the northern hemisphere will have a warmer winter and a cooler summer.
But of course, "everything else" is not equal. There's more land in the northern hemisphere but more ocean in a southern hemisphere. The Arctic is ice that is floating on water and surrounded by land. The Antarctic is the opposite — ice that is sitting on land and surrounded by water. You begin to see how complicated it all is.
We have had, in this current cycle, repeated ice ages on Earth over the last three-million years. During an ice age, the ice can be three kilometres thick and cover practically all of Canada. It can spread through most of Siberia and Europe and reach almost to where London is today. Of course, the water to make this ice comes out of the ocean, and so in the past, the ocean level has dropped by some 125 metres.
From three million years ago to one million years ago, the ice advanced and retreated on a 41,000-year cycle. But from one million years ago until the present, the ice has advanced and retreated on a 100,000-year cycle.
What we are seeing in the Arctic today — the 80 per cent loss in the volume of the ice since 1980 — is an amazingly huge change in an amazingly short period of time. But it seems as though the rate of climate change is accelerating, and I'll talk more about that, next time …
Published 27 November 2012
© 2013 Karl S. Kruszelnicki Pty Ltd
|
Saturday, August 30, 2014
Capturing Romance
The Birthday (1915)
Marc Chagall
As an artist I have been ruminating on how to go about capturing a moment of romance. In Chagall's The Birthday (pictured above) we see an ever-present aspect of it depicted in art:
Fantasy, as it is depicted in Chagall's painting, is that distinctive faculty we have for imagining things - the product of which one recognizes in a painting, musical composition, or some other sensory-stimulating work of the imagination that involves our dwelling either consciously or subconsciously on the moment.
In Chagall's painting, two lovers are caught in the moment of a surprise kiss. She, moving flowers, perhaps that he delivered, into a vase; he, whipping around to steal a kiss. She is surprised, but her eyes are wide open, bringing in the moment in a very personal way. Both are swept off their feet as furniture and objets d'art fall upward, in the general direction of the sentiment displayed:
a heart that has been touched soars...
Throughout history, fantasy has taken on different meanings, with themes as different as the ages that cultivated them. In ancient civilizations, fantasy was that superfantastical realm of dragons and spirits in the east ...
c. 3800 BCE (China) Pig-dragon Pendant Hongshan Culture; Neolithic
...and demons in the west.
The Falling Angel (1923-47)
Marc Chagall
(This piece combines Biblical and Torah lore with the modern world and with Chagall's personal symbolism in a juxtaposition of images that attempt to summarize the many experiences the artist had over the course of his work on the painting)
During the Middle Ages, the prevailing fashion for fantasy was for grotesque distortions of human and animal forms. These figures filled the margins of illustrated manuscripts, while their carved equivalents adorned the façades of churches and public buildings.
Sermonizing artists provoked their public with terrifying visions of purgatory. The most imaginative artist in this field was Hieronymus Bosch.
The Garden of Earthly Delights (c. 1450-1516)
Hieronymus Bosch
The Prado
Fantastical images can be expressed utilizing the twisting, flowing, dream-like movements found in Chagall's whimsical paintings or in the hollowed-out eggshell bodies, demons with tree-like limbs, and man-eating birds with long, spiky beaks (Bosch) - or with entirely different imagery.
Guiseppe Arcimboldo
The Louvre
Fortunately, fantasy has not always been linked with terror. Renaissance artist Guiseppe Arcimboldo brought us novelty pictures in which the human head was composed out of an assemblage of fruit, flowers, or vegetables.
Our love of fantasy expresses our fascination with fables and fairies, folklore and myths. Our creation of these artworks or musical scores represents our desire to immerse ourselves in the production of these moments.
To first experience an aspect of it in our own mind and then, afterwards, to produce that image, allows us to whisk others away with visually stunning fantasies we draw upon from our own personal flights of fancy. At the same time, these images are idealistic and romantic.
Love involves the physical combining of separate forces in their moment of ecstasy. Chagall's The Birthday is based on the artist's exploration of dreams and the human psyche that in turn produces an artifact from this experience.
Expressing love in art comes from the desire to creatively produce that which is free from the restraints of reason. In pursuing this aim, painters adopt a variety of personal styles. Some produce images that resemble hallucinations or dreams in which figures or objects are depicted in a startlingly realistic manner, juxtaposed in a way that defies rational analysis; others produce semi-abstract works by deliberately suppressing themselves in their automatic drawings.
Joan Miró is a leading example of this approach.
Ballet Romantic (1974)
Joan Miró
However one imagines a romantic artwork, the artwork itself must surprise the painter as well as the viewer; if not, it will fail to resemble the workings of the subconscious mind at play.
I have not yet created this piece, but the expression of this highly sought after human sentiment is indeed weaving its way through my subconscious as I diligently work through creating the next 15 or so paintings for this upcoming exhibition.
Creating this many pieces of art (in such a short period of time) requires one to draw heavily upon the subconscious in search for these sentiments ... where they are found is between the artist and his or her canvas ... where they end up depends upon to whom these productions speak most.
Friday, August 22, 2014
Raising Funny Kids 44: Ma Mignonne
Ma mignonne
A une Damoyselle malade
Ma mignonne,
Je vous donne
Le bon jour;
Le séjour
C'est prison.
Puis ouvrez
Votre porte
Et qu'on sorte
Car Clément
le vous mande.
Va, friande
De ta bouche,
Que se couche
En danger
Pour manger
Confitures;
Si tu dures
Trop malade,
Couleur fade
Tu prendras,
Et perdras
L'embonpoint.
Dieu te doint
Santé bonne,
Ma mignonne.
Marot's seemingly simple poem is charming as it is disarming. What is made simple is invariably that which only a master of a given craft can present for our amusement.
Marot's poem in English (this literal translation by D. Hofstadter, whose magnum opus Le Ton beau de Marot is indispensable if one seeks to delve deeper into this poem), lacks the beautiful rhyming couplets (AA, BB, CC), the carefully scrutinized and chosen wording, and the musical nature which the French original offers.
For those who do not speak or who cannot faithfully read in French, the eloquence of this poem cannot be fully appreciated - though it can be internalized. If one speaks French, this poem is delight to the senses.
Originally penned in October 1527 for a future queen, Jeanne d'Albret de Navarre, it is one of my favorite poems, intimately speaking to my experience of being a mother.
Queen Jeanne d'Albret de Navarre
François Clouet
To a Sick Damsel
My sweet,
I bid you
A good day;
The stay
Is prison.
Then open
Your door,
And go out
For Clément
Tells you to.
Go, indulger
Of thy mouth,
Lying abed
In danger,
Off to eat
Fruit preserves;
If thou stay'st
Too sick,
Pale shade
Thou wilt acquire,
And wilt lose
Thy plump form.
God grant thee
Good health,
My sweet.
Marot's poem is refreshingly whimsical, notably respectful, and appropriately personal for court artist to bequeath a young child of noble birth.
Transporting ourselves to a village in France, dans le sein du beau Quercy, near the old Pont Valentré whose stony towers and stately arches stand astride itself, cradling a little town once called "Divona" by a tribe once called "Cadourques", we recognize the precarious aftereffects when a child of noble birth fell ill.
We can also transport ourselves to a space where softness of manner and eloquence of tongue is the transitional divide between our humanity and the nobility of spirit. For those who are moved by the arts of the prophets, who seek beauty as a companion, who soar on wings that carry the mind to the threshold of its imagination, who delight in charm and gaiety and who speak a similar language, this is one of those poems that serve us, a nice accompaniment to life - and parenthood.
This poem is a delight to recite aloud. When one wishes to express a sincere fondness for youthful innocence, few poems reach the pinnacle of Marot's verses. When read faithfully, the inflection carries with it a sweet-sounding, mellifluous tone, but to be read masterfully, it must be read from one's personal connection with or subjective memories of childhood in conjunction with one's intimate relation to childhood as a caring, nurturing adult.
In raising funny kids, one must first raise happy children. Happiness is found on an individual level and nearly always includes an element of beauty for inspiration. The beauty found in Marot's poem is one such nicety that can be shared in the intimacy known by a very simple word: home.
Ma Mignonne, the title I prefer, is a beloved poem that can be passed down for generations, just as it has been for over half a millennium.
Friday, August 15, 2014
The Secret to Reading This Blog
Life is all about communication. When we keep a secret, we not only have to monitor what we write, but also what we might say with our photos, quotes, in-between-the-lines, and colloquial expressions. We become vigilant of our own being - separate from it rather than part of it. An Avatar of our true identity and, thus, an art form - a snapshot taken from a given angle depending upon the lens. The director and orchestrator of the shot. The private funding behind the public entity. We are then separate from the spontaneous, streaming umph that marks true aliveness.
The posts herein are burdened with having to keep secrets. The exile, etched into the keyboard keys. The private equity investor on holiday, with few details - unlike most execs. The injunction mirrors the internalized feelings of entertainers and performers who rarely divulge their secrets - if ever.
And let's not forget the artistic poet, taking creative liberties - as is the case herein.
The little girl with the big imagination, labeled eccentric before she could spell the word. A confidant and, thus, an individual relaxed in the art of secret holding - in fact, quite relaxed. The encryptor, enjoying the creation of unspoken rules and patterns. The artist, mapping out said rules and patterns onto canvas - the key for which is nicely hidden in a neurological vault.
We are careful not to speak of secrets, even the mere mention of a secret sends imaginations flaring, minds wandering, and hopes twirling through a web of irrational desires, landing wherever they are trained to land - forever peeking out over the horizon.
If we jump from the ledge, Which direction, exactly, is up?
Secrets are kept from Readers. I know someone who blogs about one subject, but who never discusses the reasons for it or explores their feelings on the matter. It is simply the case with many bloggers. Hidden clues and messages, nuances and innuendos peppering post after post, in patterns few can map out.
When life can be seen plainly but is not talked about openly, people pick up the unstated rule:
Keep Secrets
This makes it impossible for Readers to ask relevant questions on anything other than the shared subject matter, the words and phrases - which rarely match the design. Though written language comes after spoken language, which is programmed early in life, the use of communication to solve problems is a lost art. Most every communication, like these blog posts, is encrypted with sensitivities, with taboos, with secret or hidden agendas, and with fervor - passionate, privately endorsed privilege, to which few - if any - are granted access.
But why do we communicate like this when the goal of communication is clear understanding? What does secret communication lead to?
look around
It leads to a whole slew of forbidden subjects. In families where "certain subjects" are never discussed, everyone accepts that some information simply must be hidden. This increases undue shyness, awkwardness, and acting out behavior.
This type of communication is rampant in society. We expect it. We do it. We are accustomed to it. When we encounter something different, we repel - we wonder, "What's wrong with this person?" We wonder, "Why are there so many "Ws" in this paragraph?"
In the early Latin alphabet, there was no letter "u" or "w," but there was a "V." As writing forms progressed through the middle ages, the introduction of lower case forms gave rise to the form "u," originally a variant of the letter "V." The intermingling of forms continued up until at least the early 17th century, as evidenced by the following text in 1620:
In this text, the word "uso" does not make use of the alternative form but retains the original Latin form "vso."
Sometime during the middle ages, the sound [w], found in Old English and other early Germanic languages, began being represented by the digraph "W" (two "Vs" side by side) and eventually gave rise to the single character "W."
As promised, or at least hinted, in the title of this blog post, I shall offer a secret to reading this blog, perhaps the introduction on secret holding was also fruitful in decoding posts - or in the realization that there is more than meets the reading brain than one had previously interpreted. Either way, here it goes:
As with the "W-tangent" above, this blog will often "go off on a tangent" of providing educational or otherwise informative information as a gift to Readers. This information, tangible in nature, is an added benefit of returning to this blog on a regular basis. Given that the author - namely, me - enjoys and takes numerous creative liberties with this blog, rendering it - at times - incomprehensible or, at the very least, confusing, said author - i.e., "me" - feels it the least she could do given her tendency to do this, which according to said author - okay, you know who I'm talking about - is not about to change anytime soon.
Typically, secrets promote ignorance. "I never saw it coming," is something sometimes heard once a secret is revealed. "I had an idea, but I never thought..." such a thing would happen, right? Exactly.
As with the recent circumstances surrounding the passing of Robin Williams, the world community must be looking at one another, wondering ... "What are you hiding?" the following thought - I hope - is "...and how can I help?"
It is important to look for signs of secrecy with people. There is always a break in the flow of communication. Behavior or expressions are choppy. Lies are not natural, they are counter to natural human communication, which is as expressive as it is revealing. When individuals feel that they must keep secrets or guard information, their personality changes - and often times, they show a different aspect of themselves to different audiences - rarely do these two worlds mix.
Secrets do not allow a subject to reach home, to touch the light of consciousness where new information can be received for later processing. Secrets block the flow of energy, perpetuating repetitive and compulsive behavior patterns.
I wrote this post in an attempt to open up the dialogue of keeping secrets, something that the world community is now examining. The loss of a beloved, talented entertainer with whom many people around the world resonated has hit home. It is not the loss of just an actor. It is the loss of all the people we care about. It is the persistent question, "Could I have done something to help?" that gets people. We have all experienced loss in our lives and often times it leaves us at a loss in how to deal with it - and how to help others cope. We struggle with what to say - and how to say it. Do we get clever? Do we go for an emotional response? Do we divulge our own struggles as a way to communicate? How do we repair the perception of connectivity in a world when separation is staring us right in the face?
Being told - from childhood forward - that we are supposed to "act" a certain way in public, "speak" a certain way in public, "write" our essays a certain way, "do" math problems a certain way, "paint" in a certain medium or style, affects who we are - causing some to join the Hipster movement.
Unless you want this to happen to you - or to your children - beware of what you say, think, teach, tweet, and pass along to others. Most people cannot decipher your encryption codes - nor do they have time to even think about deciphering your encryption codes.
It is plain, ordinary language that appeals most to individuals. With the uttering of every sentence, there must be an equal recognition, review, and agreement to continue onward. Without this very precise give-and-take exchange, there is no connection - and separation follows.
Blogging and writing, in general, are solitary activities. You have to speak to someone. Rarely do writers craft words like an artist crafts shapes on a canvas - though, admittedly, some - including myself - do. The point is that it can be difficult to communicate to a general audience in a public forum as you never know who is reading, which secrets they bring to the table, what sensitivities they possess, and how life has shaped their worldview. Given the online community is global, language, culture, politics, and social status often confuse communications into misunderstandings.
Children learn by imitation. An atmosphere of clear communication promotes clear communication. When the home atmosphere is pleasant, peaceful, clear, and open to new information and experiences, so, too, are the individuals in that home. As our home community extends into the living rooms, the computers, and the phones of the world community, what we send out, like what we express in our own homes, affects the lives of others.
Be kind to one another. Know that comedy, in its truest form - humor - is not about laughing at others ... it is about laughing with others ... sharing the fun of life, celebrating the happy moments, the moments that make us laugh, and the moments that connect us. Connection does not have to occur at the exclusion of other groups. The experience is what connects us. The subject matter. Everyone should be welcome. It is not about age, gender, social status, or cultural heritage, it is about whether or not we find value in a particular subject, wish to know more about it, and enjoy sharing that information with others.
The future of world communication is the abolition of secrets in favor of open communication. Most people share in this understanding, so it is only a matter of recognition and practice that will allow it to flourish. When it does, I hope that no one ever feels so alone again that they choose to end their life. No matter what happens, no matter how old or wrinkly we get, or how many times our boat fails to show up, there is always something worthy of investigation just over the horizon - and by horizon I mean tomorrow, next month, or five minutes from now (as we never know who might be calling or texting or what zany idea might cross through our mind).
Rules are meant to be questioned. If there are unspoken rules in your life that tell you secret holding is important, examine them - ask yourself why you're holding the secret, what the pros and cons are of holding that secret, and what you would do if someone "found out."
If the answer to that question is "So what!" you're on the right track. Besides, most people are more concerned with their own lives than they are with your secrets. A secret only trends for a short period of time. Living with lies can affect someone's entire life - and the lives of those around them.
So, if you're wondering how to read this blog... just know, that everything herein was written with good intentions, a hint of good humor, an interest in sharing, and a playful demeanor ... there are no hidden secrets other than the ones I'm not telling you about.
Thursday, August 14, 2014
Robin Williams
Robin Williams was a wonderful, kind and generous man.
Patch Adams
1951 - 2014
American actor, stand-up comedian,
film producer, screen writer...
Loved by millions
"He never acted as if he was powerful or famous.
Instead, he was always tender and welcoming."
Patch Adams
Reader Response
(While on vacation) I received news that Robin Williams had died, and that he had taken his own life. Within a couple of days, I received a number of heartfelt letters from friends and readers telling me that they missed me on Facebook and that they wondered what thoughts or insights I might have to share on the subject and if I would be writing an article on Robin Williams anytime soon. They also asked if I was doing okay.
While the subject of hiding pain behind comedy has come up countless times in my investigations into humor, I was touched by the concern my Readers expressed towards me. Perhaps Robin Williams' death will remind people to check on one another, to make sure that the people for whom they care are "doing okay" - sometimes just asking someone how they're doing can keep them feeling connected.
Robin Williams
Rather than look to drugs or alcohol for my own intense need for input (and output), I look to creative outlets that soothe the frustrations life can sometimes bring.
Even if I don't like what I write or create, the desire to try again, to do better, fuels my inspiration to continue. To make more. To get better. To have one more laugh or epiphany that fills me with joy and wonder. To have one more "moment" that quiets all the others and allows me to feel present, to be myself when the world might otherwise want me to serve as an amalgam for their own thoughts.
While drugs and alcohol can aid in reaching these experiences, they do not ultimately leave one feeling inspired. On the contrary, drugs and alcohol leave one feeling empty.
Making light of life's challenges can give most of us a long enough break to sit back and find new meaning, but that does not mean that comedy can cure heartache. Only we can cure heartache - comedy just reminds us that we can choose to laugh when we might otherwise want to cry.
"Carpe per diam - seize the check."
Robin Williams
Robin Williams - the comedian
Robin Williams, an incredibly prolific individual whose ability to consciously engage in the work of personal growth and of inner transformation through comedy, left this world as he lived in it - on his own accord.
Robin Williams in Flubber
What made Robin Williams so intense?
Overexcitability Questionnaire
1. Do you ever feel really high, ecstatic, and incredibly happy? Describe your feelings.
2. What has been your experience of the most intense pleasure?
3. What are your special kinds of daydreams and fantasies?
4. What kinds of things get your mind going?
5. When do you feel the most energy, and what do you do with it?
6. How do you act when you get excited?
7. What kind of physical activity (or inactivity) gives you the most satisfaction?
8. Is taste something very special to you? Describe it in a way that it is special.
9. Do you ever catch yourself seeing, hearing, or imagining things that aren't really there? Give examples.
10. When do you feel the greatest urge to do something?
11. If you come across a difficult idea or concept, how does it become clear to you? Describe what goes on in your head in this case.
12. Describe what you do when you are just fooling around.
Living With Intensity, Daniels & Piechowski, Ph.D.s
Robin Williams in Dead Poets Society
"You're only given a little spark of madness.
You mustn't lose it."
Robin Williams
The overexcitability questions above come from the book Living With Intensity, the adapted list was created from the 21-item OEQ by Ackerman & Miller, 1997.
The book describes and explores the multi-faceted sensitivities and intensities of gifted children and adults. It offers insights in understanding and nurturing the complex combination of intellectual advancement and overexcitabilities... with insights into how to avoid tragic misperceptions and misdiagnoses.
Living on the edge. That is how precarious it often feels when we come to the top of the mountain, or what seems like the top, and are startled to find ourselves looking over the edge. The view is panoramic, breath-taking. But what about the trip down? Ordinarily, a sequence of moments shifts the boundaries of our private universe gradually from the concerns of young adulthood to something larger, startling, mysterious.
Sheehy (1995) "the mortality crisis, (Living with Intensity, p. 176)
Robin Williams
Triumph to Tragedy
The tragedy associated with the circumstances of Robin Williams' death has heightened our understanding of the dangers associated with living with intensity or what some call creative genius (turned against itself).
What is Creative Genius
It is the applied integration of expanded sensitivities, otherwise known as artful living. Creative Genius is the outward sign of some area of intensity.
Robin Williams unleashed his creative genius into his comedy. His true genius was in making others laugh. While Robin Williams was in possession of many talents, it was making people laugh for which he will mostly be remembered.
Maya Angelou
"Death is nature's way of saying, "Your table's ready.""
Robin Williams
How could someone so talented do such a thing?
This is the primary question my Readers have been asking me. While my blog often strays from the topic of humor, it is humor that underlines all my articles - even the serious ones. It is me saying, "Hey, isn't it funny how serious we can be?"
Comedy covers serious topics as well as surprising insights. We are accustomed to having Robin Williams surprise us with his zany sense of humor, but the seriousness by which he left the world will leave many people asking "Why?" for years to come.
For people who live with intensity - often described as creative genius - there is an enormous range of human capacity, an intricate web of understanding that occurs due to the intensity that is the hallmark of the individual's predisposition.
Irrespective of where that intensity is directed, there is an extra umph behind everything they do.
"What's right is what's left when everything is wrong."
Robin Williams
Living with Intensity
An individual living with intensity approaches everything they do with their heart fully in it. This does not mean that they will give away their secrets. On the contrary. In the case of Robin Williams, he directed his intensity into his comedy, into entertaining others, and as it so happened, into his personal vision of the world.
Some people call this the "dark side" because when you try to penetrate this veil, when you try to reach the person, they are somewhere else. This "somewhere else" is not always a happy place.
This personal or dark side, as it is known, offers the heart a repose. Rather than comedy being his only escape, inactivity and retreat were the spaces into which Robin sometimes fled. When he was tired, done, or otherwise needed a break, this space served as his solace.
It is inside this space that the analytical mind can turn against itself. That non-emotional, disengaged, quietly critiquing serious side we all know "too well" reigns in this sphere.
The negative responses one encounters in life but normally "blocks out" are intensified. Tragically, for some, these voices can take over.
While it is natural to question the world and our role in it (turning us all into philosophers), it is not natural to end our lives as a result of this questioning.
The natural order of life is to keep going.
"...when you have a great audience, you can just keeping going and finding new things."
Robin Williams
Common Philosophical Questions
that people ask
1. What is my purpose in life?
2. Why are we here?
3. Is there a God?
4. What if there is no God?
5. Is this all there is?
6. Is there something more after this life?
7. Do we have free will?
8. Why did the Chicken cross the Road?
"The only weapon we have is comedy."
Robin Williams
Living with intensity is like being on a constant Road Trip. You are forever crossing one road or another. Only when you exhaust yourself to the point of near non-existence, do you step back, retreat, and focus on your own needs.
The difficulty with living in the public eye or of being an individual to whom others look for support, guidance, or entertainment, is that you are human - you're actually human. And like everyone who returns from a Road Trip, you need and absolutely must have time to relax and recharge.
We all need time to process the thoughts, hopes, and doubts that cross our mind. When we are constantly 'on the go' or when we have constant demands hanging over us, the desire to retreat increases and we naturally look for escapes.
If you are intellectually driven, you look to your work, your research, your insights and epiphanies for solace. You thrive on the insights your analytical brain offers and upon the respect you receive for your efforts. These experiences increase your self-respect and become emotional sustenance to continue onwards. We all need something to fill the coffers back up when they run low.
If you are creatively driven, you look toward your craft for solace. Your craft is your lifeline. It keeps you connected. You have an avenue through which to communicate, to tell the world, "I exist."
Only it is not the world we're trying to convince - it's ourselves.
Despite popular belief,
we all need proof that we exist
"a weird combination of isolation and connection and disconnection; discomfort and awkwardness."
Robin Williams (on connection)
Robin Williams and Socrates
Robin Williams juggled multiple intensities: intellectual intensity, emotional intensity, and creative intensity. His comedy was a metaphor for the evolution of his intensity. To reclaim his balance, he withdrew himself from the depth his intensity carved into his life.
If one is not careful, an intense person can carve a gorge so deeply that they begin to believe that they cannot escape. This is the pitfall of intensity and/or creative genius.
Socrates, an intensely focused Greek, mesmerized by the notion of finding truth and wisdom, carved out his own grave - and what did the Athenian citizens do to him? They buried him in it.
Like Ancient Athenian citizens, the world of Hollywood can be overly harsh in its judgment. Men are judged by their charisma, women on their beauty. If either fail to deliver, the Twitter feeds go wild. Talk about pressure. No wonder so many intensely creative individuals find solace in drugs or alcohol. Where does one retreat when all eyes are on them?
Into a world of their own making.
Socrates went around asking people big questions - questions others did not want to ask themselves nor have someone else ask of them because they did not know - or did not want to know - the answer.
As it turns out, having an answer - even if it is wrong - is better for most people than asking deep questions and being open to the answers that surface.
Heartfelt questions can lead to our questioning whether or not life is worth it. If the answer is no, trouble follows.
There is a reason why people continue to ask the question:
Half-Empty or Half-Full?
For individuals who live with intensity, life usually feels like it is one way or the other. There is very little middle-ground.
"Reality... what a concept."
Robin Williams
Why do we love comedy?
We love comedy because it fills life's in-between moments with laughter. It's simple. When we step back from the intensity of our daily lives, from the demands of family, the pressures of work, the relentless internal dialogue critiquing every thought and moment, we need a release. We need to laugh. We need to know that we're not in this thing alone. That others feel the way we do. That there is meaning. That there is a purpose - or no purpose, in which case, we need to find meaning in that.
Comedy takes confusion, doubt, and tragedy and twists it up like a balloon animal. The sound it makes can often times send shrills down our spines, but in the end, we look at the finished product and smile. We see the bigger picture. We see the magic. We see the art.
Exploring Humor
Exploring humor has allowed me to discover the humorous side of life. From Scooby Doo to Pacman to Research Papers, all the things we do and think about in life, when viewed through a humorous microscope, become absurd, which is funny.
The seriousness we attach to tragedy and the importance we place on every thought and emotion we experience can leave one laughing.
As human beings, we want the range of emotions to match the range of experiences we have in life - and comedy provides that.
Comedy (Sometimes) Heals
When the intensity in our own lives lightens up, so do we. This is when humor and the people who make us laugh are such a welcomed presence in our lives.
This is something most comedians understand on a deeply personal level, but understanding the power of humor and feeling it are not the same thing.
"My battles with addiction definitely shaped how I am now. They really made me deeply appreciate human contact. And the value of friend and family, how precious that is."
Robin Williams
The intensity that leads some to wild, zany expressions is what also leads them to scary places, to places where demons have their way with the mind, a place into which no one can pass.
Comedians, like everyone else, sometimes find themselves asking the question,
"Why am I doing this?"
This is a natural question to ask. The answer to the question is as different as each individual asking it.
Descartes walks into a bar. The bartender says, "Are you having a beer?" Descartes says, "I think not," and ceases to exist.
René Descartes (1596 - 1650) wrote in his Discourse on the Method of Rightly Conducting the Reason and Seeking Truth in the Sciences,
"Mrs. Doubtfire: 'He was quite fond of the drink. It was the drink that killed him.'
Miranda: 'How awful. Was he an alcoholic?'
Mrs. Doubtfire: 'No, he was hit by a Guinness truck.'
The BIG Questions
Descartes understood the inherent perils of examining one's thoughts. He admitted that
The single design to strip one's self of all past beliefs is one that ought not to be taken by every one.
Robin Williams (and Matt Damon) in Good Will Hunting
Robin Williams
The Genius of Robin Williams
Robin Williams must have recognized his own genius. All he needed to do was sit back and observe the power he had over the happiness of others.
In a world of intensity, love and compassion are two of the greatest forces upon which the soul can find respite. For an intense person, love and compassion are true lifelines.
Without a deep connection to these forces, the soul ceases to exist. Love is replaced with despair. Compassion is replaced with rejection. The result is either a temporary retreat to heal oneself (often times sought after in drugs, in alcohol, or in other intimate pleasures) or, in extreme cases, suicide.
The withdrawal feels as intense as the mania.
Depression is the hallway to a nervous breakdown. Whether or not someone can be saved, or whether or not someone can save another suffering is a question many have asked.
There is no magic cure or simple answer. There is no amount of money or fame that can heal a heavy heart. In fact, money and fame can complicate life.
"People think they know you. They expect you to be literally like you are on TV or in the movies, bouncing off the walls. A woman in an airport once said to me, "Be zany!" People always want zany, goofy sh-t from me. It takes a lot of energy to do that. If you do that all the time, you'll burn out."
Robin Williams
"I went to rehab in wine country, just to keep my options open"
Robin Williams
All entertainers and public figures are subject to scrutiny. For highly intense individuals, this scrutiny (including, in particular, self-scrutiny) can have disastrous effects on self-esteem, and upon an individual's sense of self-worth.
The pain associated with rejection can lead a person back to that personal space, back to that dark place where one naturally questions the value of it all.
"When I'm awake, I don't want to go to sleep. I don't want the hassle of turning the light off, putting my head down and then all the thoughts. I don't want all those thoughts."
Robin Williams
"Robin Williams was so funny that it is difficult to imagine him sad," one of my Readers wrote in a letter to me.
Like many comedians, Robin Williams had the ability to give others what he himself did not always feel inside.
But for most of the world, it appeared as if Robin was truly "into" what he was doing. When people roared, his intensity flew off the charts ... and he took us with him!
"...they are always talking about 'well, is it meaningful?' Well, sure it's meaningful if you come out and you had a great laugh."
Robin Williams
Robin Williams took comedy to a new level - he took it everywhere. He took it to our hopes, to our dreams, and to our fears. He humored us with our own shortcomings and with his own. He showed the world how to laugh when one might otherwise want to cry. He lightened the mood in the room when it got too heavy. He was someone to whom others could turn when they needed to feel good. With all eyes upon him, he served the world a platter of joy. He made us laugh and we loved him for it.
Everyone's situation in life is different. We all have different experiences, different needs, and different opinions on everything, including which direction the toilet paper roll should flow. Our view of the world is largely dependent upon where we are standing in relation to it.
In the end, the world is what we decide it to be. We can see the world as funny. We can see the world as tragic ... or, like most, we can see it somewhere in between. We can choose to see it for how (we think) it is - or is not. We can see a purple world - even if we're colorblind. We can see a kind world. We can see a harsh world. We can see a world with purpose - or none at all.
"Comedy is acting out optimism."
Robin Williams
Robin Williams has reminded the world that our lives are not just about us. While we are not responsible for the happiness of others, our words and actions do affect them.
Feeling blue???
If you are languishing, feeling empty, or otherwise questioning whether life is worth living... talk to someone.
This doesn't mean that others will have the answers you seek, but talking to others gives you a moment to pause and to think through the emotions that flood everyone's system from time to time.
We don't have to be held hostage by our emotions. We may not be able to stop every negative thought that flows through our brains, but we can devise strategies to cope with them. We can also create healthy outlets upon which we might rest until the storm passes.
As I have written many times over, laughter is best when shared.
True comedy is not laughing at the shortcomings of others - that just makes people feel self-conscious. True comedy is making others feel good about themselves.
Robin Williams made millions of people laugh and feel good on the inside. He showed us how to be silly, reminded us that it is okay to loosen up, and that laughter truly is the best medicine. His life, like his humor, moved millions to laughter and to tears. His genius will not be forgotten.
"No matter what people tell you,
words and ideas can change the world."
Robin Williams
|
#pragma once
//------------------------------------------------------------------------------
/**
@class ToolkitUtil::FBXSkinParser
Searches an FBX scene and extracts skins
(C) 2012 gscept
*/
#include "fbxparserbase.h"
#include "fbxtypes.h"
//------------------------------------------------------------------------------
namespace ToolkitUtil
{
class FBXSkinParser : public FBXParserBase
{
__DeclareClass(FBXSkinParser);
public:
/// constructor
FBXSkinParser();
/// destructor
virtual ~FBXSkinParser();
/// parses a scene in search of skins; animBuilder is optional (may be 0)
void Parse(KFbxScene* scene, ToolkitUtil::AnimBuilder* animBuilder = 0);
/// sets a list of skeletons to be used for skin->skeleton connections
void SetSkeletonList(SkeletonList skeletons);
/// sets the mesh which should get skinned (must not be 0; asserted in the setter)
void SetMesh(ShapeNode* mesh);
private:
/// target mesh to be skinned, supplied via SetMesh()
ShapeNode* mesh;
/// skeletons used to resolve skin->skeleton connections, supplied via SetSkeletonList()
SkeletonList skeletons;
};
//------------------------------------------------------------------------------
/**
*/
inline void FBXSkinParser::SetSkeletonList(SkeletonList skeletons)
{
    // keep a copy of the skeleton list; Parse() uses it to resolve
    // skin->skeleton connections
    this->skeletons = skeletons;
}
//------------------------------------------------------------------------------
/**
*/
inline void FBXSkinParser::SetMesh(ShapeNode* mesh)
{
    // a null mesh is a programming error on the caller's side
    n_assert(0 != mesh);
    this->mesh = mesh;
}
} // namespace ToolkitUtil
//------------------------------------------------------------------------------
|
Thursday, July 21, 2011
A New Sudan
1. Independence Day in the New Sudan
The night before Independence Day, our new District Superintendent Fred Dearing received passes for us to sit in the bleachers to watch the official birth of the Republic of South Sudan. I was amazed at the diversity of the crowd gathered in Yei's Freedom Square to celebrate their first Independence day. It included not only the many Christian denominations that are here, but also local Muslims; not just South Sudanese from almost every state, but even Darfuris who sincerely celebrated the independence of their Southern brothers and sisters even while their home area in the western part of the North continues to suffer massacres of entire villages. It was great to witness that in the midst of their own celebration, South Sudanese pledged continued efforts for peace and freedom in Darfur. All the more amazing to realize that Independence is not just decades but centuries overdue for South Sudanese, who have been controlled, exploited and oppressed by others since ancient times without cease. In a sea of hundreds of banners, one summed it up in the words of Dr. Martin Luther King: “Free At Last.” People had walked to Yei from surrounding villages; some climbed trees to join the thousands enjoying marching and speeches in Yei's Freedom Square. Every local civic group and school had a banner and marched in a long parade; Salaam United Methodist School was there marching in their school uniforms. Sudanese women groups marched with banners proclaiming their commitment to playing a key role in the development of the new South Sudan. As we looked around us in the bleachers, we saw Sudanese young educated professionals listening intently to all the speeches, responding at times to commit themselves to the task of building a new nation. The highlight of the day was the simple act of lowering the flag of Sudan, and raising the new flag of the Republic of South Sudan. 
People cheered and ululated wildly, and that energy continued into the afternoon as dozens of tribal groups gathered in circles around Freedom Square into the evening to dance traditional tribal dances: Kakwa, Nuer, Dinka, Mandari, and many more. I hope and pray the peaceful, joyful spirit of those co-existing celebrations on Independence Day can be continued permanently into the complex process of becoming one nation of many tribes working together.
2.Oil and Food in the New Sudan
Petrol (gasoline) prices in Yei have risen to about $6.80 per gallon, but farther north in Unity State they are over $10 per gallon, down from $12 per gallon during the height of the fuel shortage a few weeks ago. Even though 75% of the known oil is located in South Sudan, all the pipelines and processing are in the north, and the north stopped allowing shipments of oil after the vote to separate. Oil from other countries has slowed down, reportedly because Libyan oil production has been disrupted. South Sudan is working on arrangements to build a pipeline through Kenya to a seaport, but that is expected to take 8 years. Meanwhile, higher fuel prices mean higher food costs (over half the local food is still imported and trucked in.) Already food prices were rising independently of this crisis; according to the South Sudan Minister of Agriculture Anne Itto, maize (corn) prices in Kenya rose 130% in the first half of 2011. Adding to that the fuel crisis in South Sudan, Itto says maize prices have quadrupled here (Sudan Tribune 16 July 2011). The conservation farming methods we're teaching here are increasing yields by 6.5 times in Zimbabwe just using existing hand tools with no commercial fertilizers (Conservation Farming in Zimbabwe: Evaluation Report, January 2011, Canadian Food Grains Bank.) If we can show that these methods do even half as well in South Sudan, it could help increase food production here quickly and inexpensively.
3. Training Leaders for the New Sudan
Elizabeth Heft, an Individual Volunteer in Mission from Ginghamsburg UMC in Ohio, is here for 6 weeks as an Individual Volunteer in Mission conducting training for youth leaders of the 17 United Methodist churches in South Sudan. It was exciting to see 35 Sudanese young adults, about 1/3 women, gathered for the 2-day retreat here in Yei. Elizabeth and Peter Lomorro, the Youth Coordinator for Sudan District, did a fantastic job of providing much appreciated training while the young adults provided joyful worship with drums, shakers, song and dance...and fervent commitments to the task of leading and teaching youth in the remote village churches. Looking over this group of energized young women and men gathered in Yei UMC as we celebrated communion on the final day of the retreat, I was moved with the knowledge that faith development among the youth of the village churches would take a major step forward, and astounded to realize that several of these young adults would be, in a few years, pastors of these and of new churches. What an incredible gift!
|
/**
* \file
* \brief Driverkit module implementation.
*
* Contians helper functions to iterate over driver modules in a domain
* and create driver instances from driver modules.
*/
/*
* Copyright (c) 2016, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdlib.h>
#include <inttypes.h>
#include <barrelfish/barrelfish.h>
#include <driverkit/driverkit.h>
#include <driverkit/iommu.h>
#include <driverkit/hwmodel.h>
#include <collections/hash_table.h>
#include <skb/skb.h>
#include <if/mem_defs.h>
#include "debug.h"
#include "../libc/include/namespace.h"
__attribute__((unused))
/**
 * \brief Renders a 0-terminated array of node ids as "[n0,n1,...]" into out.
 *
 * \param nodes 0-terminated list of node ids (0 acts as the sentinel)
 * \param out   caller-provided buffer receiving the formatted list; must be
 *              large enough for the rendered string plus the terminator
 */
static void format_nodelist(int32_t *nodes, char *out){
    // Keep a write cursor instead of re-scanning with strlen() before every
    // append; this turns the original O(n^2) formatting into one O(n) pass.
    char *p = out;
    *p++ = '[';
    int first = 1;
    for (; *nodes != 0; ++nodes) {
        if (!first) {
            *p++ = ',';
        }
        p += sprintf(p, "%" PRIi32, *nodes);
        first = 0;
    }
    *p++ = ']';
    *p = '\0';
}
/**
 * \brief Parses an SKB output list of name(Address, NodeId) terms.
 *
 * \param in          SKB output string to parse; must not be NULL
 * \param names       out: array receiving one entry per parsed term; the
 *                    caller must provide room for every term contained in in
 * \param conversions out: number of entries written to names
 */
void driverkit_parse_namelist(char *in, struct hwmodel_name *names, int *conversions){
    assert(in);
    *conversions = 0;
    struct list_parser_status status;
    skb_read_list_init_offset(&status, in, 0);
    while(skb_read_list(&status, "name(%"SCNu64", %"SCNi32")",
                        &names->address, &names->nodeid)) {
        // address is scanned with SCNu64 (uint64_t), so print it with PRIx64;
        // the previous "%lx" only matches uint64_t on LP64 targets.
        debug_printf("parse_namelist: %" PRIx64 "\n", names->address);
        names++;
        *conversions += 1;
    }
}
#define ALLOC_WRAP_Q "state_get(S)," \
"alloc_wrap(S, %zu, %d, %"PRIi32",%s, NewS)," \
"state_set(NewS)."
/**
 * \brief Ask the SKB hardware model to allocate a memory region.
 *
 * Runs the alloc_wrap query, which updates the model state and answers
 * with exactly one name(Address, NodeId) term for the new region.
 *
 * \param bytes      Size of the allocation in bytes.
 * \param dstnode    Model node id of the memory to allocate from.
 * \param nodes      Zero-terminated list of node ids that must be able
 *                   to reach the allocation.
 * \param alloc_bits log2 of the allocation size/alignment.
 * \param retaddr    Out (optional): allocated address in dstnode's
 *                   address space.
 */
errval_t
driverkit_hwmodel_allocate(size_t bytes, int32_t dstnode, int32_t * nodes,
uint8_t alloc_bits, genpaddr_t *retaddr) {
errval_t err;
char nodes_str[128];
format_nodelist(nodes, nodes_str);
HWMODEL_QUERY_DEBUG(ALLOC_WRAP_Q, bytes, alloc_bits, dstnode, nodes_str);
err = skb_execute_query(ALLOC_WRAP_Q, bytes, alloc_bits, dstnode, nodes_str);
if (err_is_fail(err)) {
DEBUG_SKB_ERR(err, "failed to query\n");
return err;
}
// The query answer is a single name(Address, NodeId) term.
struct hwmodel_name names[1];
int num_conversions = 0;
driverkit_parse_namelist(skb_get_output(), names, &num_conversions);
assert(num_conversions == 1);
if (retaddr) {
*retaddr = names[0].address;
}
return SYS_ERR_OK;
}
/**
 * \brief Allocate RAM of at least `bytes` from model node `dstnode`,
 *        reachable from every node in the zero-terminated `nodes` list.
 *
 * The size is rounded up to a power of two of at least LARGE_PAGE_SIZE
 * (the PT configuration in the SKB currently uses 2M pages).
 *
 * \param dst     Out: freshly allocated slot holding the RAM capability.
 * \param bytes   Requested size; rounded up as described above.
 * \param dstnode Model node id of the memory region to allocate from.
 * \param nodes   Zero-terminated list of node ids that must reach it.
 */
errval_t driverkit_hwmodel_ram_alloc(struct capref *dst,
                                     size_t bytes, int32_t dstnode,
                                     int32_t *nodes)
{
    if (bytes < (LARGE_PAGE_SIZE)) {
        bytes = LARGE_PAGE_SIZE;
    }
    int bits = log2ceil(bytes);
    // Bug fix: the previous `1 << bits` was a 32-bit int shift, which
    // is undefined/overflows for allocations of 2 GiB and larger.
    bytes = (size_t)1 << bits;
    assert(bits >= 21);
    // The PT configuration in the SKB is currently using 2M pages.

#ifdef DISABLE_MODEL
    if (dstnode != driverkit_hwmodel_lookup_dram_node_id()) {
        return LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS;
    }
    return ram_alloc(dst, bits);
#else
    errval_t err;
    errval_t msgerr;
    genpaddr_t addr;

    // Let the model pick a concrete address satisfying the constraints.
    err = driverkit_hwmodel_allocate(bytes, dstnode, nodes, bits, &addr);
    if (err_is_fail(err)) {
        return err;
    }

    // Alloc cap slot
    err = slot_alloc(dst);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    struct mem_binding *b = get_mem_client();
    debug_printf("Determined addr=0x%"PRIx64" as address for (nodeid=%d, size=%zu) request\n",
                 addr, dstnode, bytes);

    // Ask the memory server for RAM at exactly the model-chosen range.
    err = b->rpc_tx_vtbl.allocate(b, bits, addr, addr + bytes,
                                  &msgerr, dst);
    if (err_is_fail(err)) {
        DEBUG_ERR(err, "allocate RPC");
        return err;
    }
    if (err_is_fail(msgerr)) {
        DEBUG_ERR(msgerr, "allocate");
        return msgerr;
    }

    return SYS_ERR_OK;
#endif
}
/**
 * \brief Allocate a Frame capability of at least `bytes`, backed by RAM
 *        from model node `dstnode` and reachable from all `nodes`.
 *
 * Allocates RAM via driverkit_hwmodel_ram_alloc and retypes it into a
 * Frame capability returned in `dst`.
 */
errval_t driverkit_hwmodel_frame_alloc(struct capref *dst,
size_t bytes, int32_t dstnode,
int32_t *nodes)
{
#ifdef DISABLE_MODEL
if (dstnode != driverkit_hwmodel_lookup_dram_node_id()) {
return LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS;
}
return frame_alloc(dst, bytes, NULL);
#else
errval_t err;
struct capref ram_cap;
// ram_alloc rounds up again, but enforce the 2M minimum here as well.
if(bytes < LARGE_PAGE_SIZE) bytes = LARGE_PAGE_SIZE;
// Allocate RAM cap
err = driverkit_hwmodel_ram_alloc(&ram_cap, bytes, dstnode, nodes);
if(err_is_fail(err)){
return err;
}
// Alloc cap slot
err = slot_alloc(dst);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_SLOT_ALLOC);
}
// Get bits
assert(bytes > 0);
uint8_t bits = log2ceil(bytes);
assert((1UL << bits) >= bytes);
// This is doing what "create_ram_descendant" in
// lib/barrelfish/capabilities.c is doing.
// TODO(review): on the error paths below, ram_cap and the slot in *dst
// are leaked; consider destroying/freeing them before returning.
err = cap_retype(*dst, ram_cap, 0, ObjType_Frame, (1UL << bits), 1);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CAP_RETYPE);
}
// The Frame descendant keeps the memory alive; drop the parent RAM cap.
err = cap_destroy(ram_cap);
if (err_is_fail(err)) {
return err_push(err, LIB_ERR_CAP_DESTROY);
}
return SYS_ERR_OK;
#endif
}
/**
* fills in dmem->vbase + maps frame
*/
errval_t driverkit_hwmodel_vspace_map(int32_t nodeid, struct capref frame,
vregion_flags_t flags, struct dmem *dmem)
{
#ifdef DISABLE_MODEL
return SYS_ERR_OK;
#else
errval_t err;
struct frame_identity id;
err = frame_identify(frame, &id);
if (err_is_fail(err)) {
return err;
}
char conf_buf[512];
// Describe the frame in the returned dmem.
dmem->mem = frame;
dmem->size = id.bytes;
dmem->devaddr = id.base;
// Alloc space in my vspace
assert(nodeid == driverkit_hwmodel_get_my_node_id());
// Ask the model for a virtual base and the configuration entries
// needed to make the frame reachable; fills dmem->vbase.
err = driverkit_hwmodel_get_map_conf(frame, nodeid, conf_buf, sizeof(conf_buf),
&dmem->vbase);
if(err_is_fail(err)) {
DEBUG_SKB_ERR(err, "vspace_map local");
return err;
}
uint64_t inaddr, outaddr;
int32_t conf_nodeid;
struct list_parser_status status;
// Walk the returned c(NodeId, InAddr, OutAddr) configuration entries.
// NOTE(review): the same (vbase, frame) pair is mapped once per entry;
// presumably only one entry applies to the local node — confirm.
skb_read_list_init_offset(&status, conf_buf, 0);
while(skb_read_list(&status, "c(%"SCNi32", %"SCNu64", %"SCNu64")",
&conf_nodeid, &inaddr, &outaddr)) {
debug_printf("%s:%u %i, %i, inaddr=%lx, vbase=%lx\n", __FUNCTION__, __LINE__,
nodeid, conf_nodeid, inaddr, dmem->vbase);
err = driverkit_hwmodel_vspace_map_fixed(nodeid, dmem->vbase, frame,
flags, dmem);
if (err_is_fail(err)) {
DEBUG_ERR(err, "TODO CLEANUP!");
return err;
}
}
return SYS_ERR_OK;
#endif
}
/**
 * \brief Map `frame` at the fixed virtual address `addr` in the local
 *        vspace and record the base in dmem->vbase.
 */
errval_t driverkit_hwmodel_vspace_map_fixed(int32_t nodeid,
                                            genvaddr_t addr,
                                            struct capref frame,
                                            vregion_flags_t flags,
                                            struct dmem *dmem)
{
    // Only the caller's own address space can be manipulated here.
    if (nodeid != driverkit_hwmodel_get_my_node_id()) {
        return LIB_ERR_NOT_IMPLEMENTED;
    }

    struct frame_identity id;
    errval_t err = frame_identify(frame, &id);
    if (err_is_fail(err)) {
        return err;
    }

    dmem->vbase = addr;
    return vspace_map_one_frame_fixed_attr(addr, id.bytes, frame, flags,
                                           NULL, NULL);
}
#define MAP_WRAP_Q "state_get(S)," \
"map_wrap(S, %zu, 21, %"PRIi32", %"PRIu64", %s, NewS)," \
"state_set(NewS)."
/**
 * \brief Ask the model for a virtual address at which `frame` could be
 *        mapped into `nodeid`'s vspace. No mapping is installed.
 *
 * Runs the map_wrap query; its answer carries two names: the resolved
 * frame name and the chosen virtual address.
 */
errval_t driverkit_hwmodel_vspace_alloc(struct capref frame,
int32_t nodeid, genvaddr_t *addr)
{
errval_t err;
struct frame_identity id;
err = frame_identify(frame, &id);
if (err_is_fail(err)) {
return err;
}
// Source node list contains just the requesting node.
int32_t src_nodeid[2];
char src_nodeid_str[128];
src_nodeid[0] = nodeid;
src_nodeid[1] = 0;
format_nodelist(src_nodeid, src_nodeid_str);
//int32_t mem_nodeid = id.pasid;
// NOTE(review): the frame's own address-space id (id.pasid) is ignored
// and DRAM is assumed as the memory node — confirm this is intended.
int32_t mem_nodeid = driverkit_hwmodel_lookup_dram_node_id();
uint64_t mem_addr = id.base;
HWMODEL_QUERY_DEBUG(MAP_WRAP_Q,
id.bytes, mem_nodeid, mem_addr, src_nodeid_str);
err = skb_execute_query(MAP_WRAP_Q,
id.bytes, mem_nodeid, mem_addr, src_nodeid_str);
if(err_is_fail(err)){
DEBUG_SKB_ERR(err, "map_wrap");
return err;
}
struct hwmodel_name names[2];
int num_conversions = 0;
driverkit_parse_namelist(skb_get_output(), names, &num_conversions);
assert(num_conversions == 2);
//ignore, names[0] it is the resolved name as stored in frame
*addr = names[1].address;
debug_printf("Determined addr=0x%"PRIx64" as vbase for (nodeid=%d, size=%zu) request\n",
*addr, nodeid, id.bytes);
return SYS_ERR_OK;
}
/*
* Returns this process nodeid. It lazily adds the process' model node
* and returns its identifier.
*/
int32_t driverkit_hwmodel_get_my_node_id(void)
{
errval_t err;
// Ensure the SKB connection exists before issuing queries.
err = skb_client_connect();
if (err_is_fail(err)) {
return -1;
}
/*
* XXX: this assumes the domain only runs on a single core!
*/
// Cached after the first call: the process node is added to the
// model exactly once. Returns -1 on any SKB failure.
static int32_t nodeid = -1;
if(nodeid == -1){
HWMODEL_QUERY_DEBUG(
"state_get(S), "
"add_process(S, E, NewS), writeln(E), "
"state_set(NewS)");
err = skb_execute_query(
"state_get(S), "
"add_process(S, E, NewS), writeln(E), "
"state_set(NewS)");
if (err_is_fail(err)) {
DEBUG_SKB_ERR(err, "add_process");
return -1;
}
err = skb_read_output("%d", &nodeid);
assert(err_is_ok(err));
DRIVERKIT_DEBUG("Instantiated new process model node, nodeid=%"PRIi32"\n",
nodeid);
}
return nodeid;
}
int32_t driverkit_hwmodel_lookup_dram_node_id(void)
{
    // Resolve the model enumeration id of the DRAM node.
#ifdef DISABLE_MODEL
    // Without the model, DRAM is conventionally node 1.
    return 1;
#else
    static const char *dram_path = "[\"DRAM\"]";
    return driverkit_hwmodel_lookup_node_id(dram_path);
#endif
}
int32_t driverkit_hwmodel_lookup_pcibus_node_id(void)
{
    // Resolve the model enumeration id of the PCI bus node.
    static const char *pcibus_path = "[\"PCIBUS\"]";
    return driverkit_hwmodel_lookup_node_id(pcibus_path);
}
/**
 * \brief Look up the model node enumeration id for a node path such as
 *        ["DRAM"].
 *
 * \param path Prolog-list path string identifying the node.
 * \return The node's enumeration id, or -1 on SKB failure (matching the
 *         error convention of driverkit_hwmodel_get_my_node_id).
 */
int32_t driverkit_hwmodel_lookup_node_id(const char *path)
{
    debug_printf("%s:%u with path='%s'\n", __FUNCTION__, __LINE__, path);
    errval_t err;
    HWMODEL_QUERY_DEBUG(
        "node_enum(%s, E), writeln(E)",
        path);
    err = skb_execute_query(
        "node_enum(%s, E), writeln(E)",
        path);
    if (err_is_fail(err)) {
        DEBUG_SKB_ERR(err, "query node_enum");
        // Bug fix: previously execution fell through here and parsed
        // stale SKB output; report failure to the caller instead.
        return -1;
    }
    int32_t nodeid;
    err = skb_read_output("%d", &nodeid);
    if (err_is_fail(err)) {
        DEBUG_SKB_ERR(err, "read node_enum output");
        return -1;
    }
    return nodeid;
}
#define REVERSE_RESOLVE_Q "state_get(S)," \
"reverse_resolve_wrap(S, %"PRIi32", %"PRIu64", %zu, %"PRIi32")."
#define FORMAT "[\"KNC_SOCKET\", \"PCI0\", %d]"
// Without reconfiguration, under what ret_addr can you reach dst
// from nodeid?
/**
 * \brief Determine the address under which `dst` is already visible
 *        from `nodeid`, without installing any new configuration.
 *
 * \param dst      Frame whose alias address is queried.
 * \param nodeid   Driver node index (translated to a model node id via
 *                 the FORMAT path below).
 * \param ret_addr Out (required): address of dst as seen from nodeid.
 */
errval_t driverkit_hwmodel_reverse_resolve(struct capref dst, int32_t nodeid,
                                           genpaddr_t *ret_addr)
{
    errval_t err;
    struct frame_identity id;
    err = frame_identify(dst, &id);
    if (err_is_fail(err)) {
        return err;
    }
    assert(ret_addr);

#ifdef DISABLE_MODEL
    *ret_addr = id.base;
    return SYS_ERR_OK;
#else
    int dst_enum = id.pasid;
    // NOTE(review): the frame's own node id is immediately overridden
    // with the PCI bus node — confirm this is intended.
    dst_enum = driverkit_hwmodel_lookup_pcibus_node_id();

    // Translate the driver's node index into a model node id.
    assert(nodeid < 100);
    char buf[sizeof(FORMAT)];
    snprintf(buf, sizeof(buf), FORMAT, nodeid);
    nodeid = driverkit_hwmodel_lookup_node_id(buf);

    HWMODEL_QUERY_DEBUG(REVERSE_RESOLVE_Q, dst_enum, id.base, id.bytes, nodeid);
    err = skb_execute_query(REVERSE_RESOLVE_Q, dst_enum, id.base, id.bytes, nodeid);
    // Bug fix: DEBUG_SKB_ERR used to be invoked unconditionally here,
    // reporting an error even for successful queries; only report
    // actual failures.
    if (err_is_fail(err)) {
        DEBUG_SKB_ERR(err, "reverse_resolve");
        return err;
    }

    struct hwmodel_name names[1];
    int num_conversions = 0;
    driverkit_parse_namelist(skb_get_output(), names, &num_conversions);
    assert(num_conversions == 1);
    *ret_addr = names[0].address;

    debug_printf("Determined (0x%"PRIx64", %d) is alias of (0x%"PRIx64", %d)\n",
                 names[0].address, nodeid, id.base, dst_enum);
    return SYS_ERR_OK;
#endif
}
#define MAP_WRAP_Q "state_get(S)," \
"map_wrap(S, %zu, 21, %"PRIi32", %"PRIu64", %s, NewS)," \
"state_set(NewS)."
/**
 * \brief Query the configuration needed to make the memory region
 *        (mem_nodeid, addr, size) visible to `nodeid`.
 *
 * \param ret_conf      Out (optional): configuration lines (c/3 terms).
 * \param ret_conf_size Size of ret_conf in bytes.
 * \param ret_addr      Out (optional): virtual address chosen by the
 *                      model for the mapping.
 */
errval_t driverkit_hwmodel_get_map_conf_addr(int32_t mem_nodeid, genpaddr_t addr,
                                             gensize_t size, int32_t nodeid,
                                             char *ret_conf, size_t ret_conf_size,
                                             lvaddr_t *ret_addr)
{
    errval_t err;
#ifdef DISABLE_MODEL
    return SYS_ERR_OK;
#endif
    debug_printf("%s:%d: alias_conf request addr=0x%"PRIx64", size=%"PRIuGENSIZE"\n",
                 __FUNCTION__, __LINE__, addr, size);

    int32_t src_nodeid[2];
    char src_nodeid_str[128];
    src_nodeid[0] = nodeid;
    src_nodeid[1] = 0;
    format_nodelist(src_nodeid, src_nodeid_str);

    // The query occasionally fails transiently; retry a few times.
    for (int tries = 0; tries < 3; tries++) {
        HWMODEL_QUERY_DEBUG(MAP_WRAP_Q, size, mem_nodeid, addr, src_nodeid_str);
        err = skb_execute_query(MAP_WRAP_Q, size, mem_nodeid, addr, src_nodeid_str);
        if (err_is_ok(err)) break;
    }
    if (err_is_fail(err)) {
        DEBUG_SKB_ERR(err, "alias_conf \n");
        return err;
    }

    // Determine and copy conf line (second output line)
    char *confline = strstr(skb_get_output(), "\n");
    assert(confline);
    if (ret_conf) {
        strncpy(ret_conf, confline + 1, ret_conf_size);
        // Bug fix: strncpy does not NUL-terminate on truncation; make
        // sure the caller always receives a valid C string.
        if (ret_conf_size > 0) {
            ret_conf[ret_conf_size - 1] = '\0';
        }
    }
    debug_printf("retbuf=%p, %s\n", ret_conf, confline);

    // Parse names (first output line): resolved name and virtual address.
    *confline = 0;
    struct hwmodel_name names[2];
    int conversions;
    driverkit_parse_namelist(skb_get_output(), names, &conversions);
    debug_printf("Conversions = %d\n", conversions);
    // Same contract as driverkit_hwmodel_vspace_alloc: exactly two names.
    assert(conversions == 2);
    if (ret_addr) *ret_addr = names[1].address;
    return SYS_ERR_OK;
}
/**
* Makes dst visible to nodeid, assuming the configuration returned
* in ret_conf will be installed.
*/
errval_t driverkit_hwmodel_get_map_conf(struct capref dst,
                                        int32_t nodeid,
                                        char *ret_conf, size_t ret_conf_size,
                                        lvaddr_t *ret_addr)
{
#ifdef DISABLE_MODEL
    return SYS_ERR_OK;
#else
    // Identify the frame, then delegate to the address-based variant,
    // using the PCI bus node as the memory node.
    struct frame_identity id;
    errval_t err = frame_identify(dst, &id);
    if (err_is_fail(err)) {
        return err;
    }

    int32_t bus_nodeid = driverkit_hwmodel_lookup_pcibus_node_id();
    return driverkit_hwmodel_get_map_conf_addr(bus_nodeid, id.base, id.bytes,
                                               nodeid, ret_conf, ret_conf_size,
                                               ret_addr);
#endif
}
|
Black holes growing faster than expected
Black hole find Existing theories on the relationship between the size of a galaxy and its central black hole are wrong according to a new Australian study.
The discovery by Dr Nicholas Scott and Professor Alister Graham, from Melbourne's Swinburne University of Technology, found smaller galaxies have far smaller black holes than previously estimated.
Central black holes, millions to billions of times more massive than the Sun, reside in the core of most galaxies, and are thought to be integral to galactic formation and evolution.
However astronomers are still trying to understand this relationship.
Scott and Graham combined data from observatories in Chile, Hawaii and the Hubble Space Telescope, to develop a database listing the masses of 77 galaxies and their central supermassive black holes.
The astronomers determined the mass of each central black hole by measuring how fast stars are orbiting it.
Existing theories suggest a direct ratio between the mass of a galaxy and that of its central black hole.
"This ratio worked for larger galaxies, but with improved technology we're now able to examine far smaller galaxies and the current theories don't hold up," says Scott.
In a paper to be published in the Astrophysical Journal, they found that for each ten-fold decrease in a galaxy's mass, there was a one hundred-fold decrease in its central black hole mass.
"That was a surprising result which we hadn't been anticipating," says Scott.
The study also found that smaller galaxies have far denser stellar populations near their centres than larger galaxies.
According to Scott, this also means the central black holes in smaller galaxies grow much faster than their larger counterparts.
Black holes grow by merging with other black holes when their galaxies collide.
"When large galaxies merge they double in size and so do their central black holes," says Scott.
"But when small galaxies merge their central black holes quadruple in size because of the greater densities of nearby stars to feed on."
Somewhere in between
The findings also solve the long standing problem of missing intermediate mass black holes.
For decades, scientists have been searching for something in between stellar mass black holes formed when the largest stars die, and supermassive black holes at the centre of galaxies.
"If the central black holes in smaller galaxies have lower mass than originally thought, they may represent the intermediate mass black hole population astronomers have been hunting for," says Graham.
"Intermediate sized black holes are between ten thousand and a few hundred thousand times the mass of the Sun, and we think we've found several good candidates."
"These may be big enough to be seen directly by the new generation of extremely large telescopes now being built," says Graham.
|
Sunday, March 13, 2016
Architecture is Awesome #11: Sense of Place
Pont Neuf (1872, Pierre-Auguste Renoir)
This is another in my series of posts inspired by 1000 Awesome Things, the Webby Award winning blog written by Neil Pasricha. The series is my meditation on the awesome reasons why I was and continue to be attracted to the art of architecture.
One of the “aha! moments” of my academic life was coming to realize architecture has the power to add to the physical, cultural, and social identity of places. I learned how important it is for architects to thoroughly understand how an authentic place is not anywhere but rather somewhere people have purposely invested meaning in over time. I came to understand why caring for a sense of place should be an imperative in my work.
The best places possess a strong identity and character. They help us know where we are in the world and why they are unique. They are far from placeless (that feeling "there is no there, there") because they impart a physical, emotional, and sometimes spiritual connectedness to a specific geographic area. Architects who keenly understand this will do everything they can to ensure what makes a well-loved place so is enhanced, rather than diminished, by what they add to it.
Architects are adept at analyzing and responding to the problems of a site, which include its physical attributes, context, and opportunities. Additionally though, the most thoughtful among us do consider much more in an effort to distinguish a site’s most important characteristics. These architects reveal and strengthen the spirit of the place, rather than allowing it to remain weak and undifferentiated.
The locations we all consider memorable, unique, and enjoyable are often redolent of placeness. Their protective genius loci is strong, yet contingent upon how people have used and built upon it over time. Think of the banks of the Seine in Paris, the views from which have inspired countless artists, among them Renoir and Van Gogh. Or the Piazza Navona in Rome, once an ancient stadium, later transformed as a public space and market, its history vividly layered for all to see. Closer to home, it’s hard to imagine Timberline Lodge anywhere but nestled high up the snowy south flank of Mt. Hood.
Piazza Navona, Rome
Sometimes, a singular piece of architecture not only contributes to the sense of place but is necessary to bring it to light. The Sydney Opera House is inseparable from its harbor setting but now even more so from Sydney’s consciousness. Jorn Utzon’s optimistic masterpiece transcended its infamous travails to become an unforgettable landmark and symbol for an entire nation.
We do have to be careful: Attempting to create a sense of place from whole cloth is folly; instead, we must discern and tease out the already present, most beneficial emergent properties of each site we work with. No two projects should ever be exactly alike because the countless factors influencing every one of our projects are as complex and varied as life itself. Therefore, our goal should be to build upon the distinguishing structure of each place, taking care to preserve its unique essence—its soul—when we design.
The irony of our hyper-connected digital existence today is that many of us are starved of deep engagement with others and the real world we inhabit. The automobile-centric development patterns that predate the electronic age and persist today exacerbate our isolation and the ubiquity of placelessness. Preserving and augmenting a sense of place is an antidote: the particularity of real places, the memories they help make or elicit, and the way they bring us together provide us with the kinds of genuine experiences we naturally crave as human beings.
Bringing a critical approach to designing every project means designing with place and what it means always in mind. I think it’s totally AWESOME we architects are entrusted to contribute positively to a sense of place with every project we undertake, and in the process help people truly connect with the world they live in.
Next Architecture is Awesome: #12: Ordered Complexity
No comments:
|
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the
* License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* [email protected]
*/
#include "ue_context.hpp"
//------------------------------------------------------------------------------
ue_context::ue_context() {
  // Initialize all identifiers and containers to their empty/zero state.
  ran_ue_ngap_id = 0;
  amf_ue_ngap_id = 0;
  rrc_estb_cause = {};
  isUeContextRequest = false;
  cgi = {};
  tai = {};
  pdu_sessions = {};
  tmsi = 0;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Look up the PDU session context for the given session id.
// Returns true and fills `context` if found; false otherwise.
// Uses a single map lookup via find() instead of count()+at() (two lookups).
bool ue_context::find_pdu_session_context(
    const std::uint8_t& session_id,
    std::shared_ptr<pdu_session_context>& context) const {
  std::shared_lock lock(m_pdu_session);
  auto it = pdu_sessions.find(session_id);
  if (it == pdu_sessions.end()) {
    return false;
  }
  context = it->second;
  return true;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Store (or replace) the PDU session context for the given session id.
// insert_or_assign avoids operator[]'s default-construct-then-assign of
// the mapped shared_ptr and does a single lookup.
void ue_context::add_pdu_session_context(
    const std::uint8_t& session_id,
    const std::shared_ptr<pdu_session_context>& context) {
  std::unique_lock lock(m_pdu_session);
  pdu_sessions.insert_or_assign(session_id, context);
}
void ue_context::copy_pdu_sessions(std::shared_ptr<ue_context>& ue_ctx) {
pdu_sessions = ue_ctx->pdu_sessions;
}
bool ue_context::get_pdu_sessions_context(
std::vector<std::shared_ptr<pdu_session_context>>& sessions_ctx) {
std::shared_lock lock(m_pdu_session);
for (auto s : pdu_sessions) {
sessions_ctx.push_back(s.second);
}
return true;
}
|
/*
* Copyright (C) 2015-2019 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _CAMERA3_GRAPHCONFIG_H_
#define _CAMERA3_GRAPHCONFIG_H_
#include <string>
#include <memory>
#include <vector>
#include <set>
#include <utils/Errors.h>
#include <hardware/camera3.h>
#include <gcss.h>
#include <ia_aiq.h>
#include <linux/media.h>
#include "MediaCtlPipeConfig.h"
#include "LogHelper.h"
#include "MediaController.h"
#include "IPU3CameraCapInfo.h"
namespace GCSS {
class GraphConfigNode;
}
#define NODE_NAME(x) (getNodeName(x).c_str())
namespace cros {
namespace intel {
class GraphConfigManager;
#define MAX_OUTPUT_NUM_IN_PIPE 2
#define CSI_BE_OUTPUT "csi_be:output"
const int32_t ACTIVE_ISA_OUTPUT_BUFFER = 2;
const int32_t MAX_STREAMS = 4; // max number of streams
const uint32_t MAX_KERNEL_COUNT = 30; // max number of kernels in the kernel list
// Declare string consts
const std::string CSI_BE = "ipu3-cio2 ";
const std::string GC_INPUT = "input";
const std::string GC_MAIN = "main";
const std::string GC_VF = "vf";
const std::string GC_RAW = "raw";
// pipe index, video pipe: "ipu3-imgu 0", still pipe: "ipu3-imgu 1"
#define VIDEO_PIPE_INDEX 0
#define STILL_PIPE_INDEX 1
/**
* Stream id associated with the ISA PG that runs on Psys.
*/
static const int32_t PSYS_ISA_STREAM_ID = 60002;
/**
* Stream id associated with the ISA PG that runs on Isys.
*/
static const int32_t ISYS_ISA_STREAM_ID = 0;
/**
* \struct SinkDependency
*
* This structure stores dependency information for each virtual sink.
* This information is useful to determine the connections that preceded the
* virtual sink.
* We do not go all the way up to the sensor (we could), we just store the
* terminal id of the input port of the pipeline that serves a particular sink
* (i.e. the input port of the video pipe or still pipe)
*/
struct SinkDependency {
    uid_t sinkGCKey;         /**< GCSS_KEY that represents a sink, like GCSS_KEY_VIDEO1 */
    int32_t streamId;        /**< (a.k.a pipeline id) linked to this sink (ex 60000) */
    uid_t streamInputPortId; /**< 4CC code of that terminal */

    // Default: no sink key, no associated stream, no input port.
    SinkDependency()
        : sinkGCKey(0),
          streamId(-1),
          streamInputPortId(0) {}
};
/**
* \class GraphConfig
*
* Reference and accessor to pipe configuration for specific request.
*
* In the general case, at stream-config time there are multiple possible graphs.
* Per each request there is additional intent that can narrow down the
* possibilities to single graph settings: the GraphConfig object.
*
* This class is instantiated by \class GraphConfigManager for each request,
* and passed around HAL (control unit, capture unit, processing unit) via
* shared pointers. The objects are read-only and owned by GCM.
*/
class GraphConfig {
public:
    typedef GCSS::GraphConfigNode Node;
    typedef std::vector<Node*> NodesPtrVector;
    typedef std::vector<int32_t> StreamsVector;
    typedef std::map<camera3_stream_t*, uid_t> StreamToSinkMap;
    static const int32_t PORT_DIRECTION_INPUT = 0;
    static const int32_t PORT_DIRECTION_OUTPUT = 1;

public:
    GraphConfig();
    ~GraphConfig();

    /*
     * Convert Node to GraphConfig interface
     */
    const GCSS::IGraphConfig* getInterface(Node *node) const;
    const GCSS::IGraphConfig* getInterface() const;

    /*
     * Graph Interrogation methods
     */
    status_t graphGetSinksByName(const std::string &name, NodesPtrVector &sinks);
    status_t graphGetDimensionsByName(const std::string &name,
                                      int &widht, int &height);
    status_t graphGetDimensionsByName(const std::string &name,
                                      unsigned short &widht, unsigned short &height);
    /*
     * Find distinct stream ids from the graph
     */
    status_t graphGetStreamIds(std::vector<int32_t> &streamIds);
    /*
     * Sink Interrogation methods
     */
    int32_t sinkGetStreamId(Node *sink);
    /*
     * Stream Interrogation methods
     */
    status_t streamGetInputPort(int32_t streamId, Node **port);
    /*
     * Port Interrogation methods
     */
    status_t portGetFullName(Node *port, std::string &fullName);
    status_t portGetPeer(Node *port, Node **peer);
    int32_t portGetDirection(Node *port);
    bool portIsVirtual(Node *port);
    status_t portGetPeerIdByName(std::string name,
                                 uid_t &terminalId);
    status_t getDimensions(const Node *node, int &w, int &h) const;
    status_t getDimensions(const Node *node, int &w, int &h, int &l, int &t) const;
    /*
     * re-cycler static method
     */
    static void reset(GraphConfig *me);
    void fullReset();
    /*
     * Debugging support
     */
    std::string getNodeName(Node *node);
    status_t getValue(string &nodeName, uint32_t id, int &value);
    bool doesNodeExist(string nodeName);

    // Which IMGU pipe this configuration drives (see VIDEO_PIPE_INDEX /
    // STILL_PIPE_INDEX above).
    enum PipeType {
        PIPE_STILL = 0,
        PIPE_VIDEO,
        PIPE_MAX
    };
    PipeType getPipeType() const { return mPipeType; }
    void setPipeType(PipeType type) { mPipeType = type; }
    bool isStillPipe() { return mPipeType == PIPE_STILL; }

public:
    void setMediaCtlConfig(std::shared_ptr<MediaController> mediaCtl,
                           bool enableStill);

private:
    /* Helper structures to access Sensor Node information easily */
    class Rectangle {
    public:
        Rectangle();
        int32_t w; /*<! width */
        int32_t h; /*<! height */
        int32_t t; /*<! top */
        int32_t l; /*<! left */
    };
    // Lookup-table entry relating a GCSS uid to a media-ctl video node.
    struct MediaCtlLut {
        string uidStr;
        uint32_t uid;
        int pad;
        string nodeName;
        int ipuNodeName;
    };
    // A subdev pad: a rectangle plus its media bus format code.
    class SubdevPad: public Rectangle {
    public:
        SubdevPad();
        int32_t mbusFormat;
    };
    struct BinFactor {
        int32_t h;
        int32_t v;
    };
    struct ScaleFactor {
        int32_t num;
        int32_t denom;
    };
    union RcFactor { // Resolution Changing factor
        BinFactor bin;
        ScaleFactor scale;
    };
    // Input/output pad configuration of one subdevice plus its
    // resolution-changing factor.
    struct SubdevInfo {
        string name;
        SubdevPad in;
        SubdevPad out;
        RcFactor factor;
    };
    // Aggregated description of the graph's source (sensor/TPG) node.
    class SourceNodeInfo {
    public:
        SourceNodeInfo();
        string name;
        string i2cAddress;
        string modeId;
        bool metadataEnabled;
        string csiPort;
        string nativeBayer;
        SubdevInfo tpg;
        SubdevInfo pa;
        SubdevPad output;
        int32_t interlaced;
        string verticalFlip;
        string horizontalFlip;
        string link_freq;
    };
    friend class GraphConfigManager;
    // Private initializer: only used by our friend GraphConfigManager.
    void init(int32_t reqId);
    status_t prepare(Node *settings,
                     StreamToSinkMap &streamToSinkIdMap);
    status_t analyzeSourceType();
    void calculateSinkDependencies();
    void storeTuningModes();
    /*
     * Helpers for constructing mediaCtlConfigs from graph config
     */
    status_t parseSensorNodeInfo(Node* sensorNode, SourceNodeInfo &info);
    status_t getCio2MediaCtlData(int *cio2Format, MediaCtlConfig* mediaCtlConfig);
    status_t getImguMediaCtlData(int32_t cameraId,
                                 int cio2Format,
                                 int32_t testPatternMode,
                                 bool enableStill,
                                 MediaCtlConfig* mediaCtlConfig);
    status_t addControls(const Node *sensorNode,
                         const SourceNodeInfo &sensorInfo,
                         MediaCtlConfig* config);
    void addVideoNodes(MediaCtlConfig *config);
    void addImguVideoNode(int ipuNodeName, const string& nodeName, MediaCtlConfig* config);
    status_t getBinningFactor(const Node *node,
                              int32_t &hBin, int32_t &vBin) const;
    status_t getScalingFactor(const Node *node,
                              int32_t &scalingNum,
                              int32_t &scalingDenom) const;
    void addCtlParams(const string &entityName,
                      uint32_t controlName,
                      int controlId,
                      const string &strValue,
                      MediaCtlConfig* config);
    void addFormatParams(const string &entityName,
                         int width,
                         int height,
                         int pad,
                         int formatCode,
                         int field,
                         MediaCtlConfig* config);
    void addLinkParams(const string &srcName,
                       int srcPad,
                       const string &sinkName,
                       int sinkPad,
                       int enable,
                       int flags,
                       MediaCtlConfig* config);
    void addSelectionParams(const string &entityName,
                            int width,
                            int height,
                            int left,
                            int top,
                            int target,
                            int pad,
                            MediaCtlConfig* config);
    void addSelectionVideoParams(const string &entityName,
                                 const struct v4l2_subdev_selection &select,
                                 MediaCtlConfig* config);
    status_t getNodeInfo(const ia_uid uid, const Node &parent, int *width, int *height);
    void dumpMediaCtlConfig(const MediaCtlConfig &config) const;
    // Private helpers for port nodes
    status_t portGetFourCCInfo(Node &portNode,
                               uint32_t &stageId, uint32_t &terminalId);
    // Format options methods
    status_t getActiveOutputPorts(
        const StreamToSinkMap &streamToSinkIdMap);
    Node *getOutputPortForSink(const std::string &sinkName);

public:
    // Imgu used from ParameterWorker
    status_t getSensorFrameParams(ia_aiq_frame_params &sensorFrameParams);

private:
    // Disable copy constructor and assignment operator
    GraphConfig(const GraphConfig &);
    GraphConfig& operator=(const GraphConfig &);

private:
    // Graph settings node for this request.
    // NOTE(review): ownership is not evident from this header — reset()
    // semantics suggest GCM manages lifetime; confirm before deleting.
    GCSS::GraphConfigNode *mSettings;
    int32_t mReqId;   // Request id this configuration belongs to
    std::map<int32_t, size_t> mKernelCountsMap; // key is stream id
    PipeType mPipeType;   // Still or video pipe selection

    enum SourceType {
        SRC_NONE = 0,
        SRC_SENSOR,
        SRC_TPG,
    };
    SourceType mSourceType;  // Input source: real sensor or test pattern generator

    /**
     * pre-computed state done *per request*.
     * This map holds the terminal id's of the ISA's peer ports (this is
     * the terminal id's of the input port of the video or still pipe)
     * that are required to fulfill a request.
     * Ideally this gets initialized during init() call.
     * But for now the GcManager will set it via a private method.
     * we use a map so that we can handle the case when a request has 2 buffers
     * that are generated from the same pipe.
     */
    std::map<uid_t, uid_t> mIsaActiveDestinations;
    /**
     * vector holding the peers to the sink nodes. Map contains pairs of
     * {sink, peer}.
     * This map is filled at stream config time.
     */
    std::map<Node*, Node*> mSinkPeerPort;
    /**
     *copy of the map provided from GraphConfigManager to be used internally.
     */
    StreamToSinkMap mStreamToSinkIdMap;
    /**
     * Map of tuning modes per stream id
     * Key: stream id
     * Value: tuning mode
     */
    std::map<int32_t, int32_t> mStream2TuningMap;

    // presumably the CSI backend entity name (CSI_BE + port suffix) —
    // confirm against getCio2MediaCtlData()'s implementation.
    std::string mCSIBE;
    std::shared_ptr<MediaController> mMediaCtl;  // media controller used to apply configs
    std::vector<MediaCtlLut> mLut;               // uid -> video node lookup table
};
} // namespace intel
} // namespace cros
#endif
|
/*
* This file is part of https://github.com/martinruenz/maskfusion
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>
*/
#include <list>
#include <tuple>
#include "../Model/Model.h"
#include "PreSegmentation.h"
// Default construction/destruction: this segmentation strategy keeps no
// per-instance state (see the function-local static in performSegmentation).
PreSegmentation::PreSegmentation(){}
PreSegmentation::~PreSegmentation() {}
// Build a SegmentationResult directly from the precomputed per-frame mask:
// relabel mask values to model ids, count pixels per model, and compute
// per-model depth mean and mean absolute deviation.
SegmentationResult PreSegmentation::performSegmentation(std::list<std::shared_ptr<Model> > &models,
                                                        FrameDataPointer frame,
                                                        unsigned char nextModelID,
                                                        bool allowNew){
    assert(frame->mask.type() == CV_8UC1);
    assert(frame->mask.isContinuous());

    // Maps input mask labels -> model ids. NOTE(review): function-local
    // static — the mapping persists across calls, is never reset, and is
    // not thread-safe; hence the FIXME.
    static std::vector<unsigned char> mapping(256, 0);  // FIXME

    SegmentationResult result;
    result.hasNewLabel = false;
    result.fullSegmentation = cv::Mat::zeros(frame->mask.rows, frame->mask.cols, CV_8UC1);

    // Model id -> dense index into result.modelData / cnts.
    // NOTE(review): entries for ids not owned by any model remain
    // uninitialized; background pixels (label 0) index through
    // modelIdToIndex[0], which is only set if a model with id 0 exists —
    // confirm the global/background model always has id 0.
    unsigned char modelIdToIndex[256];
    unsigned char mIndex = 0;
    for (auto m : models) modelIdToIndex[m->getID()] = mIndex++;
    modelIdToIndex[nextModelID] = mIndex;

    // Per-label pixel counts for this frame.
    std::vector<unsigned> outIdsArray(256, 0); // Should be faster than using a set

    // Replace unseen with zeroes (except new label)
    for (unsigned i = 0; i < frame->mask.total(); i++) {
        unsigned char& vIn = frame->mask.data[i];
        if (vIn) {
            unsigned char& vOut = result.fullSegmentation.data[i];
            if (mapping[vIn] != 0) {
                vOut = mapping[vIn];
                outIdsArray[vOut]++;
                ;
            } else if (allowNew && !result.hasNewLabel) {
                // First unknown mask label becomes the new model's label;
                // at most one new label is introduced per frame.
                vOut = nextModelID;
                mapping[vIn] = nextModelID;
                result.hasNewLabel = true;
                outIdsArray[vOut]++;
            }
        } else {
            outIdsArray[0]++;
        }
    }

    // Per-model bookkeeping. Pixel counts are divided by 16*16 —
    // presumably to match a 16x16-downscaled representation used
    // elsewhere; confirm. 0.4 is the per-model confidence seed.
    for (ModelListIterator m = models.begin(); m != models.end(); m++)
        result.modelData.push_back({(*m)->getID(), m, cv::Mat(), cv::Mat(), outIdsArray[(*m)->getID()] / (16 * 16), 0.4});
    if (result.hasNewLabel)
        result.modelData.push_back({nextModelID, ModelListIterator(), cv::Mat(), cv::Mat(),
                                    unsigned(std::max((float)(outIdsArray[nextModelID] / (16 * 16)), 1.0f)), 0.4});

    // First pass: accumulate depth sums per model, then normalize to means
    // (guarding against empty models with cnts[index] == 0).
    std::vector<unsigned> cnts(result.modelData.size(), 0);
    for (unsigned i = 0; i < frame->mask.total(); i++) {
        const size_t index = modelIdToIndex[result.fullSegmentation.data[i]];
        result.modelData[index].depthMean += ((const float*)frame->depth.data)[i];
        cnts[index]++;
    }
    for (size_t index = 0; index < result.modelData.size(); ++index) result.modelData[index].depthMean /= cnts[index] ? cnts[index] : 1;

    // Second pass: mean absolute deviation of depth per model.
    for (unsigned i = 0; i < frame->mask.total(); i++) {
        const size_t index = modelIdToIndex[result.fullSegmentation.data[i]];
        result.modelData[index].depthStd += std::abs(result.modelData[index].depthMean - ((const float*)frame->depth.data)[i]);
    }
    for (size_t iindex = 0; iindex < result.modelData.size(); ++iindex)
        result.modelData[iindex].depthStd /= cnts[iindex] ? cnts[iindex] : 1;

    return result;
}
|
Hoodoos may be seismic gurus
Hoodoo prediction Towering chimney-like sedimentary rock spires known as hoodoos may provide an indication of an area's past earthquake activity.
The research by scientists including Dr Rasool Anooshehpoor, from the United States Nuclear Regulatory Commission, may provide scientists with a new tool to test the accuracy of current hazard models.
Hoodoo formations are often found in desert regions, and are common in North America, the Middle East and northern Africa.
They are caused by the uneven weathering of different layers of sedimentary rocks, that leave boulders or thin caps of hard rock perched on softer rock.
By knowing the strengths of different types of sedimentary layers, scientists can determine the amount of stress needed to cause those rocks to fracture.
The United States Geological Survey (USGS) uses seismic hazard models to predict the type of ground motion likely to occur in an area during a seismic event. But, according to Anooshehpoor, these models lack long-term data.
"Existing hazard maps use models based on scant data going back a hundred years or so," says Anooshehpoor. "But earthquakes have return periods lasting hundreds or thousands of years, so there is nothing to test these hazard models against."
The researchers examined two unfractured hoodoos within a few kilometres of the Garlock fault, which is an active strike-slip fault zone in California's Red Rock Canyon.
Their findings are reported in the Bulletin of the Seismological Society of America.
"Although we can't put a precise age on hoodoos because of their erosion characteristics, we can use them to provide physical limits on the level of ground shaking that could potentially have occurred in the area," says Anooshehpoor.
The researchers developed a three-dimensional model of each hoodoo and determined the most likely place where each spire would fail in an earthquake.
They then tested rock samples similar to the hoodoo pillars to measure their tensile strength and compared their results with previously published data.
USGS records suggest at least one large magnitude earthquake occurred along the fault in the last 550 years, resulting in seven metres of slip, yet the hoodoos are still standing.
This finding is consistent with a median level of ground motion associated with the large quakes in this region, says Anooshehpoor.
"If an earthquake occurred with a higher level of ground motion, the hoodoos would have collapsed," he says.
"Nobody can predict earthquakes, but this will help predict what ground motions are associated with these earthquakes when they happen."
Dr Juan Carlos Afonso from the Department of Earth and Planetary Sciences at Sydney's Macquarie University says it's an exciting development.
"In seismic hazard studies, it's not just difficult to cover the entire planet, it's hard to cover even small active regions near populated areas," says Afonso.
"You need lots of instruments, so it's great if you can rely on nature and natural objects to help you."
He says while the work is still very new and needs to be proven, the physics seems sound.
|
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <string.h>
#include <algorithm>
#include <queue>
#include <stack>
using namespace std;
// Candidate layout: a rectangle of dimensions L x W.
struct SOL {
    long long L, W;
    SOL(long long a = 0, long long b = 0):
        L(a), W(b) {}
    // Orders candidates primarily by area; among equal areas, the more
    // square rectangle (smaller |L - W|) compares smaller.
    bool operator<(const SOL &x) const {
        const long long myArea = L * W;
        const long long otherArea = x.L * x.W;
        if (myArea != otherArea)
            return myArea < otherArea;
        return llabs(L - W) < llabs(x.L - x.W);
    }
};
int main() {
int testcase;
long long N;
scanf("%d", &testcase);
while (testcase--) {
scanf("%lld", &N);
N = N/5 + (N%5 != 0);
SOL best;
for (long long i = 1; i * i <= N; i++) {
SOL t(i * 44 + 4, (N/i + (N%i != 0)) * 10 + 2);
if (i == 1)
best = t;
else
best = min(best, t);
}
if (best.L < best.W) {
swap(best.L, best.W);
}
printf("%lld X %lld = %lld\n", best.L, best.W, best.L * best.W);
}
return 0;
}
/*
6
1
15
22
29
36
43
*/
|
Now, it is common knowledge these days that Hitler's final great offensive in the last years of WWII was the Ardennes Offensive of 1944/45, also known as the battle of the Bulge. What was not appreciated at the time by the Allied high command was just how desperately short of vital supplies the Third Reich armies actually were. The Ardennes Offensive was Hitler's bold attempt to capture and hold the Allied army's massive supply of Brussels sprouts, vital - of course - for the full functioning of any army.
German intelligence were aware that the American army was - in particular - massing huge quantities of the vital Brussels sprouts just behind their frontlines in preparedness for their own massive push - and - of course - in time for Christmas.
The Germans' audacious plan would have succeeded if the Allies had not quickly worked out that it was their stockpiles of Brussels sprouts that were under immediate threat. The bold plan put forward by the Allied Generals was a heavy gamble, but it paid off. They ordered their front-line chefs to begin boiling their entire stocks of Brussels sprouts, and - most importantly - to keep them boiling well past a state of full preparedness.
So, when the weather altered and the wind direction changed, it blew the smell of over-cooked Brussels sprouts straight into the faces of the advancing Germans. Then the Reich troops knew that they would not be able to replenish their stocks of Brussels sprouts and any sprouts that they did capture from the Allied frontline kitchens would be overcooked to the point of inedibility.
Later in this series, we will discuss the major strategic role that Brussels sprouts have played in world history, such as Hadrian building a wall to protect the Roman Empire's most northern supplies of Brussels sprouts from the northern barbarians, thus thwarting the barbarian's fiendish plan to deep-fry the Roman's entire stockpiles of sprouts.
Then there was, also, Napoleon's retreat from Moscow when his over-long supply line of Brussels sprouts direct from France broke down. Even when his troops could get sprouts, they were of poor quality - dry, wizened and frozen solid. Of course, this led to a massive collapse of morale. Eventually, the lack of good quality sprouts forced a massive retreat where thousands of French troops died from a pitiful lack of sprouts.
And, of course, not forgetting - of course - how the Spanish conquest of the Americas was a result of the Spaniards overwhelming sprout superiority.
|
How the States Can End Real ID
by Thomas Andrew Olson,
As of this writing, only a handful of states have formally resisted implementation of the draconian REAL-ID act, where the Feds create a de facto national ID card by hijacking the driver licensing agencies of all 50 states. Despite the chilling “papers, please!” overtones to this, some states are falling into line like so many obedient sheep, while the majority have resorted to sending the Department of Homeland Security a letter of intent to comply, which extends them another year or so of lead time before the mandate finally kicks in. Of course that path only legitimizes the law, as opposed to standing up to the Feds and declaring the law the unconstitutional usurpation that it is.
DHS head Michael “Skeletor” Chertoff has made it clear that starting next year the residents of Montana, Maine, et al. will find it impossible to board an aircraft or enter a Federal building unless their state legislatures and governors cave in to his demands.
There is a third way, however. It’s simple, doable, and one that is guaranteed to stop REAL-ID in its tracks. Every state can do it. Its only drawback is that state governments will have to give up certain entrenched powers that they have arrogated to themselves for decades.
|
Wednesday, 25 January 2017
Highgate Cemetery by Miranda Miller
At the very end of 2016, when the year itself seemed exhausted by its own historical weight, I visited Highgate cemetery with Britta, a friend who grew up in East Berlin. On a frosty sunny morning it was a beautiful hillside park as well as a place to contemplate. Those great Victorian cemeteries were inspired by Père Lachaise in Paris. The first part to open, in 1839, was the West Cemetery, which is on your right as you walk down Swain’s Lane from Highgate. You have to make an appointment to go there but it’s well worth visiting with its Egyptian Avenue, Lebanon Circle, Terrace Catacombs and remarkable plants and wild life. Volunteers cut back the vegetation so that it is romantic but still passable and they also study the foxes, hedgehogs, butterflies and other rare insects.
Further down Swain's Lane on the right you come to the East cemetery, which costs £4 to enter and still attracts people from all over the world. Since 1975 both cemeteries have been run by a charity, the Friends of Highgate Cemetery. John Betjeman described it as a ‘Victorian Valhalla’ .These grand Victorian necropoli were built with high walls and locked gates to keep out the Resurrection Men but they were always intended to be parks as well. Once it was beautifully manicured but now it is its wildness that makes it charming and romantic. Douglas Adams, George Eliot, several of Charles Dickens’ children and his wife Catherine, Paul Foot, Eric Hobsbawn, Anna Mahler, Sidney Nolan, Peter Porter, Ralph Richardson Alan Sillitoe, Herbert Spencer Leslie Stephen and Max Wall are all buried here. Many of the less famous graves are very touching; there is an area dedicated to London firemen and some of the epitaphs on the graves of forgotten people read like short stories. For instance: “Emma Wallace Gray Died in October 1854 in the 19th year of her age...From the effects of fire, her dress having accidentally ignited ten days previously. In bloom of youth, when others fondly cling to life, I prayed, mid agonies of death.”
Karl Marx (1818-1883) upstages all his subterranean neighbours. The morning we were there a constant flow of international visitors surrounded his monument. He had been expelled from both Cologne and Paris because of his political activities before settling in London in 1849. "From this time on he was one of the leaders of the socialist party in Europe, and in 1865 he became its acknowledged chief" ( to quote from his obituary). He was laid to rest in the same grave as his wife Jenny, who had died less than a year and a half before him. Eleven people attended his funeral, including his friend Engels. Other members of his family were later buried in the same grave, including his daughter Eleanor, known as Tussy. She was a courageous supporter of the early Trades Unions who poisoned herself in 1898 after discovering that her partner, Edward Aveling, had secretly married a young actress.
As the years passed so many people came to visit Marx’s grave that it was moved to a more accessible spot, on the main path. The present grandiose marble monument was unveiled 73 years after his death, in 1956, in a ceremony attended by about 200 people, “ to honour the memory of a man whose spirit - if that is the right word - now dominates approximately half the world.” (as reported in The Guardian the following day). One of his most famous quotations is carved on it: ”The philosophers have only interpreted the world in various ways - the point however is to change it.” Laurence Bradshaw, the sculptor, said he aimed to express the "dynamic force of his intellect" and wanted the sculptured likeness to be at eye-level rather than "towering over the people."
Everybody who lived through the mid -20th century has their own Marx. At 15 I was a Young Communist for a few months and attended earnest discussion groups about his writings in West Kensington. A few years later I was taught history by academics who saw the world in terms of a Marxist interpretation. For some he was a demon, for others an omniscient prophet. Britta, my companion the day I visited the cemetery, is nostalgic for the GDR she lived in until she was in her late thirties. After the wall came down in 1989 there was a long public debate in Germany about what to do with the monuments and place names of Communism. There’s a striking scene in the 2003 film 'Good Bye, Lenin!' where the huge Lenin statue is lifted up by helicopter and flies off over the city, pointing as it goes. Finally, Marx was accepted as a philosopher and the grand boulevard round the corner from her flat is still called Karl - Marx - Allee. She tells me sadly that her grandchildren are taught at school that he was worse than Hitler.
At the moment, with socialism in crisis, you don’t hear much about Marx in England. Above the gigantic hairy bronze head hovers a large (if invisible) question mark. What does Marx mean to us now? He would not necessarily have recognised his own ideas in the uses that were made of them after he died. In 1882 he wrote in a letter of the form of 'Marxism' which arose in France: “If anything is certain, it is that I myself am not a Marxist.”
Last year there was a three-part Open University /BBC co-production for BBC Four called Genius of the Modern World. Bettany Hughes explored the life and works of Karl Marx, Friedrich Nietzsche and Sigmund Freud. “We might not realise it, but we all live with a 19th-century male philosopher in our lives. Karl Marx, Friedrich Nietzsche and Sigmund Freud are towering thinkers, men with the wit and the will to question the status quo.” Nobel laurate Paul Krugman wrote recently that when thinking about automation and the future of labor, he worries that "it has echoes of old-fashioned Marxism – which shouldn't be a reason to ignore facts, but too often is."
At his best Marx was such a powerful writer that it seems likely that people will always be influenced by him. He wrote that “ Capital is dead labor, which, vampire-like, lives only by sucking living labor, and lives the more, the more labor it sucks.” Another quotation that has great resonance for me at this time of crisis in our democracy is: “The oppressed are allowed once every few years to decide which particular representatives of the oppressing class are to represent and repress them.”
Susan Price said...
He saw very clearly, his own time and ours, didn't he?
Leslie Wilson said...
Yes, very perceptive and cynical of Marx. Great blog, Miranda!
Penny Dolan said...
Thanks, Miranda - and for reminding me that I must go there next time I'm in London.
Miranda Miller said...
Thanks for all your comments. I should have mentioned that there are interesting events in the beautiful Victorian chapel. I just went to a talk about Max Wall and in May there's a Karl Marx Memorial Lecture there.
|
#pragma once
#include "Agent.h"
#include "Gun.h"
#include "Bag.h"
// A controllable/AI-driven human agent that can equip a weapon, ammo and a
// carried object, takes damage with a post-hit invincibility window, and
// carries an inventory bag.
class Human : public Agent
{
	friend class PathFollowingAI;
public:
	Human();
	// Minimal initialisation; see the overload below for full control over
	// colour, direction, name, animation and stats.
	virtual void init(float x, float y, float radius, NS2::GLTexture& texture, glm::ivec2& texDims, AI* ai);
	virtual void init(float x, float y, float radius, NS2::GLTexture& texture, glm::ivec2& texDims, NS2::ColorRGBA8& color,
		glm::vec2& direction, std::string name, float animSpeed, AI* ai, float speed, float health);
	virtual void draw(NS2::SpriteBatch& spriteBatch) override;
	virtual void draw(NS2::DebugRenderer& debugRender, NS2::ColorRGBA8& color) override;
	virtual void update(Level& level, float deltaTime) override;
	virtual void applyDamage(Level& level, float damage) override;
	// Equipment setters; ownership semantics of the raw pointers are not
	// visible here — presumably non-owning, confirm against callers.
	void setEquipWeapon(Gun* gun){ m_weapon = gun; }
	void setEquipAmmo(Ammo* ammo){ m_ammo = ammo; }
	void setEquipObject(Object* object){ m_object = object; }
	Gun* getEquipWeapon(){ return m_weapon; }
	// Correctly-spelled accessor, consistent with its siblings.
	Ammo* getEquipAmmo(){ return m_ammo; }
	// Deprecated misspelled alias kept for existing callers.
	Ammo* geEquipAmmo(){ return m_ammo; }
	Object* getEquipObject(){ return m_object; }
protected:
	float m_invTime; // Invincible time
	float m_invCurrentTime; // Time elapsed in the current invincibility window
	NS2::ColorRGBA8 m_invColor; // Tint shown while invincible
	Bag m_bag; // Inventory
	Gun* m_weapon; // Currently equipped weapon (may be null)
	Ammo* m_ammo; // Currently equipped ammo (may be null)
	Object* m_object; // Currently carried object (may be null)
};
|
Books Yellow, Red, and Green and Blue,
All true, or just as good as true,
And here's the Blue Book just for YOU!
Hard is the path from A to Z,
And puzzling to a curly head,
Yet leads to Books—Green, Yellow and Red.
For every child should understand
That letters from the first were planned
To guide us into Fairy Land
So labour at your Alphabet,
For by that learning shall you get
To lands where Fairies may be met.
And going where this pathway goes,
You too, at last, may find, who knows?
The Garden of the Singing Rose.
As to whether there are really any fairies or not, that is a difficult question. The Editor never saw any himself, but he knew several people who have seen them-in the Highlands-and heard their music.
If ever you are in Nether Lochaber, go to the Fairy Hill, and you may hear the music your-self, as grown-up people have done, but you must go on a fine day.
This book has been especially re-published to raise funds for:
The Great Ormond Street Hospital Children’s Charity
By buying this book you will be donating to this great charity that does so much good for ill children and which also enables families to stay together in times of crisis. And what better way to help children than to buy a book of fairy tales. Some have not been seen in print or heard for over a century. 33% of the Publisher’s profit from the sale of this book will be donated to the GOSH Children’s Charity.
YESTERDAYS BOOKS for TODAYS CHARITIES
LITTLE RED RIDING HOOD
Once upon a time there lived in a certain village a little country girl, the prettiest creature was ever seen. Her mother was excessively fond of her; and her grandmother doted on her still more. This good woman had made for her a little red riding-hood; which became the girl so extremely well that everybody called her Little Red Riding-Hood.
One day her mother, having made some custards, said to her:
"Go, my dear, and see how thy grandmamma does, for I hear she has been very ill; carry her a custard, and this little pot of butter."
Little Red Riding-Hood set out immediately to go to her grandmother, who lived in another village.
As she was going through the wood, she met with Gaffer Wolf, who had a very great mind to eat her up, but he dared not, because of some faggot-makers hard by in the forest. He asked her whither she was going. The poor child, who did not know that it was dangerous to stay and hear a wolf talk, said to him:
"I am going to see my grandmamma and carry her a custard and a little pot of butter from my mamma."
"Does she live far off?" said the Wolf.
"Oh! aye," answered Little Red Riding-Hood; "it is beyond that mill you see there, at the first house in the village."
"Well," said the Wolf, "and I'll go and see her too. I'll go this way and you go that, and we shall see who will be there soonest."
The Wolf began to run as fast as he could, taking the nearest way, and the little girl went by that farthest about, diverting herself in gathering nuts, running after butterflies, and making nosegays of such little flowers as she met with. The Wolf was not long before he got to the old woman's house. He knocked at the door—tap, tap.
"Your grandchild, Little Red Riding-Hood," replied the Wolf, counterfeiting her voice; "who has brought you a custard and a little pot of butter sent you by mamma."
The good grandmother, who was in bed, because she was somewhat ill, cried out:
"Pull the bobbin, and the latch will go up."The Wolf pulled the bobbin, and the door opened, and then presently he fell upon the good woman and ate her up in a moment, for it was above three days that he had not touched a bit. He then shut the door and went into the grandmother's bed, expecting Little Red Riding-Hood, who came some time afterward and knocked at the door—tap, tap.
Little Red Riding-Hood, hearing the big voice of the Wolf, was at first afraid; but believing her grandmother had got a cold and was hoarse, answered:
"’Tis your grandchild, Little Red Riding-Hood, who has brought you a custard and a little pot of butter mamma sends you."
The Wolf cried out to her, softening his voice as much as he could:
"Pull the bobbin, and the latch will go up."
Little Red Riding-Hood pulled the bobbin, and the door opened.
The Wolf, seeing her come in, said to her, hiding himself under the bed-clothes:
"Put the custard and the little pot of butter upon the stool, and come and lie down with me."
Little Red Riding-Hood undressed herself and went into bed, where, being greatly amazed to see how her grandmother looked in her night-clothes, she said to her:
"Grandmamma, what great arms you have got!"
"That is the better to hug thee, my dear."
"Grandmamma, what great legs you have got!"
"That is to run the better, my child."
"Grandmamma, what great ears you have got!"
"That is to hear the better, my child."
"Grandmamma, what great eyes you have got!"
"It is to see the better, my child."
"Grandmamma, what great teeth you have got!"
"That is to eat thee up."
And, saying these words, this wicked wolf fell upon Little Red Riding-Hood, and tried to start eating her. Red Riding Hood screamed “Someone Help Me!” over and over again.
The woodcutter, who was felling trees nearby, heard Red Riding Hood’s screams for help and ran to the cottage. He burst in to find the wolf trying to eat Red Riding Hood.
He swung his axe, and with one blow killed the bad wolf for which Red Riding Hood was ever so grateful.
Great Book! Really interesting read! Was great to see a published version of Jewish tales! Arrived very quickly too - great service!
A thrilling book about a chase across the US! A great story, my son loved it! Quick and Convenient delivery!
Stories of the famous spice route across Asia! Great to see a volume of Phillipine Folklore Stories in Print, only one I've found on the web!
We deliver to destinations all over the world, and here at Abela, we have some of the best rates in the book industry.
We charge shipping dependent on the book you have ordered and where in the world you are ordering from. This will be shown below the price of the book.
The delivery time is typically dependent on where in the world you are ordering from. Should you need an estimated delivery time, please do not hesitate to contact us.
We pride ourselves on the quality of our packaging and damage rates are very low. In the unlikely event there is damage please contact us before returning your item, as you may have to pay for return shipping, if you have not let us know.
Due to the nature of books being read then returned for a refund, unfortunately we do not accept returns unless the item is damaged and we are notified ON THE DAY OF DELIVERY.
|
/*
Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#pragma once
#include "engine/gems/geometry/pinhole.hpp"
#include "messages/camera.capnp.h"
#include "messages/image.hpp"
#include "messages/math.hpp"
namespace isaac {
// Deserialises a pinhole camera model from a PinholeProto reader.
inline geometry::PinholeD FromProto(::PinholeProto::Reader reader) {
  geometry::PinholeD result;
  result.focal = FromProto(reader.getFocal());
  result.center = FromProto(reader.getCenter());
  result.dimensions = {reader.getRows(), reader.getCols()};
  return result;
}
// Serializes a pinhole camera model into a PinholeProto message.
// Mirrors FromProto above: dimensions[0] holds rows, dimensions[1] holds cols.
inline void ToProto(const geometry::PinholeD& pinhole, ::PinholeProto::Builder builder) {
  const auto& dims = pinhole.dimensions;
  builder.setRows(dims[0]);
  builder.setCols(dims[1]);
  ToProto(pinhole.focal, builder.initFocal());
  ToProto(pinhole.center, builder.initCenter());
}
} // namespace isaac
|
Old age related health problems
Old age is the last phase of a human’s life cycle. Old age is certainly bittersweet. As people gain wealth and respect in society and the burden of work and responsibilities reduce, most transition to a life of relaxation and enjoyment. However, seniority brings with it its own set of challenges in the form of geriatric health issues AKA old age problems. As the body begins to weaken, the natural immunity goes down as well, making otherwise healthy individuals prone to falling sick very easily. Old people have slower regenerative prowess and are easily vulnerable to disease, syndromes, and sickness as compared to younger adults. According to Wikipedia, ” The organic process of ageing is called senescence, the medical study of the aging process is called gerontology, and the study of diseases that afflict the elderly is called geriatrics. The elderly also face other social issues around retirement, loneliness, and ageism.”
There are also a host of issues that arise out of old age, which are the ones that we typically witness in our parents and senior individuals. These old age problems can be physiological, psychological, social, emotional and financial. The deterioration of both physical abilities and mental faculties is synonymous with old age. The manner in which one ages, as well as the pace of the aging process, is dependent on his/her lifestyle, the hereditary constitution of the individual as well as external, environmental factors.
Here are some old age problems which are related to health.
Sleep Apnea
Sleep apnea causes people to temporarily pause breathing during sleep. These pauses can range from a few seconds to a few minutes, causing imperceptible oxygen deprivation in the body and brain. This is shown to be a very common incident in senior individuals and requires immediate attention. While most of the time sleep apnea causes the person to wake up as a result of obstructed breathing and resume proper respiration thereafter, cases have been reported where sleep apnea caused death by asphyxiation because the subject failed to wake up.
Arthritis is an affliction of the joints that causes the loss of the cartilage and bone in movable joints of the skeleton. Old age is one of the major causes of this form of arthritis, also sometimes called osteoarthritis. The condition manifests in joint pain, immobilization of joints or sometimes severely reducing the range of motion of the joint. Arthritis is most effectively dealt with in its early stages so do not be dismissive if they complain about joint pain. A simple joint pain might be indicative of a more serious situation.
Dementia is often used as an umbrella term for a broad range of diseases affecting the brain. Dementia is a long term phenomenon and causes gradual loss of memory and cognitive ability. It can also give rise to emotional instability, mood swings, language issues, and lethargy. Dementia is most often characterized by Alzheimer’s disease. While dementia has no cure, early diagnosis can lead to an increase in quality of life with the help of symptomatic treatment. Use of assistive technology can also help a lot in allowing your loved ones to live the final years of their lives in peace and dignity.
High Cholesterol
Cholesterol plays a crucial part in the healthy operation of our body. Elevated levels of cholesterol pose a significant health risk, primarily of heart disease. High cholesterol can be caused by bad diet, obesity or other diseases such as diabetes but requires immediate redressal before the situation worsens.
Farsightedness, also called hypermetropia, is caused by the eye being unable to focus on objects at near distances. This is caused primarily by the ciliary muscles in the eye being unable to change the lens enough to view nearby objects clearly. One of the most obvious signs of the onset of age, hypermetropia is easily corrected with the use of glasses or lenses. However, until that is done, people from hypermetropia can suffer from blurred vision, headaches, dizziness, and discombobulation. Early diagnosis of hypermetropia as distinct from general tiredness of the eye is essential.
High Blood Pressure
High blood pressure, also called hypertension, is the sustained elevation of blood pressure in arteries, the blood vessels that carry freshly oxygenated blood from the heart to the various parts of the body. High blood pressure significantly increases the chances of coronary heart disease, stroke, loss of vision, and kidney failure. High blood pressure is one of the most significant ailments found in senior citizens and requires consistent and regular medication to keep it in check to maintain health and normal living standards.
Apart from the aforementioned old age problems, the issues of economic insecurity and isolation plague senior citizens. They need all the love and care in the world at this phase of life and it is up to us children to repay their love.
Next articleMuse of the Month- Rujuta Diwekar
Mousumi Gharami is an experienced writer, who applies her creative thoughts and makes the best approaches to cover all the essential points of a specific topic. She is capable enough to manage writing assignments of all sorts and has succeeded in earning loads of success and achievements within a short span of time, and all credit goes to her hard work, honesty and dedication.
1. This is very attention-grabbing, You are an overly professional blogger. I’ve joined your rss feed and sit up for in the hunt for more of your excellent post. Also, I’ve shared your web site in my social networks!
|
Monday, 19 January 2015
Anti-Racism Causes Racism!
It can be argued that zealous and fanatical anti-racism is doing more than almost anything else to contribute to racism in the United Kingdom and United States. To put that in very basic terms, one of the biggest contributors to racism today may very well be anti-racism policies and statements.
Almost every single day someone or other is put before an anti-racist inquisition or a new - even stricter - law is decreed to fight racism.
Anti-racism has now become another revolution that's eating its own children.
What we have with much of today's anti-racism is the same kind of absurdity and extremity which often happened during various historical inquisitions. More specifically, anti-racism is just like the many other political movements that, in time, became corrupted.
Many anti-racists also feel the need to justify their existence and legitimacy by becoming more and more pure (i.e. extreme). And, as a consequence, they will also need to find new targets – more evil racists - to reprimand or even punish.
What partly contributes to all this is that a minority of Leftist activists (though often highly-influential people in the law, councils, academia, etc.) are attempting to create a “revolutionary situation” by deliberately making anti-racism policies and actions more extreme. Thus, in the process, these Leftists - along with their words and actions - are alienating people who aren't otherwise racist. Such Leftists think that the violence, turmoil or even civil conflict that their words and policies create may be utilised to benefit their own primary cause: revolutionary socialism or the “progressive future”. Thus they see what they're doing as tapping into anti-racism's revolutionary/radical potential. (These very same Leftists also - to use their own words - “tap into the revolutionary potential of Muslims”.)
The fight against racism, then, is but a means to a revolutionary or radical end.
Let's just take two examples – from a multitude - to begin with.
Think of Rotherham (UK) Council's anti-racism policies and how they resulted in fifteen years or more of unchecked Muslim sexual-grooming; which, all in all, claimed over 1,400 young victims.
In terms of the United States, think of Ben Affleck's mindless belief that the criticism of Islam equals racism.
I know that that many people are more or less being goaded into racism as a direct reaction to the extreme bullshit, zealotry and prejudice (yes, prejudice) that's coming – every day - from countless professional anti-racists (whether in politics, the law, academia or wherever).
Of course the partisans of anti-racism will simply say that such people were racist all along. After all, only the pious Leftists of this world are truly untainted by the sin of racism.
It's often as if many – or at least some - anti-racists are trying to prove their own non-racist purity by citing even more perverse and ridiculous examples of what they take to be racism. Is this because they are themselves racists? Is it because many – or at least some - anti-racists have racist thoughts?
Like their National Socialist (Nazi) counterparts, when such pious anti-racists see a person they immediately note his or her skin colour (i.e., if the skin isn't white). And that changes everything for them. They will automatically see that person as being “oppressed” or as an endless victim of racism (somehow and somewhen). Or, alternatively, as exotica to be patronised - or condescended to - in an orgy of positive Orientalism.
Therefore in order to assuage the guilt they feel about their own negative and positive racism, these puritanical Leftists project their racist thoughts into the minds and words of other people. (This is called “psychological projection” in the psychological literature.)
But really....?
The obvious riposte to what I'm arguing, then, will either be that I'm rationalising/justifying racism or that I'm a racist myself. But that response would itself display the very problem that's being highlighted: mindless and zealous anti-racism.
It can also be said that it's perfectly acceptable to say that people should – and do - react to zealous and absurd anti-racism. However, to also argue that anti-racism can actually have racism as a consequence is surely a different thing entirely.
Let's put it this way.
If anti-racist activists continuously muddy the water between genuine racism and fictional/possible racism, then surely others will do so too. That means that if things which aren't in fact racist are constantly being classed “racist”, then other people may give up on trying to make such distinctions too.
Take this example.
If the very act of giving a comedic representation of any ethnic minority individual is deemed racist (which it nearly always is), then people might have started to think that many genuine cases of racism (as put forwards by professional anti-racists and others) are bogus too. Or at least many might have developed a disposition to think that way precisely because Leftists are ceaselessly muddying the waters in order to advance political/personal objectives which have very little to do with the fight against racism.
In any case, I'm not saying that anti-racism causes all racism. I'm not even saying that extreme examples of anti-racism cause all racism. I'm simply saying that certain strands of anti-racism (e.g., ones based on Marxist and other arcane political theories) may well be responsible for much racism.
After all, after thirty or more years of outright political correctness and Leftist indoctrination (in schools, universities, council chambers, public libraries, buses, the BBC), many people are claiming that racism is still a big problem or even that it's getting worse. So have such people ever thought – for even one moment - that racism may be getting worse precisely because of thirty years of political correctness and sanctimonious anti-racism?
The constant barrage from councils, (Leftist) lawyers, rights 'n' race groups, police bodies, councillors, council workers, politicians, etc. against white people and against English/American identity causes racism. And indeed even if there is no such thing as “English/American identity”, it's still not the place of a Marxist/Leftist academic (at Neasden University or the LSE) to decide on that and then try to get politicians and the legal system to legislate accordingly.
So where does all this leave the British and American people?
It leaves millions of people in a state in which any criticism of Islam, Muslims, immigration, Pakistani council corruption, black criminality and violence, etc. can't even be made - let alone acted upon. A state in which the people have effectively been silenced on some of the most important issues of the day.
More specifically, these professional and political anti-racists know that the silencing of the British/American people about Islam and immigration, for example, will eventually help destabilise society (as referred to earlier). And in such a destabilised society it will be easier to create (so they think) a revolutionary situation out of which - like a phoenix rising from the ashes - a new Leftist/progressive state and society can be created. From such chaos and inter-communal conflict, the Left's “fairer and better society” will somehow be formed.
Large parts of the Left also think, for example, that the large-scale criminalising of free speech – the Gulag without walls - will somehow stop or end racism. No it won't: it will make it worse!
This fear and trembling about real, possible and often fictional racism has meant that very many people – from all walks of life - have been let down by the authorities. In all these cases, the supreme and (self)righteous fight against racism has taken first place in the pecking order of politics.
The permanent revolution that is the fight against racism has often become fanatical, extreme and puritanical. Anti-racism, it seems, takes no prisoners and permits no compromise. And neither does it follow the principles of fairness and justice. What I mean by that is that it's often the case that many other rights, values and standards are sacrificed in order to cleanse society of not only real racism; but often fictional or possible racism too.
Actions cause counter-reactions. And relentless anti-racist zealotry – day after day – is bound to cause at least some equally zealous counter-reactions.
Basically, then, many anti-racism policies and statements do cause racism.
No comments:
Post a Comment
|
Top Disdain Quotes
Browse top 146 famous quotes and sayings about Disdain by most favorite authors.
Favorite Disdain Quotes
1. "...all his longings came out as a kind of disdain for what he longed for."
Author: Alan Hollinghurst
2. "Action and blood now get the game. Disdain treads on the peaceful name."
Author: Amos Bronson Alcott
3. "So I pulled a gun on him and demanded his wallet."The soda in my mouth becomes the soda in my nose. "You had a gun?" I cough and sputter into my napkin.Mom's eyes go round and she pressed her finger to her lips, mouthing, "Shhh!""Where did you get a gun?" I hiss."Oliver lent it to me. He was always looking out for me. Told me to shoot first and run. He said the asking-questions-later part was for the police." She grins at my expression. "Does that earn me cool points?"I swirl a fry in the mound of ketchup on my plate. "You want cool points for pulling a gun on my father?" I say it with all the appropriate disdain and condescension it deserves, but deep down, we both know she gets mega cool points for it."Psh." She waves her hand. "I didn't even know whether or not it would fire. And anyway, he didn't hand me his wallet. He propositioned me instead.""Okay. Ew.""Not like that, you brat."
Author: Anna Banks
4. "It was at such moments that for an instant he ceased to be a reasoning machine and betrayed his human love for admiration and applause. The same singularly proud and reserved nature which turned away with disdain from popular notoriety was capable of being moved to its depth by spontaneous wonder and praise from a friend."
Author: Arthur Conan Doyle
5. "The true elitists in the literary world are the ones who have become annoyed by literary ambition in any form, who have converted the very meaning of ambition so totally that it now registers as an act of disdain, a hostility to the poor common reader, who should never be asked to do anything that might lead to a pulled muscle. (What a relief to be told there's no need to bother with a book that might seem thorny, or abstract, or unusual.) The elitists are the ones who become angry when it is suggested to them that a book with low sales might actually deserve a prize (...) and readers were assured that the low sales figures for some of the titles could only mean that the books had failed our culture's single meaningful literary test.-"
Author: Ben Marcus
6. "Awards are meaningless to me, and I have nothing but disdain for anyone who actively campaigns to get one."
Author: Bill Murray
7. "Exactly. We don't belong here. They're not staring out of disdain, Jordan; they're staring out of jealousy. We don't have to be a part of the horrible modelling industry. You don't have to watch what you eat and I don't have to worry about how many zits are on my face. We can be whoever the fuck we want, a type of liberty that a majority of people crave."
Author: Cameron How To Be A Girl
Author: Chuck Jones
9. "Cousin Mary hoped her journey through periods of dark and light was like that of a Swiss train toiling up the mountainside, in and out of tunnels but always a little farther up the hill at each emergence. But she could only hope that this was so, she did not feel it. It seemed to her that she did not advance at all and that what she was learning now was only to hold on. The Red Queen in Alice Through the Looking Glass, she remembered, had had to run fast merely to stay where she was, but doubtless she had run in hope, disdaining despair; and hope, Cousin Mary discovered, when deliberately opposed to despair, was one of the tough virtues."
Author: Elizabeth Goudge
10. "She was a virgin and a warrior, disdainful of the male, which was what eventually convinced people that she really must be off her head."
Author: Émile Zola
11. "It seems I have a hard time being attracted to someone unless I respect what they do on some level. Otherwise, I would feel disdain for them. Which is not always pleasant in a relationship. Sometimes it's fun though."
Author: Eric Stoltz
12. "It is a matter for considerable regret that Fermat, who cultivated the theory of numbers with so much success, did not leave us with the proofs of the theorems he discovered. In truth, Messrs Euler and Lagrange, who have not disdained this kind of research, have proved most of these theorems, and have even substituted extensive theories for the isolated propositions of Fermat. But there are several proofs which have resisted their efforts."
Author: Fermat
13. "A womanly occupation means, practically, an occupation that a man disdains. (The Odd Women)"
Author: George Gissing
Author: George Orwell
15. "Men are men." Whitebeard replied. "Dragons are dragons." Ser Jorah snorted his disdain. "How profund"
Author: George R.R. Martin
16. "In his youth, Jon Connington had shared the disdain most knights had for bowmen, but he had grown wiser in exile."
Author: George R.R. Martin
17. "And as for the Ellison Fellow's feelings towards Katherine Potter--to be honest, they involve a good deal of confusion. He reacts before Katherine Potter, in fact, as he has reacted before all new, strange (attractive) women who happen, since a certain event, to have crossed his path. He does not know how to deal with them. He is filled with dismay, a giddy sense of arbitrariness, an apprehension that the universe holds nothing sacred; all of which is only to be stilled by the imperative of loyal resistance.He is not immune to the prickle of passing lust. But he deals defensively with it. He reacts either with disdainful dismissal (Not your type, definitely not your type) or with a rampant if covert seizure of lecherousness (Christ, what tits! What legs! What an arse!), which serves the same forestalling function by reducing its object to meat and its subject (he is past fifty, after all) to a pother of shame."
Author: Graham Swift
18. "Never have things of the spirit counted for so little. Never has hatred for everything great been so manifest – disdain for beauty, execration of literature. I have always tried to live in an ivory tower, but a tide of shit is beating at its walls, threatening to undermine it."
Author: Gustave Flaubert
Author: Heinrich Heine
20. "Death is as unexpected in his caprice as a courtesan in her disdain; but death is truer – Death has never forsaken any man"
Author: Honoré De Balzac
21. "The motives behind scientism are culturally significant. They have been mixed, as usual: genuine curiosity in search of truth; the rage for certainty and for unity; and the snobbish desire to earn the label scientist when that became a high social and intellectual rank. But these efforts, even though vain, have not been without harm, to the inventors and to the world at large. The "findings" have inspired policies affecting daily life that were enforced with the same absolute assurance as earlier ones based on religion. At the same time, the workers in the realm of intuition, the gifted finessers - artists, moralists, philosophers, historians, political theorists, and theologians - were either diverted from their proper task, while others were looking on them with disdain as dabblers in the suburbs of Truth."
Author: Jacques Barzun
22. "Our political system is now run by the Big People for their own interests. If they ever deign to notice the Little People, it is with disdain and contempt."
Author: John Derbyshire
23. "Benjamin Rush had helped inform Jefferson's views on church and state in 1800. "I agree with you likewise in your wishes to keep religion and government independent of each other," Rush had told Jefferson.58 "Were it possible for St. Paul to rise from his grave at the present juncture, he would say to the clergy who are now so active in settling the political affairs of the world: ‘Cease from your political labors your kingdom is not of this world. Read my epistles. In no part of them will you perceive me aiming to depose a pagan emperor, or to place a Christian upon a throne. Christianity disdains to receive support from human governments.'"
Author: Jon Meacham
24. "Ewww... intelligent design people! They're just buck-toothed, Bible-pushing nincompoops with community-college degrees who're trying to sell a gussied-up creationism to a cretinous public! No need to address their concerns or respond to their arguments. They are Not Science. They are poopy-heads. There. I just saved you the trouble of reading 90 percent of the responses to the ID position... This is how losers act just before they lose: arrogant, self-satisfied, too important to be bothered with substantive refutation, and disdainful of their own faults... The only remaining question is whether Darwinism will exit gracefully, or whether it will go down biting, screaming, censoring, and denouncing to the bitter end.— Tech Central Station contributor Douglas Kern, 2005"
Author: Jonathan Wells
25. "W-what do you want?" I asked, thankful that my voice only trembled a little bit.That Cat Didn't blink. "Human," he said, and if a cat could sound patronizing, this one nailed it, "think about the absurdity of the question. I am resting in my tree, minding my own business and wondering if I should hunt today, when you come flying in like a bean sidhe and scare off every bird for miles around. Then, you have the audacity to ask what I want." He sniffed and gave me a very catlike stare of disdain. "I am aware that mortals are rude and barbaric, but still."
Author: Julie Kagawa
Author: Kate Mullane Robertson
27. "I looked at Reth hopefully. "You?""Must we really waste more time? Not all of us here are immortal, and I'd think you and Jack would more carefully guard what little you have. We should go immediately to my queen.""Can you get us in or not?"He looked at the ceiling, his features dripping with disdain for the entire operation. "I suppose if you were to stand immediately outside her door I could use my sense of where you are to navigate into her room and open the door from the inside.""That's my pretty faerie boy!""If you ever address me like that again, I will make that abomination on your head permanent."I put my fingers up to the brunette wig, horrified. "You wouldn't.""I suggest you do not attempt to find out."
Author: Kiersten White
28. "Anne, look here. Can't we be good friends?"For a moment Anne hesitated. She had an odd, newly awakened consciousness under all her outraged dignity that the half-shy, half-eager expression in Gilbert's hazel eyes was something that was very good to see. Her heart gave a quick, queer little beat. But the bitterness of her old grievance promptly stiffened up her wavering determination. That scene of two years before flashed back into her recollection as vividly as if it had taken place yesterday. Gilbert had called her "carrots" and had brought about her disdain before the whole school. Her resentment, which to other and older people might be as laughable as its cause, was in no whit allayed and softened by time seemingly. She hated Gilbert Blythe! She would never forgive him!"
Author: L.M. Montgomery
Author: Leonard Mlodinow
30. "And when she's alone again, as truly alone in the world as she's always felt herself to be, she looks at herself in a bamboo-framed mirror. Beautiful face, aglow with the taste of carnal pleasure, disdainful and avid … and above all an indefinable look in which can be sensed unspecified danger, sensuality triumphant and a sort of intoxicating vulgarity. She likes what she sees … around her drifts a great brunette fragrance, scent of happy brunette, in which the idea of others dissolves."
Author: Louis Aragon
Author: Ludwig Von Mises
32. "For once I didn't look away immediately. I forced myself to meet her contemptuous gaze. I allowed myself be swept away by it, to drown in it - the way I'd done so many times before. The way I would willingly do again. Because at least she was here to hate me. At least I had that. I watched my daughter conjure up the filthiest look in her vast arsenal before she turned away with complete disdain. I didn't mind that so much. It meant I could watch her, drink her in without her protest. Look at our daughter, Callum. Isn't she beautiful, so very beautiful? She laughs like me, but when she smiles... Oh Callum, when she smiles, it's picnics in Celebration Park and sunsets on our beach and our very first kiss all over again. When Callie Rose smiles at me, she lights up my life.When Callie Rose smiles at me."
Author: Malorie Blackman
33. "A collection of bad love songs, tattered from overuse, has to touch us like a cemetery or a village. So what if the houses have no style, if the graves are vanishing under tasteless ornaments and inscriptions? Before an imagination sympathetic and respectful enough to conceal momentarily its aesthetic disdain, that dust may release a flock of souls, their beaks holding the still verdant dreams that gave them an inkling of the next world and let them rejoice or weep in this world."
Author: Marcel Proust
34. "I only have disdain for New York City Mayor Michael Bloomberg. He raised taxes and has increased regulations. What else is new? He's a bully who wants to micro-manage people's lives by mandate, not persuasion."
Author: Mark Skousen
Author: Matthew Gregory Lewis
Author: Meredith Duran
37. "Morning, Peter," she callsfrom the back, in her exaggerated German accent. Mawning, Pedder.She's been in the States more than fifteen years now, but heraccent has gotten heavier. Uta is a member of what seems to be agrowing body of defiantly unassimilated expatriates. She on onehand disdains her country of origin (Darling, the word "lugubrious"comes to mind) but on the other seems to grow more German (morenot-American) with every passing year....Because Uta is German, utterly German, which of course is probably why she leftthere, and insists that she'll never go back."
Author: Michael Cunningham
38. "I'm not this unusual," she said. "It's just my hair."She looked at Bobby and she looked at me, with an expression at once disdainful and imploring. She was forty, pregnant, and in love with two men at once. I think what she could not abide was the zaniness of her life. Like many of us, she had grown up expecting romance to bestow dignity and direction."Be brave," I told her. Bobby and I stood before her, confused and homeless and lacking a plan, beset by an aching but chaotic love that refused to focus in the conventional way. Traffic roared behind us. A truck honked its hydraulic horn, a monstrous, oceanic sound. Clare shook her head, not in denial but in exasperation. Because she could think of nothing else to do, she began walking again, more slowly, toward the row of trees."
Author: Michael Cunningham
39. "Chapter 4,‘Organised abuse and the pleasures of disbelief', uses Zizek's (1991) insights into cite political role of enjoyment to analyse the hyperbole and scorn that has characterised the sceptical account of organised and ritualistic abuse. The central argument of this chapter is that organised abuse has come to public attention primarily as a subject of ridicule within the highly partisan writings of journalists, academics and activists aligned with advocacy groups for people accused of sexual abuse. Whilst highlighting the pervasive misrepresentations that characterise these accounts, the chapter also implicates media consumers in the production of ignorance and disdain in relation to organised abuse and women's and children's accounts of sexual abuse more generally."
Author: Michael Salter
40. "Well, we never expected this!" they all say. "No one liked her. They all said she was pretentious, awkward, difficult to approach, prickly, too fond of her tales, haughty, prone to versifying, disdainful, cantankerous, and scornful. But when you meet her, she is strangely meek, a completely different person altogether!"How embarrassing! Do they really look upon me as a dull thing, I wonder? But I am what I am."
Author: Murasaki Shikibu
41. "I tried to establish order over the chaos of my imagination, but this essence, the same that presented itself to me still hazily when I was a child, has always struck me as the very heart of truth. It is our duty to set ourselves an end beyond our individual concerns, beyond our convenient, agreeable habits, higher than our own selves, and disdaining laughter, hunger, even death, to toil night and day to attain that end. No, not to attain it. The self-respecting soul, as soon as he reaches his goal, places it still further away. Not to attain it, but never to halt in the ascent. Only thus does life acquire nobility and oneness."
Author: Nikos Kazantzakis
42. "Mi bella Princesa, your funny little dwarf will never dance again. It is a pity, for he is so ugly that he might have made the King smile.''But why will he not dance again?' asked the Infanta, laughing.'Because his heart is broken,' answered the Chamberlain.And the Infanta frowned, and her dainty rose-leaf lips curled in pretty disdain. 'For the future let those who come to play with me have no hearts,' she cried, and she ran out into the garden."
Author: Oscar Wilde
43. "It is not easy to hurl snowballs while holding on to a plastic bag of groceries, so my first few efforts were subpar, missing their mark. The nine maybe ten nine-maybe-ten-year-olds ridiculed me - if I turned to aim at one, four others outflanked me and shot from the sides and the back. I was, in the parlance of an ancient day, cruising for a bruising, and while a more disdainful teenager would have walked away, and a more aggressive teenager wouls have dropped the bag and kicked some major preteen ass, I kept fighting snowball with snowball, laughing as if Boomer and I were playing a school yard game, flinging my orbs with abandon."
Author: Rachel Cohn
44. "You don't have any friends, your sister dumped you, you're a freak eater..and you've got some weird thing about Simon Snow.""I object to every single thing you just said."Reagan chewed. And frowned. She was wearing dark red lipstick."I have lots of friends," Cath said."I never see them.""I just got here. Most of my friends went to other schools. Or they're online.""Internet friends don't count.""Why not?"Reagan shrugged disdainfully."And I don't have a weird thing with Simon Snow," Cath said. "I'm just really active in the fandom.""What the fuck is ‘the fandom'?"
Author: Rainbow Rowell
45. "She'd so believed he could—that decades marked by disdain for emotion could have been nothing more than a faint memory in his checkered past. That she could love him enough to prove to him that the world was worth his caring, his trust. That she could turn him into the man of whom she had dreamed for so long.That was perhaps the hardest truth of all—that Ralston, the man she'd pined over for a decade, had never been real. He'd never been the strong and silent Odysseus; he'd never been aloof Darcy; never Antony, powerful and passionate. He had only ever been Ralston, arrogant and flawed and altogether flesh and blood."
Author: Sarah MacLean
46. "I'm going to wake Peeta," I say."No, wait," says Finnick. "Let's do it together. Put our faces right in front of his."Well, there's so little opportunity for fun left in my life, I agree. We position ourselves on either side of Peeta, lean over until our faces are inches frim his nose, and give him a shake. "Peeta. Peeta, wake up," I say in a soft, singsong voice.His eyelids flutter open and then he jumps like we've stabbed him. "Aa!"Finnick and I fall back in the sand, laughing our heads off. Every time we try to stop, we look at Peeta's attempt to maintain a disdainful expression and it sets us off again."
Author: Suzanne Collins
47. "A little disdain is not amiss; a little scorn is alluring."
Author: William Congreve
48. "Thine eyes I love, and they, as pitying me,Knowing thy heart torment me with disdain,Have put on black and loving mourners be,Looking with pretty ruth upon my pain.And truly not the morning sun of heaven Better becomes the grey cheeks of the east,Nor that full star that ushers in the even,Doth half that glory to the sober west,As those two mourning eyes become thy face:O! let it then as well beseem thy heartTo mourn for me since mourning doth thee grace,And suit thy pity like in every part. Then will I swear beauty herself is black, And all they foul that thy complexion lack"
Author: William Shakespeare
49. "What, my dear Lady Disdain! are you yet living?Beatrice: Is it possible disdain should die while she hathsuch meet food to feed it as Signior Benedick?"
Author: William Shakespeare
50. "Among [Applewhite's] other teachings was the classic cult specialty of developing disdain for anyone outside of the Heaven's Gate commune. Applewhite flattered his would-be alien flock that they were an elite elect far superior to the non-initiated humans whom he considered to be deluded zombies.[...]Applewhite effectively fed his paranoid persecution complex to his followers to ensure blind loyalty to the group and himself while fostering alienation from the mundane world. This paradoxical superior/fearful attitude towards "Them" (i.e., anyone who is not one of "Us") is one of the simplest means of hooking even the most skeptical curiosity seeker into the solipsistic netherworld of a [mentally unbalanced] leader's insecure and threatened worldview."
Author: Zeena Schreck
Disdain Quotes Pictures
Quotes About Disdain
Quotes About Disdain
Quotes About Disdain
Today's Quote
I am whoever I am when I am it."
Author: Andrea Gibson
Who Was Talking About "Disdain"?
Famous Authors
Popular Topics
|
First, an object is placed on the platform of the printer – a petri dish, for example. Then the printer must check the height of the object to make sure everything is calibrated correctly. Mr. Carvalho placed a paper card on the platform of the 3D-Bioplotter to demonstrate how the machine works.
Mr. Carvalho then talked us through the printing process. To begin, a liquefied material – in this case a silicone paste – is pressed through a needle-like tip by applying air pressure. The needle moves in all three dimensions which means it is able to create a three dimensional object. The printer is called ‘Bioplotter’ because the unique aspect of this machine is its use of biomaterials to make implants or other objects for biomedical application.
Some of the implants which are made using the 3D Bioplotter are intended to dissolve in the body. The materials which are used in this application include PLLA, PLGA, and silicone.
Implants made with thermoplastics – as they are mostly water and CO2 – are removed by the body naturally in around a week or two. Other materials, such as ceramic paste, may also be used to print implants. The implants printed using ceramic paste do not dissolve. Instead, the body uses this material to create new bone. This actually speeds up the process of the body’s regeneration.
The 3DBioplotter also prints hydrogels – such as collagen or alginate. These materials can have human cells actually added to them. Thus human cells may be printed directly with this machine.
Every Thursday is #3dthursday here at Adafruit! The DIY 3D printing community has thrilled us at Adafruit with its passion and dedication to making solid objects from digital models. Recently, we have noticed that our community integrating electronics projects into 3D printed enclosures, brackets, and sculptures, so each Thursday we celebrate and highlight these bold pioneers!
Have you considered building a 3D project around an Arduino or other microcontroller? How about printing a bracket to mount your Raspberry Pi to the back of your HD monitor? And don’t forget the countless EL Wire and LED projects that are possible when you are modeling your projects!
The Adafruit Learning System has dozens of great tools to get you well on your way to creating incredible works of engineering, interactive art, and design with your 3D printer! If you have a cool project you’ve made that joins the traditions of 3D printing and electronics, be sure to send it in to be featured here!
|
/*
* (C) Copyright 2019-2020 UCAR.
*
* This software is licensed under the terms of the Apache Licence Version 2.0
* which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
* In applying this licence, ECMWF does not waive the privileges and immunities
* granted to it by virtue of its status as an intergovernmental organisation nor
* does it submit to any jurisdiction.
*/
#ifndef MYMODEL_INCREMENT_INCREMENT_H_
#define MYMODEL_INCREMENT_INCREMENT_H_
#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include "oops/base/GeneralizedDepartures.h"
#include "oops/base/Variables.h"
#include "oops/util/DateTime.h"
#include "oops/util/Printable.h"
#include "oops/util/Serializable.h"
// forward declarations
// OOPS generic types used only by reference in this header.
namespace oops {
  class LocalIncrement;
  class Variables;
}
// UFO observation-operator types (not referenced in the declarations visible
// below — presumably required by related headers; confirm before removing).
namespace ufo {
  class GeoVaLs;
  class Locations;
}
// Types implemented by this model interface.
// NOTE(review): eckit::Configuration is referenced later in this header but
// is not forward-declared here — presumably pulled in transitively by the
// oops includes; confirm.
namespace mymodel {
  class Geometry;
  class GeometryIterator;
  class State;
}
// ----------------------------------------------------------------------------
namespace mymodel {
// Increment class
/// Increment of the model state: the difference between two State objects.
///
/// Implements the interface OOPS requires of a model increment: linear
/// algebra (operators, axpy, dot products), (de)serialization for ensemble
/// applications, per-gridpoint access via GeometryIterator, and I/O driven
/// by eckit configurations.
/// NOTE(review): util::ObjectCounter is used as a private base below, but
/// "oops/util/ObjectCounter.h" is not included by this header; likewise
/// util::Duration is used in updateTime() without a direct include —
/// presumably both arrive transitively. Confirm.
class Increment : public oops::GeneralizedDepartures,
                  public util::Printable,
                  public util::Serializable,
                  private util::ObjectCounter<Increment> {
 public:
  /// Name reported by ObjectCounter bookkeeping and error messages.
  static const std::string classname() {return "mymodel::Increment";}

  // Constructor, destructor
  /// Create an increment on the given geometry for the given variables,
  /// valid at the given time.
  Increment(const Geometry &, const oops::Variables &,
            const util::DateTime &);
  /// Copy an increment onto a (possibly different-resolution) geometry.
  Increment(const Geometry &, const Increment &);
  /// Copy constructor; the bool selects whether field values are copied —
  /// TODO confirm exact semantics against the definition.
  Increment(const Increment &, const bool);
  Increment(const Increment &);
  ~Increment();

  // Math operators
  Increment & operator =(const Increment &);
  Increment & operator-=(const Increment &);
  Increment & operator+=(const Increment &);
  Increment & operator*=(const double &);

  /// this += zz * xx, accumulating a State into the increment.
  void accumul(const double &, const State &);
  /// this += zz * dx; when check is true the valid times must agree
  /// (presumably — confirm against the definition).
  void axpy(const double &, const Increment &, const bool check = true);
  /// this = x1 - x2.
  void diff(const State &, const State &);
  double dot_product_with(const Increment &) const;
  double norm() const;
  /// Fill the increment with random values (e.g. adjoint tests).
  void random();
  /// Element-wise (Schur) product with another increment.
  void schur_product_with(const Increment &);
  void zero();
  /// Zero the fields and set the valid time.
  void zero(const util::DateTime &);
  void ones();

  // time manipulation
  void updateTime(const util::Duration & dt) { time_ += dt; }
  const util::DateTime & validTime() const { return time_; }
  util::DateTime & validTime() { return time_; }

  // dirac
  /// Set Dirac delta(s) at configuration-specified locations.
  void dirac(const eckit::Configuration &);

  // Iterator access
  oops::LocalIncrement getLocal(const GeometryIterator &) const;
  void setLocal(const oops::LocalIncrement &, const GeometryIterator &);

  // serialize (only needed for EDA?)
  size_t serialSize() const override;
  void serialize(std::vector<double> &) const override;
  void deserialize(const std::vector<double> &, size_t &) override;

  // other accessors
  std::shared_ptr<const Geometry> geometry() const { return geom_; }

  // I/O
  void read(const eckit::Configuration &);
  void write(const eckit::Configuration &) const;

 private:
  /// Implements util::Printable. Marked override (it was missing, unlike the
  /// serialization methods above) so the compiler checks the signature
  /// against the base-class virtual.
  void print(std::ostream &) const override;

  std::shared_ptr<const Geometry> geom_;  // grid this increment lives on
  util::DateTime time_;                   // validity time
  oops::Variables vars_;                  // variables contained
};
} // namespace mymodel
#endif // MYMODEL_INCREMENT_INCREMENT_H_
|
Until 1990, civil wars and citizen unrest rocked the tiny country of Nicaragua, which sits north of Costa Rica and south of El Salvador and Honduras. Since then, the country has begun rebuilding and is growing in popularity with tourists to Central America. Because decades of war prevented significant advances in technology and construction in Nicaragua, you will find the country much as it was before the violence — peaceful and unspoiled by tourism.
Popular Destinations
Most travelers to Nicaragua head straight to colonial Granada, with its brightly colored buildings and well-developed tourist infrastructure. South of Granada, the beach town of San Juan del Sur offers a long strip of waterfront and budget surfing lessons. To the east, the island of Ometepe sits in the middle of Lake Nicaragua, with its two towering volcanoes dominating the horizon. The New York Times calls Ometepe an unexpected gem and of the lake, "It’s a near-mythical tropical lake, one of the largest in the world, ringed by volcanoes and forest." For a more off-the-beaten-path experience, head north to Leon, where a few travelers wander the street markets and try sandboarding on a nearby volcano. In the far north, the green hills of Esteli hold coffee plantations. In the ocean off of Nicaragua's east coast, the Corn Islands offer a retreat for tourists looking to relax on white sand beaches.
When traveling outside of Granada, Ometepe and San Juan del Sur, you will find few locals who speak any English. For this type of trip, you should possess at least the basics of Spanish. At the very least, know how to ask for directions, talk about taxi and bus fares, request a room or bed, and understand basic conversation about times. The more Spanish you know, the better you will negotiate reasonable taxi prices — Nicaraguan taxi drivers are known for unreasonable price increases for travelers who cannot speak enough Spanish to defend themselves. Lonely Planet notes that Managua taxis are unmetered and the cab drivers are notorious for ripping off tourists.
Nicaragua is a small country but offers a network of shuttles, taxis, long-distance buses and "chicken" buses, which are usually old American school buses that have been converted into public transportation. Fares are cheap, usually under $15 (as of 2010) for even the longest trip. The small vans that run from Managua to Leon, for example, usually charge $2 for the two-hour trip. Some cities have a designated bus station, and you can usually pick up a bus or a shuttle by flagging it down; a bus attendant is usually yelling the destination as the bus rolls through town. Always ask at your hotel or hostel for reasonable taxi fares, and insist on the price with the driver; never get into a cab until you have both agreed on a price.
Nicaragua is a remarkably safe country, with few violent crimes against foreigners; according to Lonely Planet, Nicaragua is "one of the safest countries in the Americas." People are generally quite friendly, but as in any developing nation, poverty drives petty crime. Keep a close eye on your belongings, particularly when traveling by bus. If someone takes your bag to put it under or on top of a bus, watch until it has been securely stowed; sit on the same side of the bus and keep an eye out each time the bus stops to be sure your luggage doesn't "accidentally" go home with someone else. Keep any valuable items in your day pack, and never set it on the floor or in overhead racks. If you feel uneasy, keep your credit cards, cash and passport in a money belt under your clothes.
Female Travelers
Machismo is prevalent throughout Central America, but is a particular problem in Nicaragua. The Adventure Guide Nicaragua cautions that, "catcalls and overly friendly would-be suitors are not uncommon. Ignoring advances is the best solution." This street harassment is often worse for solo females and blonde women. Occasionally, Nicaraguan men will try to touch foreign women in the street, which is unacceptable even by local cultural standards. If a man touches you in the street, reprimand him sharply (in Spanish, if possible) or yell loudly to embarrass and surprise him — chances are, he will be so shocked that he will run away. As a precaution, avoid walking alone in unpopulated places after dark and do not take a solo tour with a male guide.
About the Author
Photo Credits
• coaches in nicaragua image by Grigory Kubatyan from Fotolia.com
Slider images:
1. Attribution: Fashiondetective; License: Creative Commons Attribution-Share Alike 3.0 Unported license
2. Attribution: Pasionyanhelo; License: Creative Commons Attribution-Share Alike 4.0 International, 3.0 Unported, 2.5 Generic, 2.0 Generic and 1.0 Generic license
3. Attribution: LaNicoya; License: Creative Commons Attribution 2.5 Generic license
4. Attribution: USMC Archives from Quantico, USA; License: Creative Commons Attribution 2.0 Generic license
Suggest an Article Correction
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.
#include "pch.h"
#include <winrt/Windows.ApplicationModel.background.h>
#include <winrt/Windows.Storage.Streams.h>
#include <winrt/Windows.Networking.PushNotifications.h>
#include <TerminalVelocityFeatures-PushNotifications.h>
#include "PushNotificationReceivedEventArgs.h"
#include "Microsoft.Windows.PushNotifications.PushNotificationReceivedEventArgs.g.cpp"
#include <iostream>
#include <string>
#include <externs.h>
#include <PushNotificationDummyDeferral.h>
#include "ValueMarshaling.h"
#include "PushNotificationUtility.h"
namespace winrt
{
using namespace Windows::ApplicationModel::Background;
using namespace Windows::Storage::Streams;
using namespace Windows::Storage;
using namespace Windows::Networking::PushNotifications;
}
namespace winrt::Microsoft::Windows::PushNotifications::implementation
{
    // Event args delivered to the app for a raw push notification.
    //
    // Two construction paths exist:
    //  * packaged apps   (m_unpackagedAppScenario == false): built from the
    //    platform's background-task / foreground activation; deferral and
    //    cancellation calls are forwarded to the real IBackgroundTaskInstance
    //    when one is available.
    //  * unpackaged apps (m_unpackagedAppScenario == true): built directly
    //    from a raw payload buffer or string; GetDeferral() hands out a
    //    no-op dummy deferral and Canceled() registrations are ignored.
    //
    // Every constructor throws E_NOTIMPL when the PushNotifications feature
    // is disabled for this build (TerminalVelocity feature gate).

    // Packaged/background activation: extract the payload bytes from the
    // RawNotification carried in the task's trigger details.
    PushNotificationReceivedEventArgs::PushNotificationReceivedEventArgs(winrt::IBackgroundTaskInstance const& backgroundTask):
        m_backgroundTaskInstance(backgroundTask),
        m_rawNotificationPayload(BuildPayload(backgroundTask.TriggerDetails().as<RawNotification>().ContentBytes())),
        m_unpackagedAppScenario(false)
    {
        THROW_HR_IF(E_NOTIMPL, !::Microsoft::Windows::PushNotifications::Feature_PushNotifications::IsEnabled());
    }

    // Packaged/foreground activation: copy the payload from the platform
    // event args. No background task instance is available in this path, so
    // m_backgroundTaskInstance stays default-constructed (null).
    PushNotificationReceivedEventArgs::PushNotificationReceivedEventArgs(winrt::PushNotificationReceivedEventArgs const& args):
        m_rawNotificationPayload(BuildPayload(args.RawNotification().ContentBytes())),
        m_unpackagedAppScenario(false)
    {
        THROW_HR_IF(E_NOTIMPL, !::Microsoft::Windows::PushNotifications::Feature_PushNotifications::IsEnabled());
    }

    // Unpackaged app: raw byte buffer of the given length.
    PushNotificationReceivedEventArgs::PushNotificationReceivedEventArgs(byte* const& payload, ULONG const& length) :
        m_rawNotificationPayload(BuildPayload(payload, length)),
        m_unpackagedAppScenario(true)
    {
        THROW_HR_IF(E_NOTIMPL, !::Microsoft::Windows::PushNotifications::Feature_PushNotifications::IsEnabled());
    }

    // Unpackaged app: wide-string payload, stored as UTF-8 bytes.
    PushNotificationReceivedEventArgs::PushNotificationReceivedEventArgs(std::wstring const& payload) :
        m_rawNotificationPayload(BuildPayload(payload)),
        m_unpackagedAppScenario(true)
    {
        THROW_HR_IF(E_NOTIMPL, !::Microsoft::Windows::PushNotifications::Feature_PushNotifications::IsEnabled());
    }

    // Copies the contents of a WinRT IBuffer into an owned byte vector.
    std::vector<uint8_t> PushNotificationReceivedEventArgs::BuildPayload(winrt::Windows::Storage::Streams::IBuffer const& buffer)
    {
        return { buffer.data(), buffer.data() + (buffer.Length() * sizeof(uint8_t)) };
    }

    // Copies `length` bytes from a raw buffer into an owned byte vector.
    std::vector<uint8_t> PushNotificationReceivedEventArgs::BuildPayload(byte* const& payload, ULONG const& length)
    {
        return { payload, payload + (length * sizeof(uint8_t)) };
    }

    // Converts a wide string to UTF-8 and stores the bytes (no terminator).
    std::vector<uint8_t> PushNotificationReceivedEventArgs::BuildPayload(std::wstring const& payload)
    {
        std::string payloadToSimpleString{ ::winrt::Microsoft::Windows::PushNotifications::Helpers::WideStringToUtf8String(payload) };
        return { payloadToSimpleString.c_str(), payloadToSimpleString.c_str() + (payloadToSimpleString.length() * sizeof(uint8_t)) };
    }

    // Returns a fresh copy of the raw payload bytes to the caller.
    winrt::com_array<uint8_t> PushNotificationReceivedEventArgs::Payload()
    {
        return { m_rawNotificationPayload.data(), m_rawNotificationPayload.data() + (m_rawNotificationPayload.size() * sizeof(uint8_t)) };
    }

    // Packaged: forward to the background task instance (illegal for
    // foreground activation, which has no task). Unpackaged: return a dummy
    // deferral so callers can use the same acquire/complete pattern.
    winrt::BackgroundTaskDeferral PushNotificationReceivedEventArgs::GetDeferral()
    {
        if (!m_unpackagedAppScenario)
        {
            THROW_HR_IF_NULL_MSG(E_ILLEGAL_METHOD_CALL, m_backgroundTaskInstance, "Foreground activation cannot call this.");
            return m_backgroundTaskInstance.GetDeferral();
        }
        else
        {
            auto dummyDeferral = winrt::make<PushNotificationDummyDeferral>();
            return dummyDeferral.as<winrt::BackgroundTaskDeferral>();
        }
    }

    // Registers a cancellation handler. Unpackaged apps have no cancellable
    // background task, so a zero token is returned and nothing is stored.
    winrt::event_token PushNotificationReceivedEventArgs::Canceled(winrt::BackgroundTaskCanceledEventHandler const& handler)
    {
        if (!m_unpackagedAppScenario)
        {
            THROW_HR_IF_NULL_MSG(E_ILLEGAL_METHOD_CALL, m_backgroundTaskInstance, "Foreground activation cannot call this.");
            return m_backgroundTaskInstance.Canceled(handler);
        }
        else
        {
            return { 0 };
        }
    }

    // Unregisters a previously registered cancellation handler; a no-op for
    // unpackaged apps (nothing was ever registered).
    void PushNotificationReceivedEventArgs::Canceled(winrt::event_token const& token) noexcept
    {
        if (!m_unpackagedAppScenario)
        {
            THROW_HR_IF_NULL_MSG(E_ILLEGAL_METHOD_CALL, m_backgroundTaskInstance, "Foreground activation cannot call this.");
            m_backgroundTaskInstance.Canceled(token);
        }
    }
}
|
Hurricane Erika was the fifth named storm, the third hurricane, and the first major hurricane of the inactive 1997 Atlantic hurricane season. Erika didn't form until well over a month after the previous storm did. Erika made it the first time since 1961 that there were no tropical cyclone formations during the month of August. Erika was also the longest-lasting tropical cyclone in the 1997 Atlantic hurricane season, and also the strongest. Erika was also the only major hurricane of the season. Erika came fairly close to the Lesser Antilles, but far away enough that it did not cause damage. Erika later turned north in response to an approaching trough. On September 8, Erika reached its peak strength of 125 mph winds and a pressure of 946 mb. As it passed over cooler waters, Erika began to weaken after keeping its peak strength for 24 hours. As it turned to the east, it weakened to a tropical storm. Erika became extratropical after passing near the Azores.
Even though Erika did not come close enough to do significant damage to the Lesser Antilles, it did come close enough to produce light rainfall and light winds throughout the northern Lesser Antilles. Erika's passage carried a cloud of volcanic ash to Antigua, due to the eruption of the Soufrière Hills Volcano on Montserrat. This is a rare occurrence. Also, strong waves generated by Erika caused beach erosion as well as coastal flooding in the northern part of Puerto Rico. Erika also caused the death of two surfers in Puerto Rico. Moderate wind gusts in Puerto Rico from Erika left thousands of residents without power. Erika caused $10,000,000 (1997 USD) in damage in the Caribbean territory of the United States. Finally, Erika produced gusty winds in the Azores, as well as light rainfall. Also, Erika was the only tropical cyclone during the two-month period of August and September, a very rare occurrence. The last time this happened was in the 1929 Atlantic hurricane season.
Erika at peak intensity on September 8
Formation September 3, 1997
Dissipation September 19, 1997
Highest winds 125 mph
Lowest pressure 946 mbar
Deaths 2 direct
Damages $10,000,000 (1997 USD)
Areas affected Lesser Antilles, Puerto Rico, Azores
Meteorological History
On August 31, a large tropical wave moved off the coast of Africa. Shortly after the wave left the coast, it had a large low-level circulation, although as the wave moved westward across the Atlantic Ocean, the circulation of the wave failed to contract significantly. Nevertheless, the wave slowly organized, and on September 3, the wave had enough convection within the circulation center to be classified as Tropical Depression Six, while located about 1150 miles east of the southernmost Lesser Antilles. Under the influence of a well-established subtropical ridge, the depression moved west-northwest at 20 mph. Late on September 3, the depression strengthened into Tropical Storm Erika. Erika continued to the west-northwest, and on September 4, an eye-like feature appeared to have developed in the center of the deepest convection. However, this feature was not an eye, as visible satellite imagery revealed a center of circulation that was partially exposed from the convection. This was due to strong upper-level wind shear over the system. Despite the unfavorable conditions, Erika became a hurricane late on September 4, located 530 miles east-southeast of Guadeloupe. Deep convection re-established itself over Erika's circulation center, and Erika continued moving west-northwest, slowly strengthening as it did so.
As Erika approached the Lesser Antilles, its foward speed lessened, and it passed within 85 miles of the islands a Category 1 hurricane. An approaching trough weakened the subtropical ridge, and this caused Erika to turn to the north, and later the northeast. On September 7, Erika quickly gained strength, and on September 8, Erika reached its peak strength of 125 mph winds and a pressure of 946 mb, while located 350 miles north of the Lesser Antilles. Erika maintained peak intensity for 24 hours before cooler waters began to weaken it. Erika passed about 350 miles east of Bermuda on September 10. Erika then turned east-northeast, in response to westerly steering currents. Erika weakened to a tropical storm on September 12, due to increased upper-level wind shear over the system. Erika turned to the east-southeast, and continued weakening as it did so, although it maintained deep convection near the center of circulation despite unfavorable upper-level conditions. On September 14, Erika turned to the northeast again, and it re-intensified to a strong tropical storm with 70 mph winds while located 510 miles west-southwest of the Azores. Erika passed near the western portion of the Azores on September 15, and quickly weakened, with deep convection diminishing near the center of circulation.
On September 16, just north of the Azores, Erika became an extratropical cyclone, and after executing a clockwise loop, it dissipated on September 19 about 230 miles southwest of Ireland.
Erika near the Lesser Antilles.
Early in its life, computer models had difficulty in forecasting where Erika would go; some brought Erika toward the Lesser Antilles, while some other models forecasted a more northerly motion, away from the Lesser Antilles. Because of the uncertainty, the government of Saint Martin issued a Tropical Storm Warning late on September 4. The next day, the governments of Antigua, Barbuda, Montserrat, Saint Kitts and Nevis, Dominica, Anguilla, Saint Barthelemy, and Guadeloupe issued Tropical Storm Warnings for their islands. When Erika's motion caused it to take a path that would bring it closer to the Lesser Antilles, all of the aforementioned islands upgraded their Tropical Storm Warnings to Hurricane Warnings, excluding the island of Guadeloupe. A Hurricane Watch was also issued for the British Isles and the United States Virgin Isles, and a Hurricane Watch was also issued for Puerto Rico. In public advisories, the National Hurricane Center stated that tropical storm-force conditions were likely in the Azores, with early forecasts posing a threat to Bermuda.
Also, the governments in the projected path of Erika urged residents to quickly prepare for Erika through radio addresses. Also, many citizens throughout the Lesser Antilles began preparing for the 1997 Atlantic hurricane season months before Erika developed. This preparedness including things such as installing hurricane shutters, and purchasing food supplies. Because numerous hurricanes affected the Lesser Antilles in both 1995 and 1996, the citizens of the Lesser Antilles executed a hurricane preparedness plan greater than usual for a mere Category 1 hurricane. In Puerto Rico, fisherman secured their boats in anticipation of Erika. Also on Puerto Rico, citizens formed long lines at gas stations, and they also purchased emergency supplies. Officials in Anguilla initiated a plan that would turn off the power supply to the island in the event that winds exceeded 50 mph.
Also, the government of Guadeloupe issued a Level 2 Storm Alert for the island, which recommended that all citizens remain in their homes. Also on Guadeloupe, officials closed the Pointe-à-Pitre International Airport. Also, as a precaution, authorities on Saint Martin initiated a curfew for all but those in service jobs. Finally, a cruise ship changed its course to avoid the island of Saint Thomas, because of the threat of Hurricane Erika.
Lesser Antilles
Erika produced strong waves throughout the Lesser Antilles, with 10-12 foot waves occuring on the island of Saint Martin. On Saint Martin, those strong waves flooded roadways and damaged one building that was under construction near the coast. Erika's outer rainbands passed through the island, with rainfall totals of 1.91 inches reported on Saint Martin. Anguilla reported winds of 35 mph as well as some rainfall. Antigua reported over 2 inches of rain from Erika, as well as wind gusts as high as 32 mph. Erika's passage resulted in low-level southwesterly winds. Also, just weeks after the eruption of the Soufrière Hills Volcano on Montserrat, Erika produced a cloud of falling ash over Antigua. Officials considered closing schools on the southern portion of the island because of the falling ash, though because the wind changed direction, so did the ash, with the ash turning away from the island. This was the first recorded occurence of ash fall in Antigua from Montserrat.
Also, winds from Erika peaked at 37 mph, with gusts to 47 mph at Cyril E. King Airport on the island of Saint Thomas. Also, Erika's outer rainbands produced light to moderate rainfall across the Virgin Islands, with rainfall peaking at 3.28 inches at the University of the Virgin Islands on the island of Saint Thomas. 1.32 inches of rain fell in Saint John, as well. The rainfall caused localized street flooding, while the added bonus of wind and rain caused power outages. Offshore, high waves capsized one dinghy, and they also broke a 50-foot boat from its moorings. On the island of Saint Croix, Erika produced sustained winds of 25 mph, with gusts up to 29 mph at Henry E. Rohlsen International Airport. Rainfall on the island of Saint Croix was light, however, peaking only at 0.83 inches at Christiansted. Wind gusts downed a few power lines on the island. Overall, damage was minor.
Puerto Rico
Erika's outer rainbands passed over Puerto Rico, with those rainbands producing winds of 23 mph, and gusts as high as 42 mph at Luis Muñoz Marín International Airport. The strong wind gusts snapped tree branches onto power lines, leaving up to 12,000 people without power in San Juan, Guayanbo, and Bayamon. Rainfall in Puerto Rico was light, however, with Caguas reported a total peak rainfall amount of 0.77 inches. Also, Erika produced swells of 10-12 feet on Puerto Rico's northeastern coast, causing beach erosion and coastal flooding, with one road being closed when sections of it were flooded or washed out. Strong waves forced the evacuation of eight families on the northern portion of the island. Strong waves also killed two surfers along the northeastern portion of the island. Total damage in Puerto Rico and the U.S. Virgin Islands totaled to $10,000,000 (1997 USD) in a preliminary estimate.
Atlantic Ocean
A total of 31 ships came into contact with Erika from September 4, when it was a tropical storm, to September 18 when it was an extratropical cyclone. Two ships reported hurricane-force winds, with a peak wind report being 99 mph. Also, the lowest pressure recored by a ship was 1000.4 mb while located 105 miles from Erika's center when it was an extratropical cyclone. The lowest pressure recorded when Erika was a tropical cyclone was 1000.5 mb when a ship was located 190 miles from the center of the cyclone.
While passing near the Azores, Erika produced sustained winds of 30 mph at Lajes Field. The wind gusts, however, were much stronger, with an 87 mph wind gust being reported in Flores. In addition, Lajes reported a wind gust as high as 105 mph from a 200 foot tower. In Flores, Erika produced as much as 2.35 inches of rain, and Erika also produced rough seas throughout the archipelago. Finally, damage, if any, in the Azores, is unknown.
Lack of Retirement
Because damage was minimal, the name Erika was not retired in the Spring of 1998 by the World Meteorological Organization. It was used again in 2003, and is on the list of names to be used for the 2009 Atlantic hurricane season.
See Also
1997 Atlantic hurricane season
Ad blocker interference detected!
|
#pragma once
#include "MeshHeader.h"
#include <experimental/filesystem>
#include <vector>
// Builds a correspondence ("cluster") between the triangles of a coarse
// fragment mesh and those of a fine mesh.
// NOTE(review): only the constructor is declared here; both meshes are taken
// by non-const reference, so the constructor presumably annotates or mutates
// them — confirm against the .cpp definition.
class TriangleCluster
{
public:
    TriangleCluster( TriMesh& fragMesh, TriMesh& fineMesh );
private:
};
|
#ifndef __BOYER_MOORE__H__
#define __BOYER_MOORE__H__
#include <vector>
#pragma once
template<class T>
class BoyerMoore
{
typedef std::vector<int> List;
List m_Table1;
List m_Table2;
const T* m_Sub;
const T* m_Src;
int m_LengthSrc;
int m_LengthSub;
bool m_bTable1Prepared;
bool m_bTable2Prepared;
int m_index;
public:
static const int No_Index = -1;
BoyerMoore(void):
m_Sub(0),
m_Src(0),
m_LengthSrc(0),
m_LengthSub(0),
m_bTable1Prepared(false),
m_bTable2Prepared(false),
m_index(0)
{
}
~BoyerMoore(void)
{
}
int FindIndex(T* src, int srclen, T* sub, int sublen)
{
Reset();
if(!src || !sub)
{
return No_Index;
}
m_LengthSrc = srclen;
m_LengthSub = sublen;
m_Src = src;
m_Sub = sub;
return t_FindIndex(true);
}
int FindIndex(const T* src, int srclen) //sub has already been given
{
if(!src || !m_Sub || (srclen <= 0) || (m_LengthSub <= 0))
{
return No_Index;
}
m_LengthSrc = srclen;
m_Src = src;
return t_FindIndex(false);
}
void SetSubString(const T* sub, int sublen)
{
Reset();
CreateTables(sub, sublen);
}
void Reset();
private:
void CreateTables(const T* sub, int sublen)
{
if(!sub || (sublen <= 0))
{
return;
}
m_LengthSub = sublen;
m_Sub = sub;
t_CreateTableOne();
t_CreateTableTwo();
}
void t_CreateTableOne();
void t_CreateTableTwo();
bool t_AreTablePrepared()
{
return (m_bTable1Prepared & m_bTable2Prepared);
}
int t_FindIndex(bool createtable = true);
};
// Build the first shift table, indexed by pattern position. For position i
// the entry (stored at slot i - 1) is the distance back to the previous
// occurrence of the same element inside the pattern; positions without an
// earlier duplicate default to a shift past the remainder of the pattern.
// NOTE(review): this is a nonstandard variant of the textbook Boyer-Moore
// bad-character table — shifts appear conservative but differ from the
// classic construction; verify against the matching loop in t_FindIndex.
template<class T>
void BoyerMoore<T>::t_CreateTableOne()
{
	// Both tables already valid — nothing to rebuild.
	if(t_AreTablePrepared())
	{
		return;
	}
	m_Table1.clear();
	int length = m_LengthSub;
	if(length <= 0)
	{
		return;
	}
	m_Table1.resize(length, 0);
	// For each position i, find the nearest earlier equal element and record
	// the distance between the two occurrences.
	for(int i = length - 1; i > 0; i--)
	{
		for(int j = i - 1; j >= 0; j--)
		{
			if(m_Sub[i] == m_Sub[j])
			{
				m_Table1[i - 1] = i - j;
				break;
			}
		}
	}
	// Entries still zero have no earlier duplicate: shift past the rest.
	for(int i = 0; i < length; i++)
	{
		if(m_Table1[i] == 0)
			m_Table1[i] = length - i;
	}
	m_bTable1Prepared = true;
}
// Build the second shift table (a good-suffix-style table). All positions
// whose element equals the pattern's last element are collected; matching
// suffixes ending at each pair of such positions determine the recorded
// shifts, and unmatched entries default to a shift past the remainder.
template<class T>
void BoyerMoore<T>::t_CreateTableTwo()
{
	// Both tables already valid — nothing to rebuild.
	if(t_AreTablePrepared())
	{
		return;
	}
	m_Table2.clear();
	int length = m_LengthSub;
	if(length <= 0)
	{
		return;
	}
	m_Table2.resize(length, 0);
	// Positions whose element matches the last pattern element.
	std::vector<int> EndTable;
	for(int i = 0; i < length; i++)
	{
		if(m_Sub[i] == m_Sub[length - 1])
		{
			EndTable.push_back(i);
		}
	}
	// Walk matching runs backwards from each pair of end positions and record
	// the shift (x - y) at the first free slot.
	for(int i = (int)((int)EndTable.size() - 1); i > 0; i--)
	{
		for(int j = i - 1; j >= 0; j--)
		{
			int x = EndTable[i];
			int y = EndTable[j];
			// BUG FIX: check y >= 0 BEFORE reading m_Sub[y]. The original
			// condition evaluated m_Sub[y] first, so once y walked off the
			// front of the pattern it read m_Sub[-1] (out-of-bounds).
			while((y >= 0) && (m_Sub[x] == m_Sub[y]))
			{
				if(m_Table2[x - 1] == 0)
				{
					m_Table2[x - 1] = x - y;
				}
				x--;
				y--;
			}
		}
	}
	// Remaining entries default to a shift past the rest of the pattern.
	for(int i = 0; i < length; i++)
	{
		if(m_Table2[i] == 0)
		{
			m_Table2[i] = length - i - 1;
		}
	}
	m_Table2[length - 1] = 1;
	m_bTable2Prepared = true;
}
// Clear all search state so the object can accept a fresh pattern and source.
template<class T>
void BoyerMoore<T>::Reset()
{
	m_Sub = 0;
	m_Src = 0;
	// BUG FIX: also rewind the resume offset. t_FindIndex starts scanning at
	// sublen - 1 + m_index; a stale m_index (possibly No_Index == -1 after a
	// failed search) made the next search start misaligned and could read
	// m_Src[-1] during the backwards comparison.
	m_index = 0;
	m_bTable1Prepared = false;
	m_bTable2Prepared = false;
	m_Table1.clear();
	m_Table2.clear();
}
// Scan m_Src for m_Sub starting from the offset remembered in m_index, so
// repeated calls resume relative to the previous result. Returns the match
// index, or No_Index when the pattern does not occur; the result is also
// stored in m_index.
template<class T>
int BoyerMoore<T>::t_FindIndex(bool createtable)
{
	int currIndex = No_Index;
	int srclen = m_LengthSrc;
	int sublen = m_LengthSub;
	// A pattern longer than the source can never occur.
	if(sublen > srclen)
	{
		m_index = No_Index;
		return currIndex;
	}
	if(createtable)
	{
		t_CreateTableOne();
		t_CreateTableTwo();
	}
	if(!t_AreTablePrepared())
	{
		return No_Index;
	}
	// Right-to-left comparison: i walks the source, j walks the pattern.
	int i = sublen - 1 + m_index;
	int j = sublen - 1;
	int curr = i; // right edge of the current alignment window
	while(i < srclen)
	{
		// Compare backwards while elements agree.
		while(j >= 0)
		{
			if(m_Src[i] == m_Sub[j])
			{
				i--;
				j--;
			}
			else
			{
				break;
			}
		}
		if(j >= 0)
		{
			// Mismatch at pattern position j: advance the window by the
			// larger of the two precomputed shifts (at least 1) and restart
			// the comparison from the new right edge.
			int tab1 = m_Table1[j];
			int tab2 = m_Table2[j];
			int max = tab2;
			if(tab1 > tab2)
				max = tab1;
			if(max == 0)
				max = 1;
			//cout<<"jumps "<<max<<endl;
			curr += max;
			i = curr;
			j = sublen - 1;
		}
		else
		{
			// The whole pattern matched.
			break;
		}
	}
	if(j < 0)
	{
		// i stopped one element before the start of the match.
		currIndex = i + 1;
		m_index = currIndex;
	}
	else
	{
		m_index = No_Index;
	}
	return m_index;
}
#endif
|
#include<bits/stdc++.h>
#include<vector>
#define pb push_back
using namespace std;
// Hand-rolled binary min-heap stored in a std::vector<int>.
// Note: insert() also dumps the entire heap to stdout after each insertion —
// debug output carried over from the original and preserved on purpose.
class Priority_Queue{
    std::vector<int> heap;
    int last; // unused; retained from the original definition
public:
    // Sift the element at position `curr` up toward the root until the
    // min-heap property holds along its path.
    void helper_insert(int curr){
        while(curr > 0){
            int parent = (curr - 1) / 2;
            if(heap[parent] < heap[curr]){
                return; // parent already smaller: heap property restored
            }
            if(heap[parent] > heap[curr]){
                std::swap(heap[curr], heap[parent]);
            }
            curr = parent;
        }
    }
    // Index of the smaller of the two elements at positions i and j.
    int Min(int i, int j){
        return (heap[i] < heap[j]) ? i : j;
    }
    bool empty(){
        return heap.empty();
    }
    // Append the element, restore the heap property, then print the heap
    // contents (debug trace, space-separated, newline-terminated).
    void insert(int element){
        heap.push_back(element);
        helper_insert((int)heap.size() - 1);
        for(std::size_t k = 0; k < heap.size(); k++){
            std::cout << heap[k] << " ";
        }
        std::cout << std::endl;
    }
    // Smallest element (the root). Precondition: heap is non-empty.
    int get_min(){
        return heap[0];
    }
    // Remove and return the minimum: move the last element to the root and
    // sift it down, always descending toward the smaller child.
    int pop(){
        int smallest = heap[0];
        std::swap(heap[0], heap[heap.size() - 1]);
        heap.pop_back();
        int curr = 0;
        while(2 * curr + 1 < (int)heap.size()){
            int left = 2 * curr + 1;
            int right = 2 * curr + 2;
            int child = (right < (int)heap.size()) ? Min(right, left) : left;
            if(heap[child] > heap[curr]){
                break; // both children larger: heap property restored
            }
            if(heap[child] < heap[curr]){
                std::swap(heap[child], heap[curr]);
            }
            curr = child;
        }
        return smallest;
    }
};
int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL); cout.tie(NULL);

    // Exercise the hand-rolled min-heap: push a batch of values (each insert
    // prints the heap state), then drain it, printing ascending order.
    Priority_Queue p;
    const int values[] = { 3534, 4, 24, 76, 3, -23 };
    for (int v : values) {
        p.insert(v);
    }
    while (!p.empty()) {
        cout << p.pop() << " ";
    }
}
|
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <lib/fidl/cpp/object_coding.h>
#include <fidl/test/misc/cpp/fidl.h>
#include <utility>
#include "gtest/gtest.h"
namespace fidl {
namespace {
// Round-trip a simple struct through EncodeObject/DecodeObject and verify the
// payload survives intact.
TEST(EncodeObject, Struct) {
  fidl::test::misc::Int64Struct s;
  s.x = 123;
  std::vector<uint8_t> data;
  // Fix: err_msg was uninitialized; streaming it into the EXPECT failure
  // message would have been undefined behavior if an assertion ever failed.
  const char* err_msg = "";
  EXPECT_EQ(ZX_OK, EncodeObject(&s, &data, &err_msg)) << err_msg;
  fidl::test::misc::Int64Struct t;
  EXPECT_EQ(ZX_OK, DecodeObject(data.data(), data.size(), &t, &err_msg))
      << err_msg;
  EXPECT_EQ(s.x, 123);
  EXPECT_EQ(t.x, 123);
}
} // namespace
} // namespace fidl
|
/*********************************************************\
* File: Provider.cpp *
*
* Copyright (C) 2002-2013 The PixelLight Team (http://www.pixellight.org/)
*
* This file is part of PixelLight.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software
* and associated documentation files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\*********************************************************/
//[-------------------------------------------------------]
//[ Includes ]
//[-------------------------------------------------------]
#include "PLInput/Input/InputManager.h"
#include "PLInput/Input/Devices/Device.h"
#include "PLInput/Backend/Provider.h"
//[-------------------------------------------------------]
//[ Namespace ]
//[-------------------------------------------------------]
using namespace PLCore;
namespace PLInput {
//[-------------------------------------------------------]
//[ RTTI interface ]
//[-------------------------------------------------------]
pl_class_metadata(Provider, "PLInput", PLCore::Object, "Input provider")
pl_class_metadata_end(Provider)
//[-------------------------------------------------------]
//[ Public functions ]
//[-------------------------------------------------------]
/**
* @brief
* Default constructor
*/
Provider::Provider()
{
	// Nothing to initialize explicitly; the device list starts empty
}
/**
* @brief
* Destructor
*/
Provider::~Provider()
{
	// Clean up: unregister and destroy every device owned by this provider
	Clear();
}
/**
* @brief
* Get list of devices
*/
const List<Device*> &Provider::GetDevices() const
{
	// Return the internal device list; the provider keeps ownership of the
	// Device instances (they are deleted in Clear()/DetectDevices())
	return m_lstDevices;
}
/**
* @brief
* Detect devices
*/
void Provider::DetectDevices(bool bReset)
{
	// Delete all devices first when a full re-detection was requested?
	if (bReset)
		Clear();

	// Flag all current devices as 'not confirmed'; QueryDevices() re-confirms
	// (via CheckDevice/AddDevice) the ones that are still present
	for (uint32 i=0; i<m_lstDevices.GetNumOfElements(); i++)
		m_lstDevices[i]->m_bConfirmed = false;

	// Detect new devices (the ones that are already there will be ignored by AddDevice)
	QueryDevices();

	// Delete all devices that are no longer there (confirmed == false)
	for (uint32 i=0; i<m_lstDevices.GetNumOfElements(); i++) {
		Device *pDevice = m_lstDevices[i];
		if (!pDevice->m_bConfirmed) {
			// Remove device from the global manager and from our own list;
			// step i back because Remove() shifts later elements down
			InputManager::GetInstance()->RemoveDevice(pDevice);
			m_lstDevices.Remove(pDevice);
			delete pDevice;
			i--;
		}
	}
}
//[-------------------------------------------------------]
//[ Protected functions ]
//[-------------------------------------------------------]
/**
* @brief
* Destroy all devices
*/
void Provider::Clear()
{
	// Delete all input devices: unregister each from the InputManager first,
	// then destroy it (the provider owns its Device instances)
	for (uint32 i=0; i<m_lstDevices.GetNumOfElements(); i++) {
		Device *pDevice = m_lstDevices[i];
		InputManager::GetInstance()->RemoveDevice(pDevice);
		delete pDevice;
	}
	m_lstDevices.Clear();
}
/**
* @brief
* Check if a device is already known
*/
bool Provider::CheckDevice(const String &sName)
{
// Check if the device is already present
Device *pDevice = InputManager::GetInstance()->GetDevice(sName);
if (pDevice) {
// Update device
pDevice->m_bConfirmed = true;
return true;
}
// Not found
return false;
}
/**
* @brief
* Add a new input device
*/
bool Provider::AddDevice(const String &sName, Device *pDevice)
{
	// Refuse duplicates: a device with this name must not be registered yet
	if (InputManager::GetInstance()->GetDevice(sName))
		return false;

	// Try to register the device with the central input manager
	if (!InputManager::GetInstance()->AddDevice(pDevice))
		return false;

	// Registration succeeded, so pDevice is valid: track it locally and mark
	// it as present for DetectDevices()
	m_lstDevices.Add(pDevice);
	pDevice->m_bConfirmed = true;
	return true;
}
//[-------------------------------------------------------]
//[ Namespace ]
//[-------------------------------------------------------]
} // PLInput
|
Winter Guard – The Biggest Sport You Never Heard Of
Winter Guard – The Biggest Sport You Never Heard Of
Winter Guard Performers
It’s a utilitarian high school gym. There are books and backpacks piled along the walls. The volleyball nets are removed and the basketball hoops are up. On a giant tarp in the middle of the floor, a different activity is underway.
Young men and women in a variety of shorts and tees, or leggings and tanks, are practicing dance moves while music plays on the loudspeaker. Some are twirling and flipping wooden rifles, while others are waving large colored flags. A few are tossing swords in the air (yes, freaking SWORDS!).
Each individual’s part contributes to the whole. They move around the space together, always aware of what the others are doing, concentrating on their own movements and listening carefully to their coach. Over and over you hear them chant “1-2-3-4-5-6-7-8!” In five minutes, maybe less, it’s all over. And then they start again, because they can always make it better.
Welcome to winter guard!
So What the Heck Is It?
Winter guard is a dynamic indoor competitive sport, based on traditional color guard, but it has gone way beyond those military roots to become something unique and artistic. Whether it’s a part of a high school or university’s athletic and artistic curriculum, or organized by an independent group, it takes dedication, team work, and passion to create a winning routine. This is why it is called the “Sport of the Arts”.
It’s like a dance version of Glee: winter guard teams compete in regional, national, and international competitions organized by Winter Guard International (WGI), the sport’s governing body. There are also many regional groups (such as Texas Color Guard Circuit) that host their own competitions. While it began in the United States, and is still most popular there, there are now also teams competing in Canada, Great Britain, Korea, Belgium, Holland, Japan, and Africa.
Color Guard Roots
WGI Logo
WGI has been around since 1977, and a lot has changed since those early days. It has left behind its marching band beginnings to become a display of music and dance, wearing matching or coordinating costumes rather than military uniforms, while still using the traditional color guard elements of flags, rifles and sabres, all against the background of creatively designed backdrops and floor tarps.
Don’t Tell Them It’s Not Hard Work
Participation in this sport is rewarding, but it’s also hard work. The winter guard team must work in complete unison to present a flawless performance. That takes months of planning, practice, and polishing by dedicated individuals, who have to master a variety of challenging skills before they are ready for a competition.
Winter Guard Music and Dance
Winter guard is more than just flag throwing; there are dance and acrobatics as well.
One of the big differences between the indoor and outdoor versions of this sport is the music. Whereas a color guard ensemble often works with a marching band in the generous space available on the football field, in the more restricted confines indoors (usually a gym), recorded music is usually used. The music could be instrumental or even spoken word, but tend to be lyrical and evocative. This lends itself to the development of an interpretive dance that tells a story or creates a mood.
Dance is the element that ties the performance together. Just as a cheerleading squad must practice basic moves and then complicated routines, or a theater company has to rehearse a song and dance number over and over, a winter guard team works on elementary dance skills and then the performance of a specific routine until it is perfect. Much of the appeal of a winter guard performance is in the team members moving in a synchronized manner, and that means hours of practice in that school gym to get the moves just right.
The Equipment Used
Color guard is famous for its flags, and that element has come inside to winter guard. While early WGI rules required the use of the American Flag, today the teams use brightly colored silks in a variety of designs and sizes to better reflect the tone of the team’s presentation. They could be solid color or patterned, but the selection of the design is only the beginning.
Rifle, Sabre, Pole Rack
Rifle, Sabre, Pole Rack
The team then spends long hours practicing their flag techniques, so that in performance the audience will see a routine executed with flawless precision. Whether the flags are flipped or swirled all in unison, or in a succession of waves across the space, it is up to each and every team member to play his or her part so that the team acts as one. When it works, it’s like magic!
The military roots of winter guard are represented by the sabres and rifles that are a part of each routine. However, they are far removed from the real thing, becoming props for the performance rather than functioning weapons. Both are often wrapped in electrical tape and padded to make them safer and easier to handle in performance.
The “rifles” are now wooden models with straps, much lighter than actual weapons, and thus well-suited for complicated spinning routines. Once again, the team members have to develop both their individual rifle-handling skills and the coordination of their movements so that they create the desired picture for their audience. One dropped rifle- or one out of sync with the rest- will affect the whole group. Nobody wants to be that person!
Sabres can be made of metal or plastic, but metal ones are more common. The tips and blades are blunted to increase safety, but otherwise they look like real swords. The balance of a sabre is extremely important, as it will be tossed and twirled with great precision. They are not necessarily used by everyone on the team; they require a higher degree of skill and are usually reserved for the most experienced members. The dazzling display of silver-colored sabres cutting through the air adds excitement to a winter guard performance, and again takes many hours of practice to achieve perfection in execution.
Putting In The Hard Work
So, a winter guard team has been organized, its members selected, hours, days, months of practice have been completed, and it is time for competition. What does that entail?
Winterguard flag throwing practice makes perfect
The team travels to the site of the competition in their region. There they will be competing against other teams in their division, depending on whether they are school-affiliated or independent, and on their level of experience and skill. The order of performances is usually determined by a random draw.
The team has only a few minutes to set up for its performance. That means rolling out their tarp to cover the floor, bringing out set pieces, and putting up backdrops. Doing this quickly and effectively takes practice as well. Immediately following the performance, the team must then quickly clear the floor for the next competitors while the stopwatch ticks.
In between, the performance itself is around five minutes in length. This is the culmination of all those months of effort, and every team member is equally responsible for making it the best it can be. The performance is judged on talent, precision, creativity, and horizontal orchestration by a panel of judges. First, second, and third place will be selected in each division. The winners will be able to go on to the World Championship, where over 350 teams compete.
At the End of the Day
However, success is not just determined by a ribbon. Every winter guard team member gets an important grounding in various athletic and artistic skills. And yes, they have some fun, too in the process. As well, they learn how to work with others in a team, a skill that will help them in school and work for the rest of their lives. This is the true value of membership on a winter guard team.
Leave a Reply
By submitting this form, you are granting: ABI Digital Solutions, 851 N. FM 3083 Rd. E, Conroe, TX, 77303, permission to email you. You may unsubscribe via the link found at the bottom of every email. (See our Email Privacy Policy for details.) Emails are serviced by Constant Contact.
|
Thursday, January 27, 2011
Going to the Neighbors' House
We have some dear older neighbors down the road, who don't get out much during the winter months due to illness, and snow. Last week we brought them a pan of fresh homemade cinnamon rolls, hot from our oven. In return, they grabbed the candy bowl from their coffee table that was halfway full of M&M's, and gave it to my husband, despite his protests. They're from that generation of folks that always wants to give something in return for kindness. It's tough to bless them, because no matter whether it's shoveling their driveway, raking pine needles, or mowing their yard, they find something from their house for us to take home. It can be pretty awkward.
They gave us a call this week to thank us for the dessert and ask for the recipe. They LOVE my children, so I decided to send them on an errand down the road to return the candy bowl, and give them a copy of the cinnamon roll recipe. Before the kids left, I combed their hair, made sure that their clothes matched, and gave them no less than 20 directions and orders. My instructions included, but were not limited to the following things:
"Look both ways before you cross the road."
"Hold hands with your sister."
"Don't crinkle the recipe."
"Hold the glass bowl with TWO hands!"
"Say, "Thank you" for the M & M's."
"Speak loud enough for them to hear you because they don't hear well."
"Look them in the eyes."
"Say, 'Hello Mr. and Mrs. ________'."
"Say 'please' and 'thank you' if they offer you a drink."
"Don't run down their driveway."
"Stay out of their yard."
"Be polite."
That's when it suddenly hit me like a rock. I've turned into my mother!!! I remember when I was a kid, my mom would have us deliver Christmas gifts to all of the neighbors. In addition to safety rules, she had my brother and me ring the doorbell and when the neighbors answered the door, we were to say the following thing in unison: "Merry Christmas from the __________'s." You can imagine how excited my little brother was to do this. Not. I carried the baked goods, and he carried the small potted pine tree saplings, and we trudged down the driveway to start our deliveries.
[They answer the door] "Why hello little ones!"
We choke out the rehearsed greeting completely out of unison, "Merry Christmas from the ________'s."
Then we hand over the goods, turn and sprint through their yard until we reach home.
I don't know why I've always remembered this, but now that I have kids of my own, it terrifies me to think that they may be the same way that I was.
So I watched them walk down the road from the front window of my living room. I was checking to see if they followed my instructions. So far, so good.
After they were out of sight, I waited. And waited. And waited. They were gone over ten minutes. I started to worry, but then I saw them sprinting down the road towards home.
When they walked in the front door, I started peppering them with questions:
"Were you kind?"
"Did you say thank you?"
"Did you speak loud enough for them to hear?"
Then I asked them to tell me the EXACT conversation they had with them.
Because, after Wednesday night's embarrassing moment with my youngest, I'm acutely aware of the fact that my children's mouths are loaded cannons, ready to spew out random facts and information that may cause me grief, and, or further explanation. Don't you just love kids?
In the end, everything went very well and I considered it to be just one more successful mission under their learning belts. I'm a firm believer that older folks and younger ones really need each other. My kids love doing things for our older neighbors, and my neighbors just adore having a good chat with my kids. I think it's a win-win situation for all.
No comments:
Post a Comment
|
// Copyright (c) OpenMMLab. All rights reserved.
#include "mmdeploy/codebase/mmocr/panet.h"
#include <algorithm>
#include <opencv2/opencv.hpp>
#include "mmdeploy/codebase/mmocr/mmocr.h"
#include "mmdeploy/core/device.h"
#include "mmdeploy/core/registry.h"
#include "mmdeploy/core/serialization.h"
#include "mmdeploy/core/utils/device_utils.h"
namespace mmdeploy {
namespace mmocr {
std::vector<std::vector<float>> pixel_group_cpu(const cv::Mat_<float>& score,
const cv::Mat_<uint8_t>& mask,
const cv::Mat_<float>& embedding,
const cv::Mat_<int32_t>& kernel_label,
const cv::Mat_<uint8_t>& kernel_contour,
int kernel_region_num, float dis_threshold);
// Post-processing head for PANet-style text detection. Reads thresholds from
// `config["params"]` and dispatches the pixel-level work to a
// platform-specific PaHeadImpl backend.
class PANHead : public MMOCR {
 public:
  explicit PANHead(const Value& config) : MMOCR(config) {
    if (config.contains("params")) {
      auto& params = config["params"];
      // BUG FIX: this line previously read "min_text_avg_confidence" (which
      // is also read below), so the "min_text_confidence" config key was
      // silently ignored and min_text_confidence_ always kept its default.
      min_text_confidence_ = params.value("min_text_confidence", min_text_confidence_);
      min_kernel_confidence_ = params.value("min_kernel_confidence", min_kernel_confidence_);
      min_text_avg_confidence_ = params.value("min_text_avg_confidence", min_text_avg_confidence_);
      min_text_area_ = params.value("min_text_area", min_text_area_);
      rescale_ = params.value("rescale", rescale_);
      downsample_ratio_ = params.value("downsample_ratio", downsample_ratio_);
    }
    // Pick the PaHeadImpl registered for this device's platform.
    auto platform = Platform(device_.platform_id()).GetPlatformName();
    auto creator = Registry<PaHeadImpl>::Get().GetCreator(platform);
    if (!creator) {
      MMDEPLOY_ERROR("PANHead: implementation for platform \"{}\" not found", platform);
      throw_exception(eEntryNotFound);
    }
    impl_ = creator->Create(nullptr);
    impl_->Init(stream_);
  }

  // Convert the raw network output (_pred["output"], a 1xCxHxW float tensor)
  // into rotated text boxes with confidences. Returns eNotSupported for any
  // other tensor layout.
  Result<Value> operator()(const Value& _data, const Value& _pred) noexcept {
    OUTCOME_TRY(auto pred, MakeAvailableOnDevice(_pred["output"].get<Tensor>(), device_, stream_));
    OUTCOME_TRY(stream_.Wait());
    if (pred.shape().size() != 4 || pred.shape(0) != 1 || pred.data_type() != DataType::kFLOAT) {
      MMDEPLOY_ERROR("unsupported `output` tensor, shape: {}, dtype: {}", pred.shape(),
                     (int)pred.data_type());
      return Status(eNotSupported);
    }
    // drop batch dimension
    pred.Squeeze(0);
    // Channel 0: text-region scores, channel 1: kernel scores, the remaining
    // channels are the per-pixel embeddings.
    auto text_pred = pred.Slice(0);
    auto kernel_pred = pred.Slice(1);
    auto embed_pred = pred.Slice(2, pred.shape(0));
    cv::Mat_<float> text_score;
    cv::Mat_<uint8_t> text;
    cv::Mat_<uint8_t> kernel;
    cv::Mat_<int> labels;
    cv::Mat_<float> embed;
    int region_num = 0;
    OUTCOME_TRY(impl_->Process(text_pred, kernel_pred, embed_pred, min_text_confidence_,
                               min_kernel_confidence_, text_score, text, kernel, labels, embed,
                               region_num));
    // Aggregate text pixels around the kernels into instances.
    auto text_points = pixel_group_cpu(text_score, text, embed, labels, kernel, region_num,
                                       min_text_avg_confidence_);
    auto scale_w = _data["img_metas"]["scale_factor"][0].get<float>();
    auto scale_h = _data["img_metas"]["scale_factor"][1].get<float>();
    TextDetectorOutput output;
    for (auto& text_point : text_points) {
      // text_point layout: [avg_confidence, ..., x0, y0, x1, y1, ...] — the
      // pixel coordinates start at offset 2 (see the cv::Mat_ view below).
      auto text_confidence = text_point[0];
      auto area = text_point.size() - 2;
      if (filter_instance(static_cast<float>(area), text_confidence, min_text_area_,
                          min_text_avg_confidence_)) {
        continue;
      }
      // Fit a minimum-area rotated rectangle around the instance's pixels.
      cv::Mat_<float> points(text_point.size() / 2 - 1, 2, text_point.data() + 2);
      cv::RotatedRect rect = cv::minAreaRect(points);
      std::vector<cv::Point2f> vertices(4);
      rect.points(vertices.data());
      if (rescale_) {
        // Map the box back to original-image coordinates: undo both the
        // network's downsampling and the preprocessing resize.
        for (auto& p : vertices) {
          p.x /= scale_w * downsample_ratio_;
          p.y /= scale_h * downsample_ratio_;
        }
      }
      auto& bbox = output.boxes.emplace_back();
      for (int i = 0; i < 4; ++i) {
        bbox[i * 2] = vertices[i].x;
        bbox[i * 2 + 1] = vertices[i].y;
      }
      output.scores.push_back(text_confidence);
    }
    return to_value(output);
  }

  // True when an instance is too small or too low-confidence to keep.
  static bool filter_instance(float area, float confidence, float min_area, float min_confidence) {
    return area < min_area || confidence < min_confidence;
  }

  float min_text_confidence_{.5f};       // per-pixel text score threshold
  float min_kernel_confidence_{.5f};     // per-pixel kernel score threshold
  float min_text_avg_confidence_{0.85};  // per-instance average threshold
  float min_text_area_{16};              // minimum pixel count per instance
  bool rescale_{true};                   // map boxes back to input-image scale
  float downsample_ratio_{.25f};         // network output stride factor
  std::unique_ptr<PaHeadImpl> impl_;
};
REGISTER_CODEBASE_COMPONENT(MMOCR, PANHead);
} // namespace mmocr
MMDEPLOY_DEFINE_REGISTRY(mmocr::PaHeadImpl);
} // namespace mmdeploy
|
#ifndef TAG_HH
#define TAG_HH
#include <algorithm>
#include <map>
#include <string>
using std::map;
using std::string;
// HTML tag types. Ordering matters: every void (self-closing) element comes
// before the END_OF_VOID_TAGS sentinel, which Tag::is_void() compares
// against; CUSTOM (last) marks tag names not covered by this list.
enum TagType {
  AREA,
  BASE,
  BASEFONT,
  BGSOUND,
  BR,
  COL,
  COMMAND,
  EMBED,
  FRAME,
  HR,
  IMAGE,
  IMG,
  INPUT,
  ISINDEX,
  KEYGEN,
  LINK,
  MENUITEM,
  META,
  NEXTID,
  PARAM,
  SOURCE,
  TRACK,
  WBR,
  // Sentinel: everything above is a void element, everything below may have
  // content. Also used as the "unknown type" by Tag's default constructor.
  END_OF_VOID_TAGS,
  A,
  ABBR,
  ADDRESS,
  ARTICLE,
  ASIDE,
  AUDIO,
  B,
  BDI,
  BDO,
  BLOCKQUOTE,
  BODY,
  BUTTON,
  CANVAS,
  CAPTION,
  CITE,
  CODE,
  COLGROUP,
  DATA,
  DATALIST,
  DD,
  DEL,
  DETAILS,
  DFN,
  DIALOG,
  DIV,
  DL,
  DT,
  EM,
  FIELDSET,
  FIGCAPTION,
  FIGURE,
  FOOTER,
  FORM,
  H1,
  H2,
  H3,
  H4,
  H5,
  H6,
  HEAD,
  HEADER,
  HGROUP,
  HTML,
  I,
  IFRAME,
  INS,
  KBD,
  LABEL,
  LEGEND,
  LI,
  MAIN,
  MAP,
  MARK,
  MATH,
  MENU,
  METER,
  NAV,
  NOSCRIPT,
  OBJECT,
  OL,
  OPTGROUP,
  OPTION,
  OUTPUT,
  P,
  PICTURE,
  PRE,
  PROGRESS,
  Q,
  RB,
  RP,
  RT,
  RTC,
  RUBY,
  S,
  SAMP,
  SCRIPT,
  SECTION,
  SELECT,
  SLOT,
  SMALL,
  SPAN,
  STRONG,
  STYLE,
  SUB,
  SUMMARY,
  SUP,
  SVG,
  TABLE,
  TBODY,
  TD,
  TEMPLATE,
  TEXTAREA,
  TFOOT,
  TH,
  THEAD,
  TIME,
  TITLE,
  TR,
  U,
  UL,
  VAR,
  VIDEO,
  // Any tag name not listed above; the name is kept in Tag::custom_tag_name.
  CUSTOM,
};
// Get the string->TagType match for the above enums
// Build the tag-name -> TagType lookup table used by Tag::for_name().
// The sentinels END_OF_VOID_TAGS and CUSTOM are intentionally absent: they
// are not real tag names. Each TAG(name) line maps the stringified
// enumerator (uppercase) to its enum value.
static const map<string, TagType> get_tag_map() {
  map<string, TagType> result;
#define TAG(name) result[#name] = name
  TAG(AREA);
  TAG(BASE);
  TAG(BASEFONT);
  TAG(BGSOUND);
  TAG(BR);
  TAG(COL);
  TAG(COMMAND);
  TAG(EMBED);
  TAG(FRAME);
  TAG(HR);
  TAG(IMAGE);
  TAG(IMG);
  TAG(INPUT);
  TAG(ISINDEX);
  TAG(KEYGEN);
  TAG(LINK);
  TAG(MENUITEM);
  TAG(META);
  TAG(NEXTID);
  TAG(PARAM);
  TAG(SOURCE);
  TAG(TRACK);
  TAG(WBR);
  TAG(A);
  TAG(ABBR);
  TAG(ADDRESS);
  TAG(ARTICLE);
  TAG(ASIDE);
  TAG(AUDIO);
  TAG(B);
  TAG(BDI);
  TAG(BDO);
  TAG(BLOCKQUOTE);
  TAG(BODY);
  TAG(BUTTON);
  TAG(CANVAS);
  TAG(CAPTION);
  TAG(CITE);
  TAG(CODE);
  TAG(COLGROUP);
  TAG(DATA);
  TAG(DATALIST);
  TAG(DD);
  TAG(DEL);
  TAG(DETAILS);
  TAG(DFN);
  TAG(DIALOG);
  TAG(DIV);
  TAG(DL);
  TAG(DT);
  TAG(EM);
  TAG(FIELDSET);
  TAG(FIGCAPTION);
  TAG(FIGURE);
  TAG(FOOTER);
  TAG(FORM);
  TAG(H1);
  TAG(H2);
  TAG(H3);
  TAG(H4);
  TAG(H5);
  TAG(H6);
  TAG(HEAD);
  TAG(HEADER);
  TAG(HGROUP);
  TAG(HTML);
  TAG(I);
  TAG(IFRAME);
  TAG(INS);
  TAG(KBD);
  TAG(LABEL);
  TAG(LEGEND);
  TAG(LI);
  TAG(MAIN);
  TAG(MAP);
  TAG(MARK);
  TAG(MATH);
  TAG(MENU);
  TAG(METER);
  TAG(NAV);
  TAG(NOSCRIPT);
  TAG(OBJECT);
  TAG(OL);
  TAG(OPTGROUP);
  TAG(OPTION);
  TAG(OUTPUT);
  TAG(P);
  TAG(PICTURE);
  TAG(PRE);
  TAG(PROGRESS);
  TAG(Q);
  TAG(RB);
  TAG(RP);
  TAG(RT);
  TAG(RTC);
  TAG(RUBY);
  TAG(S);
  TAG(SAMP);
  TAG(SCRIPT);
  TAG(SECTION);
  TAG(SELECT);
  TAG(SLOT);
  TAG(SMALL);
  TAG(SPAN);
  TAG(STRONG);
  TAG(STYLE);
  TAG(SUB);
  TAG(SUMMARY);
  TAG(SUP);
  TAG(SVG);
  TAG(TABLE);
  TAG(TBODY);
  TAG(TD);
  TAG(TEMPLATE);
  TAG(TEXTAREA);
  TAG(TFOOT);
  TAG(TH);
  TAG(THEAD);
  TAG(TIME);
  TAG(TITLE);
  TAG(TR);
  TAG(U);
  TAG(UL);
  TAG(VAR);
  TAG(VIDEO);
#undef TAG
  return result;
}
// Name -> type lookup, built once during static initialization.
static const map<string, TagType> TAG_TYPES_BY_TAG_NAME = get_tag_map();

// Elements whose start tag implicitly closes an open <p>
// (consulted by Tag::can_contain for the P case).
static const TagType TAG_TYPES_NOT_ALLOWED_IN_PARAGRAPHS[] = {
    ADDRESS, ARTICLE, ASIDE, BLOCKQUOTE, DETAILS, DIV, DL,
    FIELDSET, FIGCAPTION, FIGURE, FOOTER, FORM, H1, H2,
    H3, H4, H5, H6, HEADER, HR, MAIN,
    NAV, OL, P, PRE, SECTION,
};

// Past-the-end pointer for std::find over the array above.
static const TagType *TAG_TYPES_NOT_ALLOWED_IN_PARAGRAPHS_END =
    (TAG_TYPES_NOT_ALLOWED_IN_PARAGRAPHS +
     sizeof(TAG_TYPES_NOT_ALLOWED_IN_PARAGRAPHS) / sizeof(TagType));
// A single HTML tag; the tag name is preserved only for CUSTOM tags.
struct Tag {
  TagType type;
  string custom_tag_name;  // meaningful only when type == CUSTOM

  // This default constructor is used in the case where there is not enough
  // space in the serialization buffer to store all of the tags. In that case,
  // tags that cannot be serialized will be treated as having an unknown type.
  // These tags will be closed via implicit end tags regardless of which
  // closing tag is encountered next.
  Tag() : type(END_OF_VOID_TAGS) {}

  Tag(TagType type, const string &name) : type(type), custom_tag_name(name) {}

  // Tags are equal when their types match; CUSTOM tags additionally require
  // identical names.
  bool operator==(const Tag &other) const {
    if (type != other.type)
      return false;
    if (type == CUSTOM && custom_tag_name != other.custom_tag_name)
      return false;
    return true;
  }

  // Void elements (e.g. <br>, <img>) never have content or an end tag; they
  // are exactly the enumerators ordered before END_OF_VOID_TAGS.
  inline bool is_void() const { return type < END_OF_VOID_TAGS; }

  // Content-model check: may `tag` be nested directly inside this tag?
  // Returning false triggers an implicit end tag for this element (e.g. a
  // new <li> closes the previous <li>).
  inline bool can_contain(const Tag &tag) {
    TagType child = tag.type;
    switch (type) {
    case LI:
      return child != LI;
    case DT:
    case DD:
      return child != DT && child != DD;
    case P:
      // Block-level elements are not allowed inside a paragraph.
      return std::find(TAG_TYPES_NOT_ALLOWED_IN_PARAGRAPHS,
                       TAG_TYPES_NOT_ALLOWED_IN_PARAGRAPHS_END,
                       tag.type) == TAG_TYPES_NOT_ALLOWED_IN_PARAGRAPHS_END;
    case COLGROUP:
      return child == COL;
    case RB:
    case RT:
    case RP:
      return child != RB && child != RT && child != RP;
    case OPTGROUP:
      return child != OPTGROUP;
    case TR:
      return child != TR;
    case TD:
    case TH:
      return child != TD && child != TH && child != TR;
    default:
      return true;
    }
  }

  // Map a tag name (in the uppercase form used by the enum) to a Tag.
  // Unknown names become CUSTOM tags that remember their original name.
  static inline Tag for_name(const string &name) {
    map<string, TagType>::const_iterator type =
        TAG_TYPES_BY_TAG_NAME.find(name);
    if (type != TAG_TYPES_BY_TAG_NAME.end()) {
      return Tag(type->second, string());
    } else {
      return Tag(CUSTOM, name);
    }
  }
};
#endif
|
/* Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// See docs in ../ops/ctc_ops.cc.
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/bounds_check.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/ctc/ctc_loss_calculator.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
#ifdef _MSC_VER
#undef max
#endif
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
class CTCLossOp : public OpKernel {
typedef Eigen::Map<const Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic,
Eigen::RowMajor> >
InputMap;
typedef Eigen::Map<
Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> >
OutputMap;
public:
  // Reads and caches the op's attributes once at construction time; fails
  // kernel construction if either attribute is missing or mistyped.
  explicit CTCLossOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
    OP_REQUIRES_OK(ctx, ctx->GetAttr("preprocess_collapse_repeated",
                                     &preprocess_collapse_repeated_));
    OP_REQUIRES_OK(ctx,
                   ctx->GetAttr("ctc_merge_repeated", &ctc_merge_repeated_));
  }
void Compute(OpKernelContext* ctx) override {
const Tensor* inputs;
const Tensor* labels_indices;
const Tensor* labels_values;
const Tensor* seq_len;
OP_REQUIRES_OK(ctx, ctx->input("inputs", &inputs));
OP_REQUIRES_OK(ctx, ctx->input("labels_indices", &labels_indices));
OP_REQUIRES_OK(ctx, ctx->input("labels_values", &labels_values));
OP_REQUIRES_OK(ctx, ctx->input("sequence_length", &seq_len));
OP_REQUIRES(ctx, inputs->shape().dims() == 3,
errors::InvalidArgument("inputs is not a 3-Tensor"));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(seq_len->shape()),
errors::InvalidArgument("sequence_length is not a vector"));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(labels_indices->shape()),
errors::InvalidArgument("labels_indices is not a matrix"));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(labels_values->shape()),
errors::InvalidArgument("labels_values is not a vector"));
const TensorShape& inputs_shape = inputs->shape();
const int64 max_time = inputs_shape.dim_size(0);
const int64 batch_size = inputs_shape.dim_size(1);
const int64 num_classes_raw = inputs_shape.dim_size(2);
OP_REQUIRES(
ctx, FastBoundsCheck(num_classes_raw, std::numeric_limits<int>::max()),
errors::InvalidArgument("num_classes cannot exceed max int"));
const int num_classes = static_cast<const int>(num_classes_raw);
OP_REQUIRES(
ctx, batch_size == seq_len->dim_size(0),
errors::InvalidArgument("len(sequence_length) != batch_size. ",
"len(sequence_length): ", seq_len->dim_size(0),
" batch_size: ", batch_size));
auto seq_len_t = seq_len->vec<int32>();
OP_REQUIRES(ctx, labels_indices->dim_size(0) == labels_values->dim_size(0),
errors::InvalidArgument(
"labels_indices and labels_values must contain the "
"same number of rows, but saw shapes: ",
labels_indices->shape().DebugString(), " vs. ",
labels_values->shape().DebugString()));
TensorShape labels_shape({batch_size, max_time});
std::vector<int64> order{0, 1};
sparse::SparseTensor labels_sp(*labels_indices, *labels_values,
labels_shape, order);
Status labels_sp_valid = labels_sp.IndicesValid();
OP_REQUIRES(ctx, labels_sp_valid.ok(),
errors::InvalidArgument("label SparseTensor is not valid: ",
labels_sp_valid.error_message()));
ctc::CTCLossCalculator::LabelSequences labels_t(batch_size);
for (const auto& g : labels_sp.group({0})) { // iterate by batch
const int64 batch_indices = g.group()[0];
OP_REQUIRES(ctx, FastBoundsCheck(batch_indices, batch_size),
errors::InvalidArgument("labels batch index must be between ",
0, " and ", batch_size, " but saw: ",
batch_indices));
auto values = g.values<int32>();
std::vector<int>* b_values = &labels_t[batch_indices];
b_values->resize(values.size());
for (int i = 0; i < values.size(); ++i) (*b_values)[i] = values(i);
}
OP_REQUIRES(ctx, static_cast<size_t>(batch_size) == labels_t.size(),
errors::InvalidArgument("len(labels) != batch_size. ",
"len(labels): ", labels_t.size(),
" batch_size: ", batch_size));
for (int64 b = 0; b < batch_size; ++b) {
OP_REQUIRES(
ctx, seq_len_t(b) <= max_time,
errors::InvalidArgument("sequence_length(", b, ") <= ", max_time));
}
Tensor* loss = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output("loss", seq_len->shape(), &loss));
auto loss_t = loss->vec<float>();
Tensor* gradient;
OP_REQUIRES_OK(ctx,
ctx->allocate_output("gradient", inputs_shape, &gradient));
auto gradient_t = gradient->tensor<float, 3>();
auto inputs_t = inputs->tensor<float, 3>();
std::vector<OutputMap> gradient_list_t;
std::vector<InputMap> input_list_t;
for (std::size_t t = 0; t < max_time; ++t) {
input_list_t.emplace_back(inputs_t.data() + t * batch_size * num_classes,
batch_size, num_classes);
gradient_list_t.emplace_back(
gradient_t.data() + t * batch_size * num_classes, batch_size,
num_classes);
}
gradient_t.setZero();
// Assumption: the blank index is num_classes - 1
ctc::CTCLossCalculator ctc_loss_calculator(num_classes - 1, 0);
OP_REQUIRES_OK(ctx, ctc_loss_calculator.CalculateLoss(
seq_len_t, labels_t, input_list_t,
preprocess_collapse_repeated_, ctc_merge_repeated_,
&loss_t, &gradient_list_t));
}
private:
bool preprocess_collapse_repeated_;
bool ctc_merge_repeated_;
TF_DISALLOW_COPY_AND_ASSIGN(CTCLossOp);
};
REGISTER_KERNEL_BUILDER(Name("CTCLoss").Device(DEVICE_CPU), CTCLossOp);
} // end namespace tensorflow
|
Pollution and autism spectrum disorder
Monday 7 April 2014 5:30PM (view full episode)
Research from Harvard University suggests that autism spectrum disorder, which affects perhaps about 1 in 100 Australian children and, some say, is increasing in prevalence, may be partly caused by air pollutants. This can have enormous implications for housing near roads and industry.
Norman Swan: Hello from me, Norman Swan, and welcome to the Health Report. Today: Unlocking the secrets of the brain, new discoveries of tiny molecular machines in our nervous system which might hold the answers for problems like autism and schizophrenia; and speaking of autism … a link to air pollution. Research is suggesting that children’s brain development is put at risk with increased chances of autism spectrum disorder if their mothers were exposed to excessive air pollution during pregnancy.
The autism spectrum goes from a child with severe behavioural, language and intellectual disabilities, through to quite well functioning people but who nonetheless have trouble relating to others, reading their emotions, and expressing themselves. This isn’t some fringe finding; it comes from Harvard School of Public Health where Marc Weisskopf is Associate Professor of Occupational and Environmental Epidemiology.
Marc Weisskopf: Air pollution has been studied in relation to many things. In particular cardiovascular and respiratory symptoms; obviously brain development and exposure to the mother is a different issue. There has been a literature that’s evolved and has grown around effects of air pollution on birth outcomes such as birth weight, prematurity, things of that sort; but the idea that it might affect autism is relatively new and quite frankly I got into it because of a report out of California, suggesting in a study, in the sort of Bay area of California, that mothers who were in census tracts that had higher estimated pollutant levels, had a higher risk of giving birth to a child with autism.
Norman Swan: Who were living in areas where there are lots of freeways, lots of trucks passing by, diesel fumes and things like that?
Marc Weisskopf: Yes, this was based on an EPA model, or EPA models of different pollutants.
Norman Swan: Environmental Protection Agency.
Marc Weisskopf: Environmental Protection Agency models that takes into account many factors—traffic, other point sources from industry, weather patterns, things of that sort. But they did find something that I was initially sceptical of because I thought there’s too much error in the way they were assessing how a mother was exposed.
Norman Swan: Well precisely, I mean, if you’re living in one of those areas with lots of trucks going by and maybe under a flight path to an airport, then you probably don’t have as much money to live in a fancier neighbourhood which is away from the freeway and the airline routes.
Marc Weisskopf: That’s right. So there are huge opportunities for what, in epidemiology we call confounding, which is basically another variable that is explaining what you’re seeing. They try to address that by adjusting for different factors that are related to socioeconomics or aspects of that sort, and they did it to the best of their ability, and they did it reasonably well, as far as these studies go; that might explain it. But the other issue is just that with an estimate based on these models from the EPA that are not great, they can give you general ideas of who happens to be a little higher, a little lower, but they’re not terribly accurate and so my initial reaction was, when you have that kind of error in how you’re saying who’s exposed and who’s not, you tend not to see things; so the fact that they saw something was sort of surprising.
Norman Swan: What have you done?
Marc Weisskopf: We wanted to build on that and we, at Harvard, have these very large cohorts that are across the United States, the Nurses’ Health Study in particular, the Nurses Health Study II.
Norman Swan: And when you’re saying cohort, what you’re saying is that you’ve gathered a large group of people …
Marc Weisskopf: Yes.
Norman Swan: … in this case they were nurses and all female …
Marc Weisskopf: That’s right.
Norman Swan: … and you’ve been following them now for 20 or 30 years to see what their health …
Marc Weisskopf: Exactly.
Norman Swan: … turns out to be. You know lots about them.
Marc Weisskopf: We know lots about them. This was started in 1989. It was over 100,000 nurses at the time. It’s focussed on the health of the nurse. So we had to, later on … in the course of … they get followed every 2 years. They answer extensive questionnaires and we track where they are and what they’re doing and in 2005 we asked them if they had any children with autism and we went through a process to validate the ones who said they did …
Norman Swan: This is full blown autism, rather than autism spectrum disorder where they might just have a bit of communication difficulty?
Marc Weisskopf: I said ‘autism’, I should have said autism spectrum disorder because we specifically asked whether they had a child with either autism, Asperger’s or pervasive developmental disorder. So we were capturing a broader spectrum and it’s the maternal report. Now they’re all nurses, so would some of them potentially just have behaviours that are a little bit more extreme than normal but don’t meet true definition of autism? It’s possible; and the way we check that is we took a random sample of these mothers and we administered what’s considered a gold standard autism interview—the Autism Diagnostic Interview—and got very excellent agreement. Over 95 per cent met full autism … autism spectrum disorder, criteria.
Norman Swan: So if one of these mothers who had been a nurse told you that her child with autism spectrum disorder, they did indeed have a child with autism spectrum disorder.
Marc Weisskopf: Yes.
Norman Swan: So how did you assess the exposure to air pollution?
Marc Weisskopf: Really we started by using the exact same models that they used in the California study. We had births over a much wider period. The Environmental Protection Agency puts out a few of these—there are four of them in different years. They average over the year, they’re at the level of the census tract, and we simply compared the mothers who had reported a child with autism—compared where she was at the time of the pregnancy and what the census tract levels of different pollutants were for her—to the women who said they had no children with autism.
To my mind, somewhat surprisingly actually, we found results that were strikingly similar to what they saw in California. So I, at that point, went from being mildly sceptical to thinking there was something worth pursuing here.
Norman Swan: Now there are other ways of checking whether something’s cause and effect or just some sort of association and it’s something else that you’re observing rather than just the air pollution. So when it’s dosed, if this is cause and effect, the more air pollution they experienced, the higher the risk of a child with autism spectrum disorder. Did you find a dose effect?
Marc Weisskopf: We did. Though that said, one has to be a little careful because doses—there can be threshold effects, they don’t have to always be linear relations with the outcome. We, in general, found as you got higher with those estimates, you had a higher risk of autism spectrum disorders. It wasn’t perfect …
Norman Swan: And were you able to measure, for example, the exposure in pregnancy to the exposure in the first years of life?
Marc Weisskopf: That’s what I’ve been working on now, in fact. That original study used these exposure models that estimated over an entire year; and only in four different years. So for some of our children, we were assigning an estimated exposure from say 1999 to a child who’s mother was pregnant in 1998. So what we really wanted to do was say, well let’s now … it looks like something’s there. Let’s go after this with a much finer resolution tool; something where we have finer spatial and temporal resolution. We don’t have that for all of the pollutants that the EPA has in their models but one that we do have very good data on is particulate matter, which is basically just stuff in the air. We can size fractionate it, that is, we can look at small stuff that’s less than two and a half microns in diameter, we can look at the bigger stuff that’s up to ten microns in diameter; and we have very extensive models that the Nurses’ Health Study researchers, many of them have used, for example, to link air pollution to cardiovascular disease, to respiratory problems, to mortality in general.
It’s based on, again, EPA monitors that are around the entire country, meteorological factors; it’s a model then that takes those monitor data and incorporates meteorological conditions and incorporates land use patterns like traffic or other point sources; and it gets down to a resolution of a month—you could go even finer—but we have it as a month at a time.
So now we can look specifically at the pregnancy or even trimesters of pregnancy. We also have it on the spatial level that now we know it at her address.
Norman Swan: That’s extraordinary. What did you find?
Marc Weisskopf: We initially looked at the nine months. So we averaged over the nine months of the pregnancy, we considered the nine months prior to that, and the nine months after that … so when the child was born. And what we found was a significant association between higher particulate matter that was less than 2.5 microns, so not with the larger stuff, but less than 2.5, for that pregnancy period. Again in a dose response fashion; and we then went further and said, okay, if it’s the pregnancy period, when in the pregnancy period? And we broke it out again by trimester of pregnancy and found results that looked much more dramatic for the third trimester of pregnancy.
Norman Swan: For the third trimester? With brain development, wouldn’t you expect the second trimester?
Marc Weisskopf: It depends on which part of the brain you’re talking about. But certainly the third trimester you have a lot of neural development, you have a lot of synapse formation, you have a lot of growth going on.
Norman Swan: So in summary then, what you’ve found is that, when you really boil this down, these women are getting exposed in the third trimester, the last 3 months of pregnancy, thereabouts …
Marc Weisskopf: Right.
Norman Swan: … and they’re getting exposed to really tiny particles, which are those also that seem to be associated with heart disease and other problems …
Marc Weisskopf: That’s right.
Norman Swan: … tend to come from traffic, don’t they, these small particles?
Marc Weisskopf: That’s an issue. These small particles have many sources and those sources can in fact vary even across different areas in the United States. So between Australia and the United States, they could be vastly different; and even we know on the west coast, for example, they have a very different pattern than on the east coast because of a lot of the winds come over the water.
So we don’t yet know which aspect of—and this is one of the things we’re trying to pursue now is—what element of the particulate matter, if there is a particular one within that, that matters? Or is it just anything that happens to be two and a half?
Norman Swan: What could be—because the next step …cause and effect—so you’ve shown the time element, you’ve shown the exposure, you’ve shown that the story is related—so it’s getting …
pretty close to cause and effect, but the killer one is, what could possibly be the biological reasons? There are plenty of associations between pollutants and disease …
Marc Weisskopf: Right.
Norman Swan: … but they fall down when it comes to the biology because you can’t work out …
Marc Weisskopf: How that can …
Norman Swan: … a sensible biological explanation for why this would occur because you’ve got the lungs to filter these particles, you’ve got the placenta. You know the mother is designed to protect the foetus from lots of rubbish, why would these pollutants make a difference?
Marc Weisskopf: That is of course the 64,000 dollar question. We don’t know but we would like to know. What we do know is that exposure to these particles can cause things like immune dysfunction in the mother, inflammatory processes to be set off in the mother and these things can potentially pass through to the foetus and potentially affect the foetus as well.
Norman Swan: So it might be indirect rather than direct?
Marc Weisskopf: We think it’s probably indirect. There’s a lot of interest right now in these very fine particles and the possibility of brain effects in general, in part because of the possibility that they may actually go directly to the brain through the olfactory nerve, skipping the lungs and circulatory system.
Norman Swan: So you smell them in? They go into your nose and at the top of your nose there’s actually brain tissue at the top of your nose, and so it just might hit your brain tissue and …
Marc Weisskopf: Go right in, yes.
Norman Swan: That’s the mother and not the baby.
Marc Weisskopf: Exactly. So that’s the mother. So I … given that our findings are quite specific to this in utero period, I don’t suspect that’s happening; I suspect that it is an exposure to mother that is a more typical effect that engages some type of biological activity in the circulatory system that then gets passed to the placenta, to the foetus, to affect the development. Or perhaps affect simply oxygenation status, whatever it might be in terms of the placental health.
Norman Swan: Can you give a sense of the increased risk associated with living with exposure, you know, there’s a background risk of having a child with autism spectrum disorder, it is rare, and what’s the increased risk?
Marc Weisskopf: Right. So it is quite rare although of course there’s the whole idea that it’s getting more and more common. Right now in the US it’s, I think, one child in 88 is said to have an autism spectrum disorder. It’s less than that for girls; it’s much more common in boys. What we find, in our data, for what it’s worth is, when you go from the bottom 25 per cent to the top 25 per cent of exposure to this type of particulate matter, you increase the risk for having a child with autism by about 60 per cent, 50-60 per cent, something like that.
Norman Swan: So it’s not huge. It’s 1.6 times the size.
Marc Weisskopf: So it’s not huge, it’s small. That’s right.
Norman Swan: So people don’t need to panic too much about if they’re living in an area with reasonable pollution. You’d expect other effects here. You’d expect perhaps increased risk of premature delivery in pollution because it could affect placenta and things like that. Is there other stuff with the foetus that’s going on here that complicates the story?
Marc Weisskopf: Yes. You would expect other effects and in fact, as I alluded to earlier that there’s in fact a little earlier literature on that in fact with some very elegant studies suggesting indeed that this particular matter exposure or traffic related exposures can decrease birth weight or prematurity as well. That is out there prior to this autism literature. We have to be concerned about that because those things may be also related to autism so one question is, is what we’re seeing the result of changes in birth weight more risk of prematurity and therefore more autism?
One of the beauties of the Nurses’ Health Study is we have lots of data on these women and we have information on that and we’ve adjusted for it and it doesn’t affect our results; we still seem to see this independent of any effect on prematurity or birth weight.
Norman Swan: Now even though the Nurses’ Health Study is 120,000 women followed for many years, it’s still a small sample in population terms.
Marc Weisskopf: What you say is absolutely right. I should point out quite clearly that even though the Nurses’ Health Study is 100,000 people, they don’t all have children and they don’t all have children with autism and they don’t all have children with autism during the period where we have estimates for their exposures to particulate matter.
Norman Swan: So the sample …
Marc Weisskopf: So in the end, the sample was around 300 kids with autism and a couple thousand or three thousand kids without so it’s bigger than things that are out there but yes we would love to do a whole population study. I am exploring this now in Israel, in fact, looking at the entire population and trying to get similar data there.
Norman Swan: So if a woman lives near a freeway or in an industrial zone, what does she do with this information?
Marc Weisskopf: I guess part of this is where it comes in to the point you raised that the risk is raised but it’s still reasonably small so keep that in mind and don’t panic. I would say that if you want to be particularly careful or if you … if you have the means, it would be great to move somewhere else during that period; and I understand not everybody can do that. I mean, what I will say is that as these results get stronger and stronger—and there are other groups looking at this too—it does open up precisely that avenue for either public health interventions on a larger political entity scale like regulation to bring this stuff down, or individual behaviour change where yes you do move during critical periods or get away somehow. Now, obviously not everybody can do that but if we can understand better exactly what components are perhaps we can devise ways that, you know, you can protect yourself more in the home. I don’t think we’re there yet but that’s part of the idea behind this is we would like to have enough information so we can make policy changes, make behaviour change suggestions that would reduce the risk of having a child with autism.
Norman Swan: Marc Weisskopf is Associate Professor of Occupational and Environmental Epidemiology at Harvard School of Public Health in Boston.
Associate Professor Marc Weisskopf
Associate Professor of Environmental and Occupational Epidemiology
Harvard University School of Public Health
Dr Norman Swan
Brigitte Seega
|
#include <iostream>
#include <vector>
#include <string>
#include <fstream>
#include <cmath>
#include <map>
using namespace std;
int threads = 4;
/**
Projeto 01 - Implementação Paralela de uma Técnica de Inteligência Artificial
i) Tempo gasto para processamento no parcode, sem paralelização, usando o dataset data_final: entre 10 e 11s (real time)
ii) Código aberto utilizado para a versão sequencial. Pode ser encontrado aqui: https://github.com/lucasheriques/DecisionTree
iii) Mudanças realizadas usando o OpenMP estão comentadas
iv) Tempos de execução:
Sequencial: entre 10 e 11s
Paralela 1 thread:
Paralela 2 thread:
Paralela 4 thread:
Paralela 8 thread:
*/
// Column-oriented view of a tab-separated dataset: attribute names (header
// row), data rows, and the set of distinct values observed per attribute.
class Table {
public:
    std::vector<std::string> attrName;                     // one name per column
    std::vector<std::vector<std::string> > data;           // rows, same column order as attrName
    std::vector<std::vector<std::string> > attrValueList;  // distinct values per column (sorted)

    // Fills attrValueList with the distinct values of every column.
    // FIX: the previous "#pragma omp parallel for" over the map-insert loop
    // was removed — concurrent writes to a std::map are a data race
    // (std::map modification is not thread-safe), so this loop must stay
    // sequential.
    void extractAttrValue() {
        attrValueList.resize(attrName.size());
        for (std::size_t j = 0; j < attrName.size(); j++) {
            std::map<std::string, int> value;  // used for set semantics only
            for (std::size_t i = 0; i < data.size(); i++) {
                value[data[i][j]] = 1;
            }
            // std::map iterates in key order, so values come out sorted.
            for (auto iter = value.begin(); iter != value.end(); iter++) {
                attrValueList[j].push_back(iter->first);
            }
        }
    }
};
// One node of the decision tree. Nodes are stored flat in
// DecisionTree::tree and reference each other by index.
// FIX: criteriaAttrIndex and treeIndex were previously left uninitialized
// by the constructor; all members now have deterministic initial values.
class Node {
public:
    int criteriaAttrIndex{-1};  // attribute this node splits on (-1 = unset)
    std::string attrValue;      // attribute value on the edge from the parent
    int treeIndex{-1};          // this node's own index in the tree vector
    bool isLeaf;                // true once the node carries a final label
    std::string label;          // predicted class label (meaningful for leaves)
    std::vector<int> children;  // indices of child nodes

    Node() {
        isLeaf = false;
    }
};
// ID3/C4.5-style decision tree built from a Table of string attributes.
// The label is assumed to be the last column. Nodes live flat in `tree`
// and are linked by indices (index 0 is the root).
//
// FIX: several "#pragma omp parallel for" directives were removed because
// they were data races, not parallelizations:
//   - getSelectedAttribute raced on the shared max accumulator,
//   - getInfoAttrD / getSplitInfoAttrD raced on std::map insertion,
//   - printTree recursed and wrote to cout from parallel iterations,
//     interleaving and reordering the output.
// Also fixed: getGainRatio no longer divides by zero when an attribute has
// a single value, and is evaluated once per attribute instead of twice.
class DecisionTree {
public:
    Table initialTable;  // full training table; keeps global attribute value lists
    vector<Node> tree;   // flat node storage; index 0 is the root

    // Builds the tree from the training table and prints it.
    DecisionTree(Table table) {
        initialTable = table;
        initialTable.extractAttrValue();
        Node root;
        root.treeIndex = 0;
        tree.push_back(root);
        run(initialTable, 0);
        printTree(0, "");
        cout << "<-- finish generating decision tree -->" << endl << endl;
    }

    // Classifies one row; returns the reached leaf's label, or
    // "dfs failed" when no branch matches the row's attribute value.
    string guess(vector<string> row) {
        int leafNode = dfs(row, 0);
        if (leafNode == -1) {
            return "dfs failed";
        }
        return tree[leafNode].label;
    }

    // Walks from node `here` down the branch matching the row's value of
    // the node's split attribute; returns the leaf index or -1.
    int dfs(vector<string>& row, int here) {
        if (tree[here].isLeaf) {
            return here;
        }
        int criteriaAttrIndex = tree[here].criteriaAttrIndex;
        for (int i = 0; i < (int)tree[here].children.size(); i++) {
            int next = tree[here].children[i];
            if (row[criteriaAttrIndex] == tree[next].attrValue) {
                return dfs(row, next);
            }
        }
        return -1;
    }

    // Recursively grows the subtree rooted at nodeIndex from `table`.
    void run(Table table, int nodeIndex) {
        // All rows share one label -> pure leaf.
        if (isLeafNode(table) == true) {
            tree[nodeIndex].isLeaf = true;
            tree[nodeIndex].label = table.data.back().back();
            return;
        }

        int selectedAttrIndex = getSelectedAttribute(table);

        // Partition row indices by their value of the selected attribute.
        map<string, vector<int> > attrValueMap;
        for (int i = 0; i < (int)table.data.size(); i++) {
            attrValueMap[table.data[i][selectedAttrIndex]].push_back(i);
        }
        tree[nodeIndex].criteriaAttrIndex = selectedAttrIndex;

        // Pre-prune: if one label already dominates (> 80%), stop here.
        pair<string, int> majority = getMajorityLabel(table);
        if ((double)majority.second / table.data.size() > 0.8) {
            tree[nodeIndex].isLeaf = true;
            tree[nodeIndex].label = majority.first;
            return;
        }

        // One child per globally-known value of the selected attribute.
        for (int i = 0; i < (int)initialTable.attrValueList[selectedAttrIndex].size(); i++) {
            string attrValue = initialTable.attrValueList[selectedAttrIndex][i];

            Table nextTable;
            vector<int> candi = attrValueMap[attrValue];
            for (int j = 0; j < (int)candi.size(); j++) {  // renamed from shadowing `i`
                nextTable.data.push_back(table.data[candi[j]]);
            }

            Node nextNode;
            nextNode.attrValue = attrValue;
            nextNode.treeIndex = (int)tree.size();
            tree[nodeIndex].children.push_back(nextNode.treeIndex);
            tree.push_back(nextNode);

            if (nextTable.data.size() == 0) {
                // Empty partition: label with the parent's majority class.
                nextNode.isLeaf = true;
                nextNode.label = getMajorityLabel(table).first;
                tree[nextNode.treeIndex] = nextNode;
            } else {
                run(nextTable, nextNode.treeIndex);
            }
        }
    }

    // Pessimistic (upper-confidence) error estimate for a node with
    // observed error rate f over N samples; z = 0.69 is the confidence
    // factor. Exits if N == 0 (no samples to estimate from).
    double getEstimatedError(double f, int N) {
        double z = 0.69;
        if (N == 0) {
            cout << ":: getEstimatedError :: N is zero" << endl;
            exit(0);
        }
        return (f + z * z / (2 * N) + z * sqrt(f / N - f * f / N + z * z / (4 * N * N))) / (1 + z * z / N);
    }

    // Returns the most frequent label in `table` and its count.
    pair<string, int> getMajorityLabel(Table table) {
        string majorLabel = "";
        int majorCount = 0;
        map<string, int> labelCount;
        for (int i = 0; i < (int)table.data.size(); i++) {
            labelCount[table.data[i].back()]++;
            if (labelCount[table.data[i].back()] > majorCount) {
                majorCount = labelCount[table.data[i].back()];
                majorLabel = table.data[i].back();
            }
        }
        return {majorLabel, majorCount};
    }

    // True when every row in `table` carries the same label.
    bool isLeafNode(Table table) {
        for (int i = 1; i < (int)table.data.size(); i++) {
            if (table.data[0].back() != table.data[i].back()) {
                return false;
            }
        }
        return true;
    }

    // Picks the attribute with the highest gain ratio (label column
    // excluded); returns -1 if no attribute improves on 0.
    // FIX: the racy OpenMP pragma was removed (shared max accumulator) and
    // the gain ratio is now computed once per attribute instead of twice.
    int getSelectedAttribute(Table table) {
        int maxAttrIndex = -1;
        double maxAttrValue = 0.0;
        for (int i = 0; i < (int)initialTable.attrName.size() - 1; i++) {
            const double gainRatio = getGainRatio(table, i);
            if (maxAttrValue < gainRatio) {
                maxAttrValue = gainRatio;
                maxAttrIndex = i;
            }
        }
        return maxAttrIndex;
    }

    // C4.5 gain ratio = information gain / split information.
    // FIX: guards against division by zero when the attribute takes a
    // single value in `table` (split info 0 => ratio treated as 0, which
    // matches how the NaN result was effectively skipped before).
    double getGainRatio(Table table, int attrIndex) {
        const double splitInfo = getSplitInfoAttrD(table, attrIndex);
        if (splitInfo == 0.0) {
            return 0.0;
        }
        return getGain(table, attrIndex) / splitInfo;
    }

    // Entropy (in bits) of the label distribution of `table`.
    double getInfoD(Table table) {
        double ret = 0.0;
        int itemCount = (int)table.data.size();
        map<string, int> labelCount;
        for (int i = 0; i < (int)table.data.size(); i++) {
            labelCount[table.data[i].back()]++;
        }
        for (auto iter = labelCount.begin(); iter != labelCount.end(); iter++) {
            double p = (double)iter->second / itemCount;
            ret += -1.0 * p * log(p) / log(2);
        }
        return ret;
    }

    // Expected entropy after splitting `table` on attrIndex (weighted sum
    // of partition entropies). The partitioning loop must stay sequential:
    // concurrent std::map insertion is a data race.
    double getInfoAttrD(Table table, int attrIndex) {
        double ret = 0.0;
        int itemCount = (int)table.data.size();
        map<string, vector<int> > attrValueMap;
        for (int i = 0; i < (int)table.data.size(); i++) {
            attrValueMap[table.data[i][attrIndex]].push_back(i);
        }
        for (auto iter = attrValueMap.begin(); iter != attrValueMap.end(); iter++) {
            Table nextTable;
            for (int i = 0; i < (int)iter->second.size(); i++) {
                nextTable.data.push_back(table.data[iter->second[i]]);
            }
            int nextItemCount = (int)nextTable.data.size();
            ret += (double)nextItemCount / itemCount * getInfoD(nextTable);
        }
        return ret;
    }

    // Information gain of splitting `table` on attrIndex.
    double getGain(Table table, int attrIndex) {
        return getInfoD(table) - getInfoAttrD(table, attrIndex);
    }

    // Split information: entropy of the partition sizes induced by
    // attrIndex. Sequential for the same reason as getInfoAttrD.
    double getSplitInfoAttrD(Table table, int attrIndex) {
        double ret = 0.0;
        int itemCount = (int)table.data.size();
        map<string, vector<int> > attrValueMap;
        for (int i = 0; i < (int)table.data.size(); i++) {
            attrValueMap[table.data[i][attrIndex]].push_back(i);
        }
        for (auto iter = attrValueMap.begin(); iter != attrValueMap.end(); iter++) {
            Table nextTable;
            for (int i = 0; i < (int)iter->second.size(); i++) {
                nextTable.data.push_back(table.data[iter->second[i]]);
            }
            int nextItemCount = (int)nextTable.data.size();
            double d = (double)nextItemCount / itemCount;
            ret += -1.0 * d * log(d) / log(2);
        }
        return ret;
    }

    /*
     * Enumerates through all the nodes of the tree and prints all the
     * branches. FIX: the former parallel-for was removed — it recursed and
     * wrote to cout from concurrent iterations, interleaving the output.
     */
    void printTree(int nodeIndex, string branch) {
        if (tree[nodeIndex].isLeaf == true)
            cout << branch << "Label: " << tree[nodeIndex].label << "\n";

        for (int i = 0; i < (int)tree[nodeIndex].children.size(); i++) {
            int childIndex = tree[nodeIndex].children[i];
            string attributeName = initialTable.attrName[tree[nodeIndex].criteriaAttrIndex];
            string attributeValue = tree[childIndex].attrValue;
            printTree(childIndex, branch + attributeName + " = " + attributeValue + ", ");
        }
    }
};
// Reads a tab-separated file into a Table: the first line becomes the
// attribute header (attrName), every following line one data row.
class InputReader {
private:
    ifstream fin;   // input stream for the whole file's lifetime
    Table table;    // parsed result, exposed via getTable()

public:
    // Opens the file and parses it eagerly; exits the program if the file
    // cannot be opened.
    InputReader(string filename) {
        fin.open(filename);
        if (!fin) {
            cout << filename << " file could not be opened\n";
            exit(0);
        }
        parse();
    }

    // Splits each line on '\t'. The first row becomes table.attrName, all
    // later rows are appended to table.data.
    void parse() {
        string str;
        bool isAttrName = true;
        while (!getline(fin, str).eof()) {
            vector<string> row;
            int pre = 0;
            for (int i = 0; i < str.size(); i++) {
                if (str[i] == '\t') {
                    string col = str.substr(pre, i - pre);
                    row.push_back(col);
                    pre = i + 1;
                }
            }
            // NOTE(review): the final column drops the line's last character
            // (size()-pre-1) — presumably to strip a trailing '\r' from CRLF
            // input. On LF-only files this truncates the last column's final
            // character; TODO confirm against the expected input format.
            string col = str.substr(pre, str.size() - pre - 1);
            row.push_back(col);

            if (isAttrName) {
                table.attrName = row;
                isAttrName = false;
            } else {
                table.data.push_back(row);
            }
        }
    }

    // Returns a copy of the parsed table.
    Table getTable() {
        return table;
    }
};
// Writes result rows to an output file, one tab-separated line per row.
class OutputPrinter {
private:
    std::ofstream fout;  // open for the printer's whole lifetime

public:
    // Opens the output file; exits the program if it cannot be created.
    OutputPrinter(std::string filename) {
        fout.open(filename);
        if (!fout) {
            std::cout << filename << " file could not be opened\n";
            exit(0);
        }
    }

    // Joins the row's cells with single tab characters (no trailing tab).
    // FIX: the former "#pragma omp parallel for" was removed — concurrent
    // "ret += ..." on a shared std::string is a data race and would also
    // scramble the cell order.
    std::string joinByTab(std::vector<std::string> row) {
        std::string ret = "";
        for (std::size_t i = 0; i < row.size(); i++) {
            ret += row[i];
            if (i != row.size() - 1) {
                ret += '\t';
            }
        }
        return ret;
    }

    // Appends one line (plus newline) to the output file.
    void addLine(std::string str) {
        fout << str << endl;
    }
};
// Entry point: trains a decision tree on argv[1], classifies every row of
// argv[2], and writes the rows (with the predicted label appended) to
// argv[3].
int main(int argc, const char * argv[]) {
    if (argc != 4) {
        cout << "Please follow this format. dt.exe [train.txt] [test.txt] [result.txt]";
        return 0;
    }

    string trainFileName = argv[1];
    InputReader trainInputReader(trainFileName);
    DecisionTree decisionTree(trainInputReader.getTable());

    string testFileName = argv[2];
    InputReader testInputReader(testFileName);
    Table test = testInputReader.getTable();

    string resultFileName = argv[3];
    OutputPrinter outputPrinter(resultFileName);
    outputPrinter.addLine(outputPrinter.joinByTab(test.attrName));

    // FIX: the former "#pragma omp parallel for" was removed — every
    // iteration appends to the single shared ofstream, which is a data race
    // and emits the result rows in nondeterministic order.
    for (int i = 0; i < (int)test.data.size(); i++) {
        vector<string> result = test.data[i];
        result.push_back(decisionTree.guess(test.data[i]));
        outputPrinter.addLine(outputPrinter.joinByTab(result));
    }

    /* for answer check */
    /*
    InputReader answerInputReader("dt_answer1.txt");
    Table answer = answerInputReader.getTable();
    int totalCount = (int)answer.data.size();
    int hitCount = 0;
    for(int i=0;i < test.data.size(); i++) {
        if(answer.data[i].back() == decisionTree.guess(test.data[i])) {
            hitCount++;
        }
    }
    cout << "Accuracy: " << (double)hitCount/totalCount*100 << "%";
    cout << "(" << hitCount << "/" << totalCount << ")" << endl;
    */
    return 0;
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#pragma once
#include "byte_array_comparable.h"
#include "cell.h"
#include "filter_base.h"
// Base class for HBase filters that compare one component of a cell (row
// key, family, qualifier, or value) against a ByteArrayComparable using a
// CompareOp relation. Declaration-only header; concrete filters derive
// from this and implement GetName() / ToByteArray().
class CompareFilter : public FilterBase {
 public:
  // Comparison relation applied between the cell component and the
  // configured comparator.
  enum class CompareOp {
    /** less than */
    LESS,
    /** less than or equal to */
    LESS_OR_EQUAL,
    /** equals */
    EQUAL,
    /** not equal */
    NOT_EQUAL,
    /** greater than or equal to */
    GREATER_OR_EQUAL,
    /** greater than */
    GREATER,
    /** no operation */
    NO_OP
  };

  CompareFilter();
  // NOTE(review): comparator is held as a raw pointer member below;
  // ownership/lifetime is not expressed in this header — confirm who
  // deletes it.
  CompareFilter(const CompareOp &compare_op, const ByteArrayComparable *comparator);

  // Accessors for the configured relation and comparator.
  const CompareFilter::CompareOp & GetOperator();
  const ByteArrayComparable *GetComparator();

  virtual ~CompareFilter();

  // Returns true when the row identified by its first cell should be
  // filtered out.
  bool FilterRowKey(const Cell &first_row_cell);

  // TODO
  // public static ArrayList<Object> extractArguments(ArrayList<byte []> filterArguments)
  // FilterProtos.CompareFilter convert()
  // boolean areSerializedFieldsEqual(Filter o)
  // public String toString()

  // Serializes this filter to its protobuf representation.
  std::unique_ptr<google::protobuf::Message> Convert();

  // String / protobuf forms of the configured CompareOp.
  const std::string &GetCompareOpName();
  const hbase::pb::CompareType &GetPbCompareOp();

  // Implemented by concrete subclasses.
  virtual const char *GetName() = 0;
  virtual bool ToByteArray(hbase::pb::Filter &filter) = 0;

 protected:
  // Compare helpers for each cell component.
  // NOTE(review): CompareRow is PascalCase while the three below are
  // camelCase — inconsistent naming in this header.
  bool CompareRow(const CompareOp &compare_op, const ByteArrayComparable *comparator, const Cell &cell);
  bool compareFamily(const CompareOp &compare_op, const ByteArrayComparable *comparator, const Cell &cell);
  bool compareQualifier(const CompareOp &compare_op, const ByteArrayComparable *comparator, const Cell &cell);
  bool compareValue(const CompareOp &compare_op, const ByteArrayComparable *comparator, const Cell &cell);

  CompareOp compare_op_;                // configured relation
  ByteArrayComparable *comparator_;     // raw pointer; see ownership note above

 private:
  // Maps a raw comparison result through compare_op to a boolean verdict.
  bool Compare(const CompareOp &compare_op, const int &compare_result);
  // Cache the string / protobuf forms of compare_op.
  void SetCompareOpName(const CompareOp &compare_op);
  void SetPbCompareOp(const CompareOp &compare_op);

  std::string compare_op_name_;         // cached name for GetCompareOpName()
  hbase::pb::CompareType pb_compare_op_;  // cached protobuf enum value
};
|
#ifndef AL_OPENCL_IMAGE3D_H
#define AL_OPENCL_IMAGE3D_H 1
#include "al_OpenCLInternal.hpp"
#include "al_OpenCLContext.hpp"
#include "al_OpenCLMemoryBuffer.hpp"
#include <vector>
#include <string>
using std::vector;
using std::list;
using std::string;
namespace al {
namespace cl {
/// 3D image buffer allocated on an OpenCL device.
///
/// Thin wrapper around a cl_mem image object; creation and read-back of
/// rectangular 3D regions are declared here and implemented elsewhere.
class OpenCLImage3D : public OpenCLMemoryBuffer {
public:
    OpenCLImage3D(cl_mem mem=0)
        : OpenCLMemoryBuffer(mem)
    {}

    virtual ~OpenCLImage3D() {}

    /// Create a 3D image of width x height x depth texels with the given
    /// format; rowstride/planestride describe the layout of host data at ptr.
    void create(
        OpenCLContext &ctx,
        cl_mem_flags usage,
        const cl_image_format *format,
        size_t width,
        size_t height,
        size_t depth,
        size_t rowstride,
        size_t planestride,
        void *ptr
    );

    /// Create a 3D image sized/formatted from the given AlloArray.
    virtual void create(
        OpenCLContext &ctx,
        cl_mem_flags usage,
        AlloArray *array
    );

    /// Linear (offset/size) reads are not meaningful for image objects;
    /// use the origin/region overload below instead.
    /// NOTE(review): this non-virtual overload hides the base-class
    /// enqueue_read overload set — confirm callers use the region variant.
    OpenCLEvent enqueue_read(
        OpenCLCommandQueue &queue,
        bool block,
        size_t offset,
        size_t size,
        void *ptr
    ) { printf("NOT IMPLEMENTED for IMAGE3D\n"); return OpenCLEvent(0); } // fixed: message previously said "IMAGE2D"

    /// Read the whole image back into the given AlloArray.
    virtual OpenCLEvent enqueue_read(
        OpenCLCommandQueue &queue,
        bool block,
        size_t offset,
        AlloArray *array
    );

    /// Read a 3D region (origin/region given in texels) into host memory at
    /// ptr, laid out with the given row and plane strides.
    OpenCLEvent enqueue_read(
        OpenCLCommandQueue &queue,
        bool block,
        const size_t origin[3],
        const size_t region[3],
        size_t rowstride,
        size_t planestride,
        void *ptr
    );

protected:
};
} // cl::
} // al::
#endif // AL_OPENCL_IMAGE3D_H
|
Stop doing -and calling it- Link Building: It’s Reference Building now!
As we are all seeing, Google is quickly changing the way it evaluates links, taking social signals and authority factors more and more into consideration -with special focus on its own social network- and giving less weight to those links that could be the result of manipulative tactics, such as link building schemes, by penalizing link networks.
Some days ago Rand shared an interesting post written by Ed Fryand, “Building Awesome Relationships For Links, Likes, and Love” and commented in Twitter that link building is “relationship building”:
As you can see my response to his tweet was that for me link building is “reference building” and here is why:
What are References?
According to the Wikipedia:
Reference is a relation between objects in which one object designates, or acts as a means by which to connect to or link to, another object. The first object in this relation is said to refer to the second object. The second object – the one to which the first object refers – is called the referent of the first object.
In summary, a “reference” happens when one object connects to or refers to another. In SEO a reference can encompass links, mentions or votes: the signals that a user creates to “refer” a site (or a related brand) and that can increase a site’s authority in the eyes of search engines.
In a time when links were the only type of signal that search engines took into consideration to identify trust and authority it made sense to talk about “link building”.
Nonetheless this time has ended already and we work now in an ecosystem where users can refer a site or a brand with mentions (in Twitter or Google Plus, for example) or votes (with a +1 in Google or Like in Facebook) not just links, and these are also taken into consideration more and more by search engines.
On the other hand, it’s actually far easier to explain to “non-SEO” people how Off-Page SEO works when you talk about “reference building” instead of “link building”, since a “reference” is a well known concept that is more easily related to votes or endorsements than “links”.
But why not directly “Relationship Building”?
A relationship is “the state of being connected or related” and it is a concept used to describe an ongoing, recurrent association.
In this sense creating “relationships” should be seen as a scalable strategy to get long-term, consistent “references” (which is actually something really important since scalability and consistency over time has been one of the most challenging aspects of link building… and reference building now).
Reference Building: Links, Mentions, Votes
At the same time,
• Not all references are the result of a relationship: Like a first-time, spontaneous +1 at a page you had no idea it existed before, but its result in Google has answered a specific information need so you refer it, but you might never do it again,
• A relationship starts with an initial reference: You need to have an initial *positive* interaction with the site or brand that will make you refer it in the first place… and then *possibly* start a recurrent connection with it.
Start Building References and Creating Relationships
The importance of updating the concept of “link building” to “reference building” is that it conveys the necessity we have to develop interdisciplinary activities -with content marketing, social media, etc.- that were not necessarily taken into consideration before for the typical link building process and for which it is sometimes difficult to find support or resources.
Are you already creating interesting content (news, Q+A, glossaries, reviews), useful resources (applications, infographics), attractive promotions (contests, giveaways, events), using social networks to distribute them and interacting with your audience, amplifying your online presence, establishing relationships in order to build recurrent references that will lead to more visits and conversions?
I hope the answer is yes, otherwise you are already behind and you should start with “reference building” now!
No comments yet.
Leave a Reply
|
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2015 Cloudius Systems, Ltd.
*/
#ifndef LOG_HH_
#define LOG_HH_
#include "core/sstring.hh"
#include <unordered_map>
#include <exception>
#include <iosfwd>
#include <atomic>
#include <mutex>
#include <boost/lexical_cast.hpp>
/// \addtogroup logging
/// @{
namespace seastar {
/// \brief log level used with \see {logger}
/// used with the logger.do_log method.
/// Levels are in increasing order. That is if you want to see debug(3) logs you
/// will also see error(0), warn(1), info(2).
///
enum class log_level {
error, // most severe; visible at every enabled level (see logger::is_enabled)
warn,
info, // default level of a freshly constructed logger
debug,
trace, // most verbose
};
std::ostream& operator<<(std::ostream& out, log_level level);
std::istream& operator>>(std::istream& in, log_level& level);
}
// Boost doesn't auto-deduce the existence of the streaming operators for some reason
namespace boost {
template<>
seastar::log_level lexical_cast(const std::string& source);
}
namespace seastar {
class logger;
class logger_registry;
/// \brief Logger class for stdout or syslog.
///
/// Java style api for logging.
/// \code {.cpp}
/// static seastar::logger logger("lsa-api");
/// logger.info("Triggering compaction");
/// \endcode
/// The output format is: (depending on level)
/// DEBUG %Y-%m-%d %T,%03d [shard 0] - "your msg" \n
///
class logger {
sstring _name; // logger/module name, printed with each line
std::atomic<log_level> _level = { log_level::info }; // per-logger threshold; relaxed atomics, safe to read from any shard
static std::atomic<bool> _stdout; // process-wide: mirror output to stdout
static std::atomic<bool> _syslog; // process-wide: mirror output to syslog
private:
// Type-erasing wrapper so the variadic front end (do_log) can hand its
// arguments to the non-template back end (really_do_log) as an array of
// stream-appendable objects.
struct stringer {
// no need for virtual dtor, since not dynamically destroyed
virtual void append(std::ostream& os) = 0;
};
// Concrete stringer holding a reference to one format argument; only valid
// for the duration of the really_do_log call (see do_log's lambda trick).
template <typename Arg>
struct stringer_for final : stringer {
explicit stringer_for(const Arg& arg) : arg(arg) {}
const Arg& arg;
virtual void append(std::ostream& os) override {
os << arg;
}
};
// Variadic front end: wraps args in stringer_for and forwards to really_do_log.
template <typename... Args>
void do_log(log_level level, const char* fmt, Args&&... args);
// Non-template back end that performs the actual formatting/output.
void really_do_log(log_level level, const char* fmt, stringer** stringers, size_t n);
// Last-resort handler invoked when formatting/output itself throws.
void failed_to_log(std::exception_ptr ex);
public:
explicit logger(sstring name);
logger(logger&& x);
~logger();
// True when running on shard 0; used by info0() to log once per process.
bool is_shard_zero();
/// Test if desired log level is enabled
///
/// \param level - enum level value (info|error...)
/// \return true if the log level has been enabled.
bool is_enabled(log_level level) const {
return level <= _level.load(std::memory_order_relaxed);
}
/// logs to desired level if enabled, otherwise we ignore the log line
///
/// \param fmt - printf style format
/// \param args - args to print string
///
template <typename... Args>
void log(log_level level, const char* fmt, Args&&... args) {
if (is_enabled(level)) {
try {
do_log(level, fmt, std::forward<Args>(args)...);
} catch (...) {
// Logging must never propagate exceptions to the caller; record the
// failure instead.
failed_to_log(std::current_exception());
}
}
}
/// Log with error tag:
/// ERROR %Y-%m-%d %T,%03d [shard 0] - "your msg" \n
///
/// \param fmt - printf style format
/// \param args - args to print string
///
template <typename... Args>
void error(const char* fmt, Args&&... args) {
log(log_level::error, fmt, std::forward<Args>(args)...);
}
/// Log with warning tag:
/// WARN %Y-%m-%d %T,%03d [shard 0] - "your msg" \n
///
/// \param fmt - printf style format
/// \param args - args to print string
///
template <typename... Args>
void warn(const char* fmt, Args&&... args) {
log(log_level::warn, fmt, std::forward<Args>(args)...);
}
/// Log with info tag:
/// INFO %Y-%m-%d %T,%03d [shard 0] - "your msg" \n
///
/// \param fmt - printf style format
/// \param args - args to print string
///
template <typename... Args>
void info(const char* fmt, Args&&... args) {
log(log_level::info, fmt, std::forward<Args>(args)...);
}
/// Log with info tag on shard zero only:
/// INFO %Y-%m-%d %T,%03d [shard 0] - "your msg" \n
///
/// \param fmt - printf style format
/// \param args - args to print string
///
template <typename... Args>
void info0(const char* fmt, Args&&... args) {
if (is_shard_zero()) {
log(log_level::info, fmt, std::forward<Args>(args)...);
}
}
/// Log with debug tag:
/// DEBUG %Y-%m-%d %T,%03d [shard 0] - "your msg" \n
///
/// \param fmt - printf style format
/// \param args - args to print string
///
template <typename... Args>
void debug(const char* fmt, Args&&... args) {
log(log_level::debug, fmt, std::forward<Args>(args)...);
}
/// Log with trace tag:
/// TRACE %Y-%m-%d %T,%03d [shard 0] - "your msg" \n
///
/// \param fmt - printf style format
/// \param args - args to print string
///
template <typename... Args>
void trace(const char* fmt, Args&&... args) {
log(log_level::trace, fmt, std::forward<Args>(args)...);
}
/// \return name of the logger. Usually one logger per module
///
const sstring& name() const {
return _name;
}
/// \return current log level for this logger
///
log_level level() const {
return _level.load(std::memory_order_relaxed);
}
/// \param level - set the log level
///
void set_level(log_level level) {
_level.store(level, std::memory_order_relaxed);
}
/// Also output to stdout. default is true
static void set_stdout_enabled(bool enabled);
/// Also output to syslog. default is false
///
/// NOTE: syslog() can block, which will stall the reactor thread.
/// this should be rare (will have to fill the pipe buffer
/// before syslogd can clear it) but can happen.
static void set_syslog_enabled(bool enabled);
};
/// \brief used to keep a static registry of loggers
/// since the typical use case is to do:
/// \code {.cpp}
/// static seastar::logger("my_module");
/// \endcode
/// this class is used to wrap around the static map
/// that holds pointers to all logs
///
class logger_registry {
mutable std::mutex _mutex; // guards _loggers; every public method locks it
std::unordered_map<sstring, logger*> _loggers; // non-owning: loggers register/unregister themselves
public:
/// loops through all registered loggers and sets the log level
/// Note: this method locks
///
/// \param level - desired level: error,info,...
void set_all_loggers_level(log_level level);
/// Given a name for a logger returns the log_level enum
/// Note: this method locks
///
/// \return log_level for the given logger name
log_level get_logger_level(sstring name) const;
/// Sets the log level for a given logger
/// Note: this method locks
///
/// \param name - name of logger
/// \param level - desired level of logging
void set_logger_level(sstring name, log_level level);
/// Returns a list of registered loggers
/// Note: this method locks
///
/// \return all registered loggers
std::vector<sstring> get_all_logger_names();
/// Registers a logger with the static map
/// Note: this method locks
///
void register_logger(logger* l);
/// Unregisters a logger with the static map
/// Note: this method locks
///
void unregister_logger(logger* l);
/// Swaps the logger given the from->name() in the static map
/// Note: this method locks
///
void moved(logger* from, logger* to);
};
logger_registry& global_logger_registry();
/// Timestamp style for log output (see logging_settings::stdout_timestamp_style).
enum class logger_timestamp_style {
none, // no timestamp prefix
boot, // presumably time relative to boot — confirm against implementation
real, // presumably wall-clock time — confirm against implementation
};
/// One-shot logging configuration consumed by apply_logging_settings().
struct logging_settings final {
std::unordered_map<sstring, log_level> logger_levels; // per-logger levels, keyed by logger name
log_level default_level; // level applied to loggers not listed above — confirm against implementation
bool stdout_enabled; // mirror output to stdout (see logger::set_stdout_enabled)
bool syslog_enabled; // mirror output to syslog (see logger::set_syslog_enabled)
logger_timestamp_style stdout_timestamp_style = logger_timestamp_style::real;
};
/// Shortcut for configuring the logging system all at once.
///
void apply_logging_settings(const logging_settings&);
/// \cond internal
extern thread_local uint64_t logging_failures;
sstring pretty_type_name(const std::type_info&);
sstring level_name(log_level level);
/// Convenience logger whose name is derived from the type T via
/// pretty_type_name(typeid(T)) — one logger per tag type, no string literal needed.
template <typename T>
class logger_for : public logger {
public:
logger_for() : logger(pretty_type_name(typeid(T))) {}
};
template <typename... Args>
void
logger::do_log(log_level level, const char* fmt, Args&&... args) {
// Wrap each argument in a type-erasing stringer_for so the non-template
// really_do_log can stream them. The immediately-invoked lambda guarantees
// the stringer_for temporaries (and the references they hold) stay alive
// for the full duration of the really_do_log call.
[&](auto&&... stringers) {
stringer* s[sizeof...(stringers)] = {&stringers...};
this->really_do_log(level, fmt, s, sizeof...(stringers));
} (stringer_for<Args>(std::forward<Args>(args))...);
}
/// \endcond
} // end seastar namespace
// Pretty-printer for exceptions to be logged, e.g., std::current_exception().
namespace std {
std::ostream& operator<<(std::ostream&, const std::exception_ptr&);
std::ostream& operator<<(std::ostream&, const std::exception&);
std::ostream& operator<<(std::ostream&, const std::system_error&);
}
#endif /* LOG_HH_ */
/// @}
|
Theresa Marie Model Ship Project
Theresa Marie Model Ship Project
Week 58: Anchors and Masts (Sept 30 - Oct 6, 2013)
Anchors. The kit comes with plastic stocks and arms for two large and two smaller anchors. In the supplied directions from SC&H it actually says this about the smaller set: "We don't normally put ours on for sailing as they're just another hook to get caught."
With that in mind I decided to just make one set of anchors by shortening the larger anchor stocks, shortening the smaller anchors arms, and then gluing them together in parallel instead of perpendicular. This way they can fit flat against the bow with the stocks sitting on top of the catheads and not get in the way of my bowchasers, swivel guns or gun ports. Maybe not prototypical, but I think it looks better than awkwardly draping them over the bulwarks.
Next I scored up the plastic stocks to look more like wood grain and painted them brown.
I went for a shorter and less-in-the-way style of anchor.
Masts. I took out all the supplied masts and spars and started figuring out what goes where. It looks like my shipyard and stand are just the right size to fit the topmasts on without hitting the ceiling. The stand will have to go on the floor for the topgallants.
I've also reached the point in my build where I decided to take out the unused kit parts from the shipping box and push the shipyard back against the wall. Made the Namesake happy.
The masts are designed to bend back on a hinge for easier transport, so I cut the six wooden pegs that are used to lock them in place along with two metal pegs and did some sanding until everything fit together. I plan to install the masts that way, but hopefully she'll fit in our Escape with the main masts in place.
Lots of things need attaching to the masts and spars like jibboom saddles, jibsheet blocks and numerous brass pins that I'll need to figure out before proceeding. But just seeing the main masts and bowsprit in place sure makes a difference in her look.
Boomer was suspiciously interested in my masts and spars.
Figuring out what is what. More maritime terms to learn.
What a difference the masts make.
I can almost see her sailing into a sunset.
I'll build her to fold, but hope I won't have to.
Week 60
|
Enterprise Architecture
Disciplined Agile Enterprise Architecture
The Agile Enterprise Architecture process blade overviews how a disciplined agile EA team will work. An agile enterprise architecture is flexible, easily extended, and easily evolved collection of structures and processes upon which your organization is built. The act of agile enterprise architecture is the collaborative and evolutionary exploration and potential modelling of an organization’s architectural ecosystem in a context-sensitive manner. The implications are that enterprise architects must be willing to work in a collaborative and flexible manner AND IT delivery teams must be willing to work closely with enterprise architects.
This article is organized into the following topics:
Why Enterprise Architecture?
Enterprise architecture, when performed in a disciplined agile manner, is an important enabler of agile software delivery. This is true for several reasons:
1. Common architecture enables agile teams to focus on value creation. A common enterprise architecture enables reuse across delivery teams. When agile teams have high-quality assets – such as micro-services, legacy data sources, and frameworks – available to reuse they are able to focus on creating new value for their stakeholders and not on reinventing new versions of existing infrastructure.
2. Common technical guidance enables greater consistency. When teams follow effective, common conventions, it results in greater quality. This makes it easier to learn about assets that are new to them, in particular existing source code, and to evolve those assets as needed. Greater consistency also makes it easier for people to move between teams because it will be easier for them to come up to speed on what the new team is doing and to share their skills with those team members.
3. Agile architectures enable disaggregation. When your solutions are built from loosely coupled, highly cohesive components it is easier to spread development work across smaller teams. This reduces overall risk and organizational complexity, which in turn reduces time-to-delivery.
4. Common infrastructure enables continuous delivery. When there is a common technical infrastructure to IT delivery teams to deploy into it is easier to deploy. The easier it is to deploy, the more often it makes sense to deploy.
5. Enterprise architecture scales agile. A disciplined agile approach to enterprise architecture enables organizations to scale agile strategies “horizontally” across their entire IT department.
The EA Process
Some methods will choose to prescribe a single approach, such as capturing architectural requirements in the form of epics or pre-building “architectural runways,” but the Disciplined Agile (DA) framework promotes an adaptive, context-sensitive strategy. DA does this via its goal-driven approach that indicates the process decision points you need to consider, a range of techniques or strategies for you to address each decision point, and the advantages and disadvantages of each technique. In this section we present the goal diagram for the Enterprise Architecture process blade and overview its process decision points.
The following diagram overviews the potential activities associated with Disciplined Agile Enterprise Architecture.
Disciplined Agile Enterprise Architecture. The process decision points that you need to consider for enterprise architecture are:
1. Support stakeholders. Enterprise architects will work with business and IT stakeholders on a regular basis to understand their needs and to help them develop a vision for the organization.
2. Support delivery teams. Enterprise architects will work with IT delivery teams, and ideally be active members of IT delivery teams, on a regular basis. They may guide the teams in the business and technical roadmaps, help them to identify potentially reusable assets, to identify technical debt, and transfer their skills and knowledge to team members.
3. Negotiate technical dependencies. Like it or not, there are dependencies between the solutions that we create. For example, if your system invokes a web service, or calls an API, provided by another system then you have a dependency on that system. Enterprise architects will often find that they need to negotiate these dependencies with other teams, either at a high-level in their role of Enterprise Architect or sometimes at a detailed level in their role of Architecture Owner on a delivery team.
4. Explore architectural views. Organizations are complex and as a result they must be understood from a variety of view points. It’s not just a matter of writing “architectural epics” on a collection of index cards.
5. Tailor architectural framework. The enterprise architecture team may choose to adopt, and likely tailor, an existing enterprise architecture framework. These frameworks typically suggest a multi-view collection of artifacts to create and techniques for doing so.
6. Evolve enterprise architecture. Enterprise architects will collaborate with one another, and with their stakeholders, in a variety of ways. They may choose to hold architecture envisioning/modeling sessions or regular meetings where they share learnings with one another. They will often work together, or with IT delivery teams, to investigate new technologies or identify candidate architecture strategies.
7. Evolve roadmap(s). An important output of your enterprise architecture effort will be one or more roadmaps describing your technology strategies and/or your architectural strategies. In agile organizations this roadmapping occurs in a rolling wave approach where the roadmap(s) are updated regularly.
8. Capture enterprise architecture. There are two broad categories for how enterprise architects can capture their work: as documents or as working/executable examples. High-level models work well for documentation, although sometimes you may find the need for detailed documentation as well. Executable artifacts, such as executable reference architectures or architectural runways, are usually preferred over documentation by delivery teams.
9. Govern architecture. Architectural activities within your organization should be governed in a lightweight, collaborative manner. This is an important activity for enterprise architects as well as for your IT governance team.
Workflow With Other IT Teams
The following diagram overviews the major workflows that your disciplined agile enterprise architecture activities are associated with. Note that feedback is implied in the diagram. For example, where you see the Technology Roadmap and Guidance flow from Enterprise Architecture to Reuse Engineering there is an implied feedback loop from the reuse engineers to the enterprise architects. Also note that the workflows do not necessarily imply that artifacts exist. For example, some of the guidance provided by enterprise architects may be discussions with their stakeholders.
Disciplined Agile Enterprise Architecture Workflow
The following table summarizes the workflows depicted in the diagram.
Process Blade Process Blade Overview Workflow with EA
IT Delivery Addresses how to develop solutions in a disciplined agile manner. This includes the four lifecycles – basic/agile, advanced/lean, continuous delivery, and exploratory – supported by the DAD framework plus the program management blade (effectively a large team following one or more of the lifecycles). Enterprise architecture will provide guidance to the IT delivery teams in the form of coaching and mentoring the teams in architectural issues, providing the technology roadmap, and providing development guidelines (such as coding conventions, security conventions, database guidelines, and so on).
Continuous Improvement Addresses how to support process and organizational structure improvement across teams in a lightweight, collaborative manner; how to support improvement experiments within teams; and how to govern process improvement with your IT department. The continuous improvement activities will provide potential improvement suggestions for improving enterprise architecture efforts. Similarly, the EA team may have insights to share with the rest of the organization.
Data Management Addresses how to improve data quality, evolve data assets such as master data and test data, and govern data activities within your organization. Enterprise architecture will provide guidance to the data management activities. Operational intelligence pertaining to production data sources and data activities will be made available to the EA team to support their long-term planning efforts.
Operations Addresses how to run systems, evolve the IT infrastructure, manage change within the operational ecosystem, mitigate disasters, and govern IT operations. Enterprise architecture will provide a technology roadmap and guidance to the operations efforts so that their efforts to evolve the IT infrastructure reflect the overall organizational strategy. Operational intelligence will be used by the EA team to provide insights into the effectiveness of various architectural strategies.
Portfolio Management Addresses how to identify potential business value that could be supported by IT endeavors, explore those potential endeavors to understand them in greater detail, prioritize those potential endeavours, initiate the endeavours, manage vendors, and govern the IT portfolio. Enterprise architecture provides the technology roadmap to portfolio management. The roadmap is used as input into identifying potential business value that could be supported by IT and into prioritization decisions.
Program Management Addresses strategies for managing large product/project teams, allocating requirements between sub teams, managing dependencies between sub teams, coordinating the sub teams, and governing a program. Enterprise architecture will provide guidance to large IT delivery teams (programs) in the form of coaching and mentoring the teams in architectural issues, providing the technology roadmap, and providing development guidelines (such as coding conventions, security conventions, database guidelines, and so on).
Release Management Addresses strategies for planning the IT release schedule, coordinating releases of solutions, managing the release infrastructure, supporting delivery teams, and governing the release management efforts. Enterprise architecture will provide the technology roadmap and guidance to the release management efforts so that their planning efforts reflect the direction of the overall organization. Operational intelligence will be used by the EA team to provide insights into the impact of current architectural strategies on the overall release effort.
Reuse Engineering Addresses how to identify and obtain reusable assets, publish the assets so that they are available to be reused, support delivery teams in reusing the assets, evolving those assets over time, and governing the reuse efforts. Enterprise architecture will provide the technology roadmap and guidance to the reuse management efforts so that they can better identify potentially reusable assets. Reuse intelligence will be used by the EA team to provide insights into where to focus technical debt pay down efforts.
Support Addresses how to adopt an IT support strategy, to escalate incidents, to effectively address the incidents, and govern the IT support effort. Enterprise architecture will provide the technology roadmap and guidance to the support team so that they can better understand the overall IT ecosystem and the direction it is going in. Operational intelligence will be used by the EA team to provide insights into the supportability of the various solutions in production.
Workflow Within the Team
The workflow within a disciplined agile enterprise architecture team is depicted in the following diagram.
Agile Enterprise Architecture Process
There are four major activities:
1. Envision initial architecture. The enterprise architects will spend several days developing initial, high-level models of the enterprise architecture. This will be a face-to-face, initial architecture envisioning session where the scope is the entire organization, not just a single IT solution. Ideally this is done in an agile modelling room so as to streamline the communication and collaborative modelling efforts. Such a room is large with lots of whiteboard space, enabling the team to work on several models in parallel (each of which has its own section of wall space). The primary purpose of this session is for the EA team to develop a common understanding, at least a high level, of the current state of the enterprise architecture and a vision for how the team would like to see it evolve. Secondary outcomes include creating some initial artifacts which the enterprise architects will evolve over time, (potentially) meeting one another for the first time, and building bonds between the team members. Potential challenges to this activity include getting an agile modeling room (you may have to convert an existing room, or accept lower productivity if you can’t get access to such a room) and the logistics of getting the right people together at the same time.
2. Collaborate with business stakeholders. On a regular basis enterprise architects work with business stakeholders to understand their needs, work with them to envision the future, and help educate them on the possibilities and constraints of technology. This collaboration may be in the form of working sessions, presentations, or one-on-one conversations. These sessions occur as needed and at times it can be difficult to gain access to stakeholders as they are often very busy people.
3. Collaborate with IT stakeholders. Disciplined agile EAs will spend the majority of their time, 80 to 90% of it typically, working as members of IT delivery teams. By doing this they bring their knowledge, vision, and skills to the team in a pragmatic, hands-on manner. On Disciplined Agile Delivery (DAD) teams they will often take on the role of architecture owner (AO). Enterprise architects will also work with other IT stakeholders, including operations engineers, support staff, the data management team and so on so as to understand their needs.
4. Evolve architecture assets. The enterprise architecture team, or at least the portion of the team who is currently available, will meet on a regular basis to evolve the enterprise architecture assets based on their learnings. A common pattern we’ve seen is for the team to meet every Friday afternoon for two hours where they discuss what they’ve learned that week from working on delivery teams and working with their various stakeholders. As the result of the meeting several of the enterprise architects may take on action items to update existing EA artifacts. These artifacts may include EA models, reference architectures, development guidelines, white papers, and so on. When a new major topic arises, such as the potential adoption of a new platform or a merger with another organization, the EA team may choose to schedule agile modelling sessions to explore the topic.
Related Readings
Leave a Reply
|
Germany's fate is on the agenda
By Jorgo Chatzimarkakis*
Across Europe, stunned citizens are witnessing the drama that is taking place ahead of the final negotiations for the next tranche of aid for Greece. Germany, which brought the International Monetary Fund on board for the Greek bailout, now refuses to accept its advice, and will not heed the advice of its national economists either.
Germany is becoming increasingly isolated within Europe. The future of the eurozone, the survival of European integration and even Germany's prosperity, are all hostages in the runup to the country's 2013 general election – even though there is already a clear winner.
What is really needed now is a bold design that tackles the eurozone’s real problems. The pattern of the last two years, of small policy steps and muddling through from one summit to another, is making matters worse.
Instead of muddling through, we need a mechanism within the eurozone which ensures that one partner is not burdened with a huge amount of debt, while another is in a position to spend lavishly because of its strong economy. In return, we must have solid parameters for sustainable and balanced budgets. Adequate proposals are already on the table.
Above all, we need to have an honest debate within Germany about what is happening to the profits it is making from the euro crisis. From the first tranche alone, Germany has earned more than 400 million euros. By way of historically low interest rates on German government bonds, Germany has, according to Kiel Institute for the World Economy economist Jens Boysen-Hogrefe, saved 68 billion in the last three-and-a-half years.
The real victim of continuous debates is Greece and its Prime Minister Antonis Samaras, who has pulled the coals out of the fire for the Europeans. For months, the Europeans have been telling the Greeks to cut budgets and implement reforms in order to get the next tranche. And the Greeks who have made a colossal effort to do just that are now left in the rain waiting.
The Greek state is virtually bankrupt and political pressure on the domestic front is close to boiling over. Long-gone diseases like malaria are returning because people lack the money to go to the doctor. Some are literally starving. Many European politicians here in Brussels are becoming increasingly ashamed that a community which is supposed to stand in solidarity would allow such a thing.
Several Greeks see Germany as the root of their misery. They cannot understand why a country that has benefited so much from the euro is refusing to take the necessary steps. This is especially so considering the fact that during the 1953 conference in London, Germany was granted a huge haircut by European and international borrowers. This was in addition to the Marshall Plan, an economic stimulus plan that provided the money with which the German economic miracle was possible in the first place.
Rather than learn the lessons of history, we have forgotten German reflection that was once based on the two principles of German foreign policy after 1945: Never again, never be alone. The ahistorical policy of the current government is what is leading to angry reactions all over Europe.
In Greece, a new debate on war reparations is ongoing. What if the Greek government asks not for reparations, but for a repayment of the imposed loan from 1942? At that time, Berlin forced Athens to make an interest-free forced loan of 476 million marks, which today corresponds to 10 billion euros; with compound interest, the sum would be six times that.
How long can Germany stand the pressure? As the net beneficiary of the euro crisis, it will not be easy to stay its hard line. Germany now has a very narrow window of opportunity to break out of its growing isolation in Europe but also in the bodies of the IMF. The next Eurogroup meeting, for Germany, as for Greece, has a historical dimension. I hope for Germany and Europe that the lessons of history sink in.
* Jorgo Chatzimarkakis is a member of Germany's Free Democratic party and a member of the European Parliament. He holds German-Greek citizenship.
|
• Email
• Help
Orphan designation
On 17 July 2012, orphan designation (EU/3/12/1027) was granted by the European Commission to Sanquin Blood Supply Foundation, the Netherlands, for human apotransferrin for the treatment of congenital hypotransferrinaemia.
The sponsorship was transferred to Sanquin Plasma Products B.V., The Netherlands, in April 2016.
What is congenital hypotransferrinaemia?
Congenital hypotransferrinaemia is a genetic disease characterised by abnormally low levels of the protein transferrin in blood. Transferrin attaches to iron in the blood and delivers it to where it is needed, such as the bone marrow, where it is used for the production of haemoglobin (the protein found in red blood cells that carries oxygen around the body). Severely reduced levels of transferrin cause anaemia (low red blood cell counts), which may lead to heart problems. It also leads to free iron accumulating in tissues and organs, where it can cause damage and increase the likelihood of infections.
Congenital hypotransferrinaemia is a life-threatening condition due to severe anaemia and accumulation of iron in tissues which can cause heart problems and infections.
What is the estimated number of patients affected by the condition?
At the time of designation, congenital hypotransferrinaemia affected approximately 0.00012 in 10,000 people in the European Union (EU)*. This is equivalent to a total of around 6 people, and is below the ceiling for orphan designation, which is 5 people in 10,000. This is based on the information provided by the sponsor and the knowledge of the Committee for Orphan Medicinal Products (COMP).
What treatments are available?
At the time of orphan designation, no satisfactory treatments were authorised in the EU for this condition. Treatments included blood transfusions to manage anaemia and iron chelation therapy to reduce the accumulation of free iron in tissues and organs.
How is this medicine expected to work?
This medicine is made of the transferrin protein extracted from human plasma (the liquid component of the blood) and depleted of the iron attached to it. This medicine is expected to work by replacing the missing protein, which can attach to the iron in the blood. This is expected to improve the symptoms of the disease.
What is the stage of development of this medicine?
The effects of human apotransferrin have been evaluated in experimental models.
At the time of submission of the application for orphan designation, clinical trials with human apotransferrin in patients with congenital hypotransferrinaemia were ongoing.
At the time of submission, human apotransferrin was not authorised anywhere in the EU for congenital hypotransferrinaemia or designated as an orphan medicinal product elsewhere for this condition.
• the seriousness of the condition;
Key facts
Product details for Human apotransferrin
Active substance: Human apotransferrin
Medicine Name
Disease/condition: Treatment of congenital hypotransferrinaemia
Date of decision: 17/07/2012
Orphan decision number: EU/3/12/1027
Review of designation
Sponsor’s contact details:
Sanquin Plasma Products B.V.
Plesmanlaan 125
1066 CX Amsterdam
The Netherlands
Tel. +31 20 512 30 00
Patients' organisations:
|
Life insurance and critical illness cover
If you or someone else has a terminal sickness you've probably found that it may be extremely difficult to obtain a travel insurance policy that will meet a claim related to your insured's illness.
This can be aggravating, especially if the person that has been given the fatal prognosis would like to travel while they still have the possibility. Many people have friends or relations abroad whom they would like to see again and who would like to see them.
A lot of people feel that it's therapeutic for someone with a terminal illness to get away from the usual environment where they are constantly reminded of their illness. By travelling and being in a totally different place, it enables many people's focus to shift, at least for periods of time, away from their health and medical situation and onto the people or place they're visiting.
Thus hoping to plan an outing for someone with a airport terminal illness and then finding that its difficult to obtain take a trip insurance can be incredibly annoying. Life insurance and critical illness cover After all few people would likely wish to travel without having insurance especially when someone incorporates a serious and life-threatening ailment. Medical bills can easily mount up- consultations treatment treatment hospital keeps. Understandably people want to have some kind of assurance that when the worst found the worst the insurance company would be there to pick up the bill.
Wait how is a terminal disease defined In our expertise there is no one apparent definition. Generally an individual is regarded as having a terminal prognosis if there is no further treatment obtainable that will cure or maybe control their problem and that it wouldnt become unexpected if they have been to die in six months. Clearly that definition may vary in one person to another so we would advise dealing with the matter in detail together with the medical professionals involved.
So why is it so difficult to have travel insurance for someone having a terminal illness
Very well from an insurers perspective they price their particular travel insurance policy in line with the level of risk these are covering and their experience with claims.
If the amount of money in the pot from costs collected isnt enough to meet all the statements an insurer is likely to end selling the insurance insurance plan which is bad news for every individual.
The more serious or complex a medical problem someone has the more chance there is of the claim being manufactured and that the claim will be high. Almost all insurers dont want to danger insuring someone using a terminal illness. You can find the risk that someone will need to go into hospital along with the danger is that they will be regarded as too ill to travel time for the UK. This means their own stay in hospital could possibly be extended. Not only would their medical costs have to be met because of the insurer but also holiday accommodation and any extra traveling costs incurred by a new travelling companion.
If the person is well enough to go back to the UK it may be crucial that they be accompanied by a qualified nurse or even doctor on a routine airline. Or it usually is that an air ambulance is needed. The cost of providing a reverse phone lookup especially the latter would run into many thousands associated with pounds.
The insurance firm has a number of possibilities-
Raise all the rates for everyone so that the cost in the pot is always sufficiently large to meet perhaps very large claims coming from customers with fatal illness Keep the basic level of premiums at the reasonably low level however charge higher rates on a scale commensurate with the seriousness of the problems being covered including offering cover people with a critical illness As the place immediately above although not offer cover for fatal illnesses A mixture of this the above
Insurers usually wont want to bring up their premiums too much for everyone because they would certainly then become uncompetitive and customers could think that they were being charged excessive.
The reality is that most insurance providers simply prefer not to ever offer insurance to people with a terminal sickness because they consider that will to do so would be as well risky.
However you possibly can find specialist insurance firms who will provide a saying for travel insurance which will cover claims related to the terminally ill personal medical conditions. Because of the risks involved in supplying this kind of insurance as we previously explained you must anticipate the premium to be much higher. It could take a very few large promises for the insurance company to make the decision that the risk ended up being too great and stop quoting intended for such situations.
Youll be able that even for a specialist insurer someones predicament may be regarded as way too great risk and protect not to be offered.
Hopefully you like this article has provided a conclusion of how insurance businesses come to their selection about which scenarios they can cover and also the issues involved. Life insurance and critical illness cover The demand for exotic pets has increased over the decade in comparison to the common pets market. More and more people prefer an exotic pet mostly as a statement of their personality traits or lifestyle. Consequently insurance corporations have produced specific policies personalized with the needs of unique pet owners.
Exotic pets are frequently a lot more hard to look after and so the cost from the veterinary costs might not be something you can actually cover not having developing an insurance policy. Should you have made the decision to the type of unique pet you prefer in your own household getting covered for the ailments or accidents that might occur to this is a should.
The normal policy of most insurance corporations to choose from in the market place can have a optimum amount of money compensated for each sickness your pet could are afflicted by according to the premium you happen to be ready to pay.
Leave a Reply
|
#pragma once
#include "base/LifeCycleComponent.hpp"
#include "entity-system/factories/entities/entity-instance-builder-factory/EntityInstanceBuilderFactory.hpp"
#include "entity-system/model/entities/entity-instances/EntityInstance.hpp"
namespace inexor::entity_system::type_system {
using EntityInstancePtr = std::shared_ptr<EntityInstance>;
using EntityInstancePtrOpt = std::optional<EntityInstancePtr>;
using EntityInstanceBuilderFactoryPtr = std::shared_ptr<EntityInstanceBuilderFactory>;
/// @class RandomNextIntFactory
/// @brief Factory for creating entity instances of type RANDOM_NEXT_INT.
/// @note Instances are created through the injected entity instance builder
/// factory; this class only declares the creation API (see the .cpp for the
/// actual builder configuration).
class RandomNextIntFactory : public LifeCycleComponent
{
    public:
    /// @brief Constructs a factory for creating entity instances of type RANDOM_NEXT_INT.
    /// @note The dependencies of this class will be injected automatically.
    /// @param entity_instance_builder_factory Factory for creating entity instance builders.
    RandomNextIntFactory(EntityInstanceBuilderFactoryPtr entity_instance_builder_factory);
    /// Destructor.
    ~RandomNextIntFactory();
    /// Returns the name of the component.
    std::string get_component_name() override;
    /// @brief Creates an entity instance of type RANDOM_NEXT_INT with default bounds.
    /// @note NOTE(review): the original comment described a counter
    /// (millis = 1000, step = 1, start_value = 0) and appears to have been
    /// copy-pasted from a counter factory — the actual defaults are set in the
    /// .cpp; TODO confirm them there.
    /// @return The created entity instance, or std::nullopt if creation failed.
    EntityInstancePtrOpt create_instance();
    /// @brief Creates an entity instance of type RANDOM_NEXT_INT with the given range.
    /// @param min Lower bound of the random integer range (presumably inclusive — confirm in the .cpp).
    /// @param max Upper bound of the random integer range (presumably inclusive — confirm in the .cpp).
    /// @return The created entity instance, or std::nullopt if creation failed.
    EntityInstancePtrOpt create_instance(int min, int max);
    private:
    /// Factory for creating entity instance builders.
    EntityInstanceBuilderFactoryPtr entity_instance_builder_factory;
};
} // namespace inexor::entity_system::type_system
|
Re-inventing the Planned City Monday, March 12, 2012
TAU and MIT launch pilot project to re-think 50's era "New Towns"
A bird's-eye view of Kiryat Gat
In response to population growth, many "new towns" or planned cities were built around the world in the 1950s. But according to Dr. Tali Hatuka, head of Tel Aviv University's Laboratory for Contemporary Urban Design (LCUD) at the Department of Geography and the Human Environment, these cities are a poor fit for modern lifestyles — and it's time to innovate.
TAU has launched a pilot project, in collaboration with a team from the Massachusetts Institute of Technology led by Prof. Eran Ben-Joseph, to revitalize this aging model. Last month, a team of five TAU and 11 MIT graduate students visited Kiryat Gat, a mid-sized town in the south of Israel. Home to branches of industrial giants Hewlett-Packard Company and Intel, Kiryat Gat was chosen as a "laboratory" for re-designing outmoded planned civic spaces.
Based on smart technologies, improved transportation, use of the city's natural surroundings, and a reconsideration of the current use of city space, the team's action plan is designed to help Kiryat Gat emerge as a new, technologically-advanced planned city — a prototype that could be applied to similar urban communities.
Planning a future for the mid-sized city
The project, jointly funded by TAU's Vice President for Research and MIT's MISTI Global Seed Funds, will create a new planning model that could reshape the future of Kiryat Gat and similar cities across the world which are often overlooked in academia and practical planning. "Our goal is to put a spotlight on these kinds of towns and suggest innovative ways of dealing with their problems," says TAU student Roni Bar.
MIT's Alice Shay, who visited Israel for the first time for the project, believes that Kiryat Gat, a city that massive urbanization has left behind, is an ideal place for the team to make a change. "The city is at a catalyst point — an exciting moment where good governance and energy will give it the capacity to implement some of these new projects."
To tackle the design and planning challenges of the city, the team of students focused on four themes: the "mobile city," which looked at transport and accessibility; the "mediated city," dealing with technological infrastructure; the "compact city," which reconsidered the use of urban space and population growth; and the "natural city," which integrated environmental features into the urban landscape.
Finding common ground
Ultimately, the team’s goal is to create a more flexible city model that encourages residents and workers to be a more active part of the urban fabric of the city, said Dr. Hatuka. The current arrangement of dedicated industrial, residential, and core zones is out of step with a 21st century lifestyle, in which people work, live, and spend their leisure time in the same environment.
"Much of the past discourse about the design of sustainable communities and 'eco-cities' has been premised on using previously undeveloped land," says Prof. Ben-Joseph. "In contrast, this project focuses on the 'retrofitting' of an existing environment — a more likely approach, given the extent of the world's already-built infrastructure."
The students from TAU and MIT have become a truly cohesive team, and their diversity of background helps challenge cultural preconceptions, Bar says. "They ask many questions that help us to rethink things we took for granted." Shay agrees. "Tali and Eran have created an incredible collaboration, encouraging us all to exchange ideas. Our contexts are different but there is a common urban design language."
The team estimates that they will be able to present the updated model of the city early next year. The next step is further exploring the project's key themes at a March meeting at MIT. And while the project has provided an exceptional educational experience for all involved, ideas are already leaping off the page and into the city's urban fabric. "In the next two months, the Mayor of Kiryat Gat would like to push this model forward and implement the initial steps that we have offered," says an enthusiastic Dr. Hatuka.
|
by Michele Giuliani
Retail Design is a creative and commercial discipline, which combines several areas of expertise in designing and building retail space.
First, Retail Design is a specialized practice in architecture and interior design, but it also incorporates elements of interior decoration, industrial design, graphic design, ergonomics and advertising, semiotics, psychology, sociology.
Retail Design is a highly specialized discipline due to the heavy demands placed on sales space.
Since the main aim of commercial space is to store and sell the product to consumers, spaces must be designed in such a way as to promote a pleasant and trouble-free shopping experience for the consumer.
The space should be carefully adapted to the type of product sold in that specific space; for example, a library requires many large shelving units to accommodate small products that can be organized into categories, while a clothing store requires more space to fully view the product.
Business areas, especially when they are part of a chain of stores, must be designed to attract people into the store space. The showcase has to act as a billboard for the shop, often using large showcases that allow customers to see the inside space and product. In the case of a chain of shops, individual spaces should be unified in their design. So, we can talk about “co-ordinated image”.
Retail Design began to grow in the middle of the 19th century, with stores such as Le Bon Marché and Printemps in Paris, followed by Marshall Fields in Chicago, Selfridges in London and Macy in New York.
This new concept of retail shops, called department stores, has been a very first design example, later called also “chain of stores”.
The first chain of stores was opened in the early 20th century by Frank Winfield Woolworth, which soon became a franchise across the United States.
Other chain stores began to grow in places like the United Kingdom a decade later, with stores like Boots.
After the World War II, a new type of retail building, known as the mall, entered in the history of the trade.
This type of construction has taken two different paths in the United States and Europe. In the US, shopping malls started to be built outside the city, in the periphery; while in Europe, the shopping centres were placed in the city centre.
The first shopping mall in the Netherlands was built in 1950.
The next evolution of retail design was the creation of the boutique in 1960. A boutique is “a small shop which sells fashionable clothes, jewellery, or other, usually luxury goods.”
Some of the first examples of these shops are the Biba created by Barbara Hulanicki and Habitat by Terence Conran.
The rise of boutiques continued in the next two decades, with an overall increase in consumption worldwide.
Many retail design shops have been redesigned for the period to keep up with the changing consumer tastes. These changes have led to the creation of more “expensive, one-off design shops” catering for specific designers and retailers to get to “lifestyle boutiques”.
With the advent of Internet and the development of online sales, Retail has experienced another epochal change: its design is now associated with online shoppers, revolutionizing the concept of the Retail Design or rather its interface.
A Retail Designer should create a thematic experience for the consumer, using space-based solutions as well as encouraging the consumer to buy goods and interact with the space.
Furthermore, the success of their projects is not measured by design criticism, but by store records and productivity.
Retail Designers have to have an acute awareness that the store and their designs are the backdrop for the merchandise and they are there to represent and create the best environment ever where reflect the goods to the target group of consumers.
To be continued…
|
#include<bits/stdc++.h>
#include<unistd.h>
using namespace std;
// Shorthand macros used throughout this contest solution.
#define FZ(n) memset((n),0,sizeof(n))
#define FMO(n) memset((n),-1,sizeof(n))
#define F first
#define S second
#define PB push_back
#define ALL(x) begin(x),end(x)
#define SZ(x) ((int)(x).size())
// Untie C++ streams from C stdio for faster iostream I/O.
#define IOS ios_base::sync_with_stdio(0); cin.tie(0)
// Debug printer: renders a pair as "(first,second)".
template<typename A, typename B>
ostream& operator <<(ostream &out, const pair<A,B> &pr) {
    out << '(' << pr.first << ',' << pr.second << ')';
    return out;
}
// Debug printer: renders a vector as "[ e1 e2 ... ]".
template<typename T>
ostream& operator <<(ostream &out, const vector<T> &items) {
    out << "[ ";
    for (const auto &item : items)
        out << item << " ";
    out << "]";
    return out;
}
// Let's Fight!
typedef pair<int, int> pii;
const int MAXN = 1000005;
// N = number of points, K = current candidate window radius being tested.
int N, K;
// Input point coordinates.
int px[MAXN], py[MAXN];
// Sweep-line events per x column: the half-open y interval each point's
// square covers (event) and whether it opens (+1) or closes (-1) it (es).
vector<pii> event[MAXN*4];
vector<int> es[MAXN*4];
// Lazy segment tree over y in [0, 2K]; seg holds subtree minima of coverage
// counts, tag the pending lazy additions. Sized for up to 4*(2K+1) nodes.
int seg[MAXN*10], tag[MAXN*10];
// Reset the first 8*K+4 segment-tree nodes (enough for a tree over [0, 2K]).
void initSeg()
{
    const int nodes = 8 * K + 4;
    fill(seg, seg + nodes, 0);
    fill(tag, tag + nodes, 0);
}
// Push node s's pending lazy addition down to its children (if any), then clear it.
void clearTag(int s, int lb, int rb)
{
    const int pending = tag[s];
    tag[s] = 0;
    if (rb - lb <= 1)
        return; // leaf: nothing to push down
    const int lc = 2 * s, rc = 2 * s + 1;
    seg[lc] += pending;
    tag[lc] += pending;
    seg[rc] += pending;
    tag[rc] += pending;
}
// Add val to coverage counts on the y interval [l, r), within node s spanning [lb, rb).
void addSeg(int s, int lb, int rb, int l, int r, int val)
{
    if (rb <= l || r <= lb)
        return; // disjoint: nothing to do
    if (l <= lb && rb <= r)
    {
        // Node fully inside the update range: record lazily.
        seg[s] += val;
        tag[s] += val;
        return;
    }
    // Partial overlap: push pending updates down, recurse, then pull minima up.
    clearTag(s, lb, rb);
    const int mb = (lb + rb) / 2;
    addSeg(2 * s, lb, mb, l, r, val);
    addSeg(2 * s + 1, mb, rb, l, r, val);
    seg[s] = min(seg[2 * s], seg[2 * s + 1]);
}
// Minimum coverage count over the whole y range (the root node's value).
int qrySeg()
{
return seg[1];
}
// Feasibility check for the current K: each input point covers an axis-aligned
// square of side 2K+1, clipped to the (2K+1)x(2K+1) window [0,2K]^2.
// Returns true iff some cell of the window is covered by no square (i.e. this
// K is still too small). Implemented as a left-to-right sweep over x with a
// lazy segment tree maintaining per-y coverage counts.
bool test()
{
//cout<<"TEST "<<K<<endl;
initSeg();
// Drop stale events left over from a previous (possibly larger) K.
for(int i=0; i<=2*K; i++)
{
event[i].clear();
es[i].clear();
}
// Clip each point's square to the window and register open/close events.
for(int i=0; i<N; i++)
{
int x1 = px[i], x2 = px[i] + 2*K, y1 = py[i], y2 = py[i] + 2*K;
if(x2<0 || y2<0 || x1>2*K || y1>2*K) continue; // square misses the window entirely
x1 = max(x1, 0);
y1 = max(y1, 0);
x2 = min(x2, 2*K);
y2 = min(y2, 2*K);
//cout<<x1<<"~"<<x2<<" , "<<y1<<"~"<<y2<<endl;
event[x1].PB({y1, y2+1});
es[x1].PB(1);
if(x2+1 <= 2*K)
{
event[x2+1].PB({y1, y2+1});
es[x2+1].PB(-1);
}
}
// Sweep over x: apply this column's interval updates, then query the minimum.
for(int i=0; i<=2*K; i++)
{
for(int j=0; j<(int)event[i].size(); j++)
{
int lb = event[i][j].F, rb = event[i][j].S, val = es[i][j];
addSeg(1, 0, 2*K+1, lb, rb, val);
}
int res = qrySeg();
if(res == 0) return true; // an uncovered cell exists in this column
}
return false;
}
// Binary-search the smallest K for which test() reports full coverage.
int calc()
{
    int lo = 0, hi = MAXN - 1;
    while (lo < hi)
    {
        const int mid = lo + (hi - lo) / 2;
        K = mid;
        if (test())
            lo = mid + 1; // window not fully covered: K too small
        else
            hi = mid;     // covered: K might still shrink
    }
    return lo;
}
// Reads point sets until a count of -1 (or EOF) and prints, per test case,
// the smallest K found by calc(), or "never" when no K in range suffices.
int main() {
    IOS;
    int t = 0;
    // Read with cin: IOS unsyncs iostreams from C stdio, so the previous mix
    // of scanf input with cout output had unspecified interleaving. This also
    // terminates cleanly on malformed input instead of looping on a stale N
    // (the old `~scanf` test never failed on a parse error).
    while ((cin >> N) && N != -1)
    {
        t++;
        for (int i = 0; i < N; i++)
            cin >> px[i] >> py[i];
        int ans = calc();
        cout << "Case " << t << ": ";
        if (ans == MAXN - 1)
            cout << "never" << '\n'; // '\n' avoids endl's per-line flush
        else
            cout << ans << '\n';
    }
    return 0;
}
|
Egypt Dawdles and Hesitates on the Road to Nuclear Power
Ramadan may see fewer electric lights if the government has its way.
"Why is is necessary to invest seven million Egyptian pounds for a two-hour electricity shortage? If every family saves 5 percent of its electricity consumption, Egypt will not need an investment like that," said Hassan Younes, the country's electricity and energy minister.
Younes was referring to the projected cost of three nuclear power plants Egypt is planning to build. Despite the minister's doubts regarding the need to invest such huge sums on nuclear power, he announced that by the end of the year, Egypt will issue a tender to build the first such power stations, to start operating in 2019.
Three firms have already presented proposals for electricity-producing nuclear power plants - the French Areva company, the Canadian firm AECL and the Russian Rosatom. Last week, the American firm Westinghouse submitted its proposal for an advanced AP1000 model reactor. Other proposals are forthcoming.
Ramadan preparations in Cairo
Critics of the Egyptian government say the country began to take an interest in building nuclear reactors too late. Studies show Egypt is exploiting about 65 percent of its energy resources for producing electricity. By 2032, Egypt will need an estimated 70 gigawatts of electricity, compared to the 20 gigawatts it is producing today. This need cannot be satisfied by means of Egyptian oil or natural gas, especially as it's believed Egyptian oil sources will begin to dwindle by 2012.
Other studies and forecasts say the shortage is already being felt, and severely so, in Cairo and in outlying areas. To battle the shortage, the Energy Ministry has published a series of instructions for consumers on how to save power. When necessary, the electric company cuts off the juice, mainly at peak hours.
The government has also warned about the use of decorative lanterns and strings of colored bulbs customarily hung for the month of Ramadan, which begins next week. This year it is forbidden to hang large lanterns or strings of lights so as to conserve electricity, and fines will be imposed.
On Middle Eastern time
Despite the considerable attention given the nuclear solution in recent weeks, it is liable to fall victim to the Egyptian pace. Back in 1955, the idea of building a nuclear power plant was posed, but the years went by, and the project was stopped after the Six-Day War. In 1974, Richard Nixon offered to build Egypt a nuclear power plant, but when the Egyptians found out it would be under American supervision, they rejected the project.
A decade later, in 1984, Egypt again began to discuss the possibility of building eight nuclear reactors. Then the Chernobyl disaster occurred, and the project was frozen. In 2006, Gamal Mubarak, President Hosni Mubarak's son, announced the renewal of the nuclear project and the plan to build three nuclear power stations. As is appropriate, a special council was also set up on the use of the atom for peaceful purposes. Since then, it has emerged that in Egypt there are not enough experts in the field of the atom and reactor safety.
Above all, a huge dispute developed on the location of the reactors. After examinations and studies, the area of Dab'ah on the Mediterranean coast, not far from Alexandria, was proposed. However, agile businessmen, including close associates of Gamal Mubarak, believed it better to use the area for building holiday villages rather than nuclear reactors. A rumor flew around that these business people had already acquired land in the area after showing studies indicating the site was likely to be risky for a nuclear reactor.
Dr. Ibrahim Kamel, who heads the Kato conglomerate that, inter alia, engages in building and managing vacation sites and is a member of the Egyptian Tourism Authority, said that "the Dab'ah area was not suitable for nuclear reactors because the winds there are northeasterly; in case of a reactor leak, the Delta area is liable to suffer heavy losses. The value of the land at Dab'ah is too high to use it for building a nuclear reactor." Kamel, a close friend of Gamal Mubarak, is also a member of the ruling party's secretariat.
Though the Egyptian energy minister has denied that plots of land have been sold in the area designated for reactors, the intention to examine other possible sites for building reactors confirms the suspicions.
The Jewish side
It was only to be expected that the dispute on wearing the veil would ultimately come down to the Jews. Last week, a religious preacher, Dr. Amana Nusir, ruled that wearing the veil is, in fact, a Jewish custom, going back to the Bible and the Rambam's ruling to the effect that "a Jewish woman who goes out to the street without covering her head and face is behaving contrary to Judaism."
Nusir, former dean of humanities at Al-Azhar University and now a lecturer in philosophy at Cairo University, is an uncompromising opponent to the wearing of the veil. She spoke at a conference of female university graduates from Islamic countries that was held in Alexandria and urged the women to cling to science and religion to get closer to God. She said "this is the only refuge by means of which it is possible to advance society," instead of taking pride in external signs like the veil.
The late head of Al-Azhar, Sheikh Mohammed Sayyed Tantawi, forbade women studying there from wearing the veil and prohibited them from taking examinations when veiled. Nusir sees herself as the successor who champions his path. What better way could there be than depicting wearing the veil as a Jewish custom? No good Muslim woman would want to imitate a Jewish custom.
|
#include "drape_frontend/overlay_batcher.hpp"
#include "drape_frontend/map_shape.hpp"
#include "drape/batcher.hpp"
#include "drape/render_bucket.hpp"
#include "drape/texture_manager.hpp"
namespace df
{
// Capacity hints for the batcher's index and vertex buffers.
uint32_t const kOverlayIndexBufferSize = 30000;
uint32_t const kOverlayVertexBufferSize = 20000;
// Opens a batching session for the given tile key; every render bucket the
// underlying batcher flushes is routed to FlushGeometry with that key.
OverlayBatcher::OverlayBatcher(TileKey const & key)
: m_batcher(kOverlayIndexBufferSize, kOverlayVertexBufferSize)
{
// Typical number of render-data entries per tile; avoids early reallocations.
int const kAverageRenderDataCount = 5;
m_data.reserve(kAverageRenderDataCount);
m_batcher.StartSession([this, key](dp::RenderState const & state, drape_ptr<dp::RenderBucket> && bucket)
{
FlushGeometry(key, state, std::move(bucket));
});
}
// Feeds one map shape into the current session: records the shape's minimal
// zoom on the batcher, then lets the shape draw itself into it.
void OverlayBatcher::Batch(ref_ptr<dp::GraphicsContext> context, drape_ptr<MapShape> const & shape,
ref_ptr<dp::TextureManager> texMng)
{
m_batcher.SetFeatureMinZoom(shape->GetFeatureMinZoom());
shape->Draw(context, make_ref(&m_batcher), texMng);
}
// Ends the session (flushing remaining geometry through the session callback)
// and hands the accumulated render data to the caller by swapping it out.
void OverlayBatcher::Finish(ref_ptr<dp::GraphicsContext> context, TOverlaysRenderData & data)
{
m_batcher.EndSession(context);
data.swap(m_data);
}
// Session callback: stores one flushed bucket together with its tile key.
void OverlayBatcher::FlushGeometry(TileKey const & key, dp::RenderState const & state,
drape_ptr<dp::RenderBucket> && bucket)
{
m_data.emplace_back(key, state, std::move(bucket));
}
} // namespace df
|
• The Vocalist’s Tool Kit
in Piano,Vocal
Post image for The Vocalist’s Tool Kit
In this lesson, you will discover the tools that are in the vocalist’s tool kit.
The vocalist career is a profession just like Engineering, Medical, Legal, or any other profession. If a technical profession like engineering, demands the use of tools for its job delivery, what makes you think the vocalist profession doesn’t?
The reason why it is necessary for you as a vocalist to have your tool kit is because it helps to enhance your vocal delivery. Imagine a doctor that wants to check your temperature without thermometer, or imagine an engineer that wants to drill a hole without a drilling machine, or loose a screw without a screwdriver.
So, let’s bring it down to a career vocalist.
“Who Is A Vocalist?”
A vocalist is someone that makes music with his/her voice. A vocalist is like an upgraded version of a singer. When you reach the point of being called a vocalist, it indicates what you can make with your voice.
Do you know that most of the songs that you hear on films and animation, were created by vocalists? Do you know that there is so much money to make from this, that you can earn a living exploring this career path?
“What Are The Tools In A Vocalist’s Tool Kit?”
The vocalist tool kits include:
• Writing materials
• A media player
• Voice recorder
• Stopwatch
• Voice lubricant (like water)
• Headphones
These are for the basic level, but by the time you progress you will need a:
• Personal voice mixer
• Personal microphone (for rehearsal and performance)
The Functions Of The Tool Kits
The tool kits outlined above serve the following functions:
Writing Material
This does not necessarily mean a book and a pen, it could be a writing app on your mobile device. One of the things a vocalist can do with the writing material is to write out the plot of a song.
For example, if I am planning a birthday, and I ask you- a vocalist, to come and sing for me, the writing material helps you to compile a collection of songs that will suit the event, and it will also help you organize your act. Your writing materials need to be handy.
You could use your phone or writing software, so you don’t have to move around with pen and paper to show that you are a well-equipped vocalist. The writing materials are basically used to organize and sketch out your rehearsal and performance plan, just like an architect.
A Media Player
This refers to any device you use to listen to music, either what you recorded or what other people have recorded. Even though your job as a vocalist is to sing, you will be required to do more of listening because how good you sound is a function of what you hear and recognize with your ear.
A good vocalist must have a stereo system, and if possible, set up a music player that will play nonstop. This will expose your ear to different varieties of music, and develop your singing style and creativity.
Voice Recorder
The beauty of voice recording is that it’s like your own personal teacher. When you record yourself, and listen to your recording, you will observe certain errors, and can easily spot your fault, which will lead you to re-record until you fix it.
If you practice this all the time, you will notice a self-evaluation trend that can easily shape your voice to create audible and clear sound. Another thing a voice recorder does is it helps you record an inspiration you receive immediately it comes, before you lose it.
Well, I know that it’s good to read music, but how many people want to sit down and write music? So, always keep your voice recorder handy so you can retain all your creative work as a vocalist. When you record your voice, it retains the feeling, the passion, and even the originally inspired key.
It’s been proven that the key of a song is the best key to use when performing the piece because if you use another key it might alter the feeling and effect. Can you see all that you can achieve with just a voice recorder?
Over time, you will notice that music and time are related. So, beginning from your warm up and practice session, a stopwatch is necessary to help you measure duration in your exercise or drill, such as, learning how to sustain a particular note like ‘do.’
You can also use it to measure your progress in terms of timing a song, timing how long you can hold your breath, etc.
Voice Lubricant (Water)
Water contains air, which refreshes your voice, and to a great extent water balances your temperature. Most of the time when you sing and your throat gets dry, water or other healthy, drinkable liquids help you breathe more easily.
There are four things a headphone will do for you:
• Headphones can also give you intimacy especially when you want to be alone to rehearse or score songs properly.
• It helps you record in a very busy, noisy, or crowded environment. With your headphones on, you can record in a noisy place because it can eliminate the noise and pick only your voice.
• It will help you hear what you recorded, or the song you are listening to clearly.
• Headphones help you not to disturb your neighbor or colleagues who may be affected by your music if you play it out loud.
Voice Mixer
What a mixer does for a performing vocalist is that it helps you get the right blend of vocal effect for your microphone. You use the mixer while rehearsing, to get acquainted with the right combination for a particular effect on your microphone.
You understand your microphone and your microphone understands you. Just like a guitarist that goes to perform with the personal guitar he practices with, get your personal microphone too.
Final Words
The moral of this lesson is to make sure you won’t be like a doctor who does not have a thermometer or an engineer who does not have a screwdriver. So, start gathering your tools one after the other if you want to be professional.
The following two tabs change content below.
{ 0 comments… add one now }
Leave a Comment
Previous post:
Next post:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.