Sunday, 31 January 2021

Updating React Native language by user choice

I'm using I18n to create a multi-language application.

I have created an "on first launch" screen that lets the user choose their preferred language. It works fine, but there is a problem.

Once I choose a language, the App() component updates and shows the Login component (initialRouteName="Login"). But the language is still the default English; it only updates when I proceed to another screen, or when I Fast Refresh the Login screen.

const Stack = createStackNavigator();
const HAS_LAUNCHED = "hasLaunched";
const ENGLISH = "en";
const HEBREW = "he";


//Save the language as AsyncStorage for other times the user will open the app
async function setAppLaunched(en) {
  AsyncStorage.clear()
  AsyncStorage.setItem(HAS_LAUNCHED, "true");
  AsyncStorage.setItem(en ? ENGLISH : HEBREW, "true");
  if(await AsyncStorage.getItem(HEBREW)){
    i18n.locale = "he";
    I18nManager.forceRTL(true);
  }
  else{
    i18n.locale = "en";
    I18nManager.forceRTL(false);
  }
}


//If first launch show this screen
function CheckIfFirstLaunch({ onSelect }) {

  const selectLaunched = (value) => {
    setAppLaunched(value);
    onSelect();
  };


  return (
    <View>
        <Text>Choose Language</Text>
        <Button onPress={() => selectLaunched(false)} title="Hebrew"/>
        <Button onPress={() => selectLaunched(true)} title="English"/>
    </View>
  );
}

export default function App() {
  const [selected, setSelected] = useState(false);

  const verifyHasLaunched = async () => {
    try {
      const hasLaunched = await AsyncStorage.getItem(HAS_LAUNCHED);
      setSelected(hasLaunched != null);
    } catch (err) {
      setSelected(false);
    }
  };

  useEffect(() => verifyHasLaunched, []);

  if (!selected){
    return <CheckIfFirstLaunch onSelect={() => setSelected(true)} />;
  }
  else{
    const verifyLang = async () => {
      const lang = await AsyncStorage.getItem('he');
      if(lang != null){
        i18n.locale = "he";
        I18nManager.forceRTL(true);
      }
      else{
        i18n.locale = "en";
        I18nManager.forceRTL(false);
      }
   };
   () => verifyLang;
  }

  return (
    <NavigationContainer>
      <Stack.Navigator initialRouteName="Login">
        <Stack.Screen name="Login" component={Login} />
        <Stack.Screen name="Register" component={Register} />
        <Stack.Screen name="Dashboard" component={Dashboard} />
      </Stack.Navigator>
    </NavigationContainer>
  );
}

Since my component has re-rendered, I would expect the language to update as well, shouldn't it?

Here are some screenshots that will visually explain what my problem is.

(screenshots omitted)

How can I update the React Native app language based on the user's choice using the I18n plugin?

EDIT

Debugging results:

selectLaunched(value) - value receives the correct boolean value.

Checking the if statement in setAppLaunched(en) to see if it responds correctly: it does.

The selected state is also working fine, and the NavigationContainer component renders right after it is set to true.
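
One thing worth trying (a sketch, not a verified fix): setAppLaunched is async, but onSelect() fires immediately, so App re-renders before i18n.locale and the RTL flag have been written. Awaiting it first would look like this; note also that I18nManager.forceRTL generally only takes full effect after the app is reloaded.

  const selectLaunched = async (value) => {
    await setAppLaunched(value);   // make sure i18n.locale / forceRTL are set first
    onSelect();                    // only then let App re-render and show Login
  };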



from Updating React Native language by user choice

Boost.Python library build errors when no default constructor and unique pointer

I am trying to wrap my C++ code using Boost.Python and have encountered a couple of errors that I could use some help solving. But first, here are the files I have.

Product.h

namespace Products
{
  class Product
  {
  public:
    virtual ~Product() {};
    virtual int generateProduct(int verbose = 0) = 0;
    virtual int getQueueNum() = 0;
    virtual std::vector<std::vector<std::string> > getQueue(const int &index_int) = 0;
  };
}

ProductBuilder.h

namespace Products
{
  class ProductBuilder
  {
  public:
    ProductBuilder(const std::map<std::string,std::string> input_map, const std::deque<std::string> &dates_deque);
    int build(int verbose = 0);
    int numQueue() {return m_product->getQueueNum();};
    std::vector<std::vector<std::string> > getQueueByIndex(const int &index_int) {return m_product->getQueue(index_int);};
  private:
    std::unique_ptr<Products::Product> m_product;
  };
}

full-wrapper.cpp

BOOST_PYTHON_MODULE(alucard)
  {
    class_<Products::ProductBuilder>("ProductBuilder", init<std::map<std::string,std::string>,std::deque<std::string>>())
    .def("build", &Products::ProductBuilder::build)
    .def("numQueue", &Products::ProductBuilder::numQueue)
    .def("getQueueByIndex", &Products::ProductBuilder::getQueueByIndex)
    ;
  }

The errors that I am getting when I build the library via cmake are as follows:

/usr/include/boost/python/object/value_holder.hpp:133:13: error: use of deleted function ‘Products::ProductBuilder::ProductBuilder(const Products::ProductBuilder&)’
             BOOST_PP_REPEAT_1ST(N, BOOST_PYTHON_UNFORWARD_LOCAL, nil)
             ^
In file included from /home/jlahowetz2/development/cpp-python-wrapper/full-wrapper.cpp:11:0:
/data/alucard/include/ProductBuilder.h:25:9: note: ‘Products::ProductBuilder::ProductBuilder(const Products::ProductBuilder&)’ is implicitly deleted because the default definition would be ill-formed:
   class ProductBuilder
         ^~~~~~~~~~~~~~
/data/alucard/include/ProductBuilder.h:25:9: error: use of deleted function ‘std::unique_ptr<_Tp, _Dp>::unique_ptr(const std::unique_ptr<_Tp, _Dp>&) [with _Tp = Products::Product; _Dp = std::default_delete<Products::Product>]’
In file included from /usr/include/c++/7/memory:80:0,
                 from /usr/include/boost/function/function_base.hpp:16,
                 from /usr/include/boost/function/detail/prologue.hpp:17,
                 from /usr/include/boost/function/function_template.hpp:13,
                 from /usr/include/boost/function/detail/maybe_include.hpp:13,
                 from /usr/include/boost/function/function0.hpp:11,
                 from /usr/include/boost/python/errors.hpp:13,
                 from /usr/include/boost/python/handle.hpp:11,
                 from /usr/include/boost/python/args_fwd.hpp:10,
                 from /usr/include/boost/python/args.hpp:10,
                 from /usr/include/boost/python.hpp:11,
                 from /home/jlahowetz2/development/cpp-python-wrapper/full-wrapper.cpp:1:
/usr/include/c++/7/bits/unique_ptr.h:383:7: note: declared here
       unique_ptr(const unique_ptr&) = delete;
       ^~~~~~~~~~
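
A common fix worth sketching: ProductBuilder is non-copyable because of its unique_ptr member, and by default Boost.Python generates a holder that needs the copy constructor. Declaring the class noncopyable tells Boost.Python not to require it:

#include <boost/noncopyable.hpp>   // if not already pulled in via boost/python.hpp

BOOST_PYTHON_MODULE(alucard)
  {
    class_<Products::ProductBuilder, boost::noncopyable>(
        "ProductBuilder",
        init<std::map<std::string,std::string>, std::deque<std::string>>())
    .def("build", &Products::ProductBuilder::build)
    .def("numQueue", &Products::ProductBuilder::numQueue)
    .def("getQueueByIndex", &Products::ProductBuilder::getQueueByIndex)
    ;
  }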


from Boost.Python library build errors when no default constructor and unique pointer

How can I iterate until all entries are in a given column?

I am trying to apply a while statement to my code in order to run it until all the elements in the lists below (in the column Check) are in column Source.

My code is as so far:

while set_condition: # to set the condition
     newCol = pd.Series(list(set(df['Check']) - set(df['Source']))) # this checks for elements which are not currently included in the column Source
     newList1 = newCol.apply(lambda x: my_function(x)) # this function should generate the lists in Check -> this explains why I need to create a while statement
     df = df.append(pd.DataFrame({'Source': newCol, 'Check': newList1}), ignore_index=True) # append the results in the new column
     df = df.explode('Check')

I will give you an example of the process and of how my_function works: let's say that I have my initial dataset

Source       Check
mouse   [dog, horse, cat]   
horse   [mouse, elephant]   
tiger   []  
elephant [horse, bird]

After exploding Check column and appending the results to Source, I will have

Source       Check
mouse   [dog, horse, cat]   
horse   [mouse, elephant]   
tiger   []  
elephant [horse, bird]
dog     [] # this will be filled in after applying the function
cat     [] # this will be filled in after applying the function
bird    [] # this will be filled in after applying the function

Every element in the lists should be added to the Source column before applying the function. When I apply the function, I populate the lists of the other elements; so, for example, I can have

Source       Check
mouse   [dog, horse, cat]   
horse   [mouse, elephant]   
tiger   []  
elephant [horse, bird]
dog     [mouse, fish]  # they are filled in
cat     [mouse]
bird    [elephant, penguin]
fish    [dog]

Since fish and penguin are not in Source, I will need to run the code again in order to get the expected output (all the elements in the lists are already in the Source column):

Source       Check
mouse   [dog, horse, cat]   
horse   [mouse, elephant]   
tiger   []  
elephant [horse, bird]
dog     [mouse, fish] 
cat     [mouse]
bird    [elephant, penguin]
fish    [dog]
penguin [bird]

As both dog and bird are already in Source, I will not need to apply the function again, since all the lists are populated with elements already in the Source column. The code can stop running.

I cannot provide the code for my_function, but I hope it is clear how it works, so that we can figure out how to set the while statement.

What I would like to do is to stop the cycle/loop when all the elements in the lists are in the column Source and have applied the function to populate all the lists.
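
For reference, a minimal sketch of one way to structure the loop, assuming my_function takes a single Source value and returns the corresponding list, and that Check keeps its list form (the stopping condition then only needs a set difference, no explode):

import pandas as pd

def expand_until_closed(df, my_function):
    while True:
        mentioned = {x for lst in df['Check'] for x in lst}   # every element referenced in Check
        missing = mentioned - set(df['Source'])
        if not missing:                                       # stop: Source already covers everything
            break
        new_sources = pd.Series(sorted(missing))
        new_checks = new_sources.apply(my_function)           # assumed to return a list per value
        df = pd.concat(
            [df, pd.DataFrame({'Source': new_sources, 'Check': new_checks})],
            ignore_index=True,
        )
    return df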

Thank you for all the help you will provide.



from How can I iterate until all entries are in a given column?

Locally compiled libffi files not getting picked up while recompiling python 3 package

I am in a situation where I need to locally install Python 3 and some related packages to enable the Tkinter setup and use openpyxl to read/write .xlsx files through a GUI.

  1. First I installed Python 3.9.1 locally, and when I tried doing 'import tkinter', it said it was unable to find the required modules.

  2. Then I installed the tcl and tk libraries locally and recompiled the Python libs as shown below. After this step Python 3 was installed, and the tkinter libs were built as well.

    ./configure --prefix=~/installed --with-tcltk-libs='-L~/installed/lib' --with-tcltk-includes='-I~/installed/include'

  3. Next I wanted to locally install openpyxl to read/write into .xlsx files. So I tried installing using pip3 install openpyxl, but it said

    "ModuleNotFoundError: No module named '_ctypes'"

  4. I tried downloading libffi and installed it locally, then tried recompiling the Python files using the command below.

    ./configure --prefix=~/installed --with-tcltk-libs='-L~/installed/lib' --with-tcltk-includes='-I~/installed/include' LDFLAGS=`pkg-config --libs-only-L~/installed/lib/../lib64 -I~/installed/include

    But it wasn't able to pick them up, and during the make of the Python files it says, "Following modules built successfully but were removed because they could not be imported: _ctypes".

So, how do I make sure the required, locally installed libffi files get picked up? Please provide your suggestions!
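
A sketch of a configure invocation that points the build at the local libffi (assumptions: libffi was installed under $HOME/installed and ships a pkgconfig directory; $HOME is used instead of ~ because the tilde does not expand inside quotes):

export PKG_CONFIG_PATH=$HOME/installed/lib/pkgconfig:$HOME/installed/lib64/pkgconfig
./configure --prefix=$HOME/installed \
    --with-tcltk-libs="-L$HOME/installed/lib" \
    --with-tcltk-includes="-I$HOME/installed/include" \
    CPPFLAGS="-I$HOME/installed/include" \
    LDFLAGS="-L$HOME/installed/lib -L$HOME/installed/lib64 -Wl,-rpath,$HOME/installed/lib64"
make && make install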



from Locally compiled libffi files not getting picked up while recompiling python 3 package

How do I get the parent shell's path on windows in pure python?

The following will return the shell that launched the current process and the shell's full path. However, it uses a Python library full of C extensions. Sometimes a shell launches a shell, etc.; I'm just looking for the "most recent ancestor" process that is a shell.

How do I do this with just pure Python? (i.e. no C extensions; using windll.kernel32 and the like is fine. Of course, at some point the code will have to reach platform-specific native code to get process info; it just needs to be something already buried in the Python standard library, not something that needs compiling C.)

Shellingham at the moment can't do this.

from typing import Tuple, List

import os

import psutil

SHELL_NAMES = {
    'sh', 'bash', 'dash', 'ash',    # Bourne.
    'csh', 'tcsh',                  # C.
    'ksh', 'zsh', 'fish',           # Common alternatives.
    'cmd', 'powershell', 'pwsh',    # Microsoft.
    'elvish', 'xonsh',              # More exotic.
}

def find_shell_for_windows() -> Tuple[str,str]:
    names_paths:List[Tuple[str,str]]=[]
    current_process = psutil.Process(os.getppid())
    process_name, process_path = current_process.name(), current_process.exe()
    names_paths.append((process_name, process_path))
    for parent in current_process.parents():
        names_paths.append((parent.name(), parent.exe()))
    for n,p in names_paths:
        if n.lower() in SHELL_NAMES or n.lower().replace(".exe","") in SHELL_NAMES:
            return n,p
    return ["",""]

if __name__ == '__main__':
    print(find_shell_for_windows())
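
A sketch of a pure-ctypes direction (an assumption, not a drop-in replacement): walk a Toolhelp32 snapshot to build a pid -> (exe name, parent pid) table and follow the parent chain. This only yields names; getting the full path would additionally need OpenProcess plus QueryFullProcessImageNameW.

import ctypes
import ctypes.wintypes as wt
import os

TH32CS_SNAPPROCESS = 0x00000002

class PROCESSENTRY32(ctypes.Structure):
    _fields_ = [
        ("dwSize", wt.DWORD), ("cntUsage", wt.DWORD),
        ("th32ProcessID", wt.DWORD), ("th32DefaultHeapID", ctypes.c_size_t),
        ("th32ModuleID", wt.DWORD), ("cntThreads", wt.DWORD),
        ("th32ParentProcessID", wt.DWORD), ("pcPriClassBase", wt.LONG),
        ("dwFlags", wt.DWORD), ("szExeFile", ctypes.c_char * 260),
    ]

def parent_chain():
    """Return [(pid, exe name), ...] from this process up towards the root."""
    k32 = ctypes.windll.kernel32
    snap = k32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
    entry = PROCESSENTRY32()
    entry.dwSize = ctypes.sizeof(PROCESSENTRY32)
    table = {}
    ok = k32.Process32First(snap, ctypes.byref(entry))
    while ok:
        table[entry.th32ProcessID] = (entry.szExeFile.decode(errors="ignore"),
                                      entry.th32ParentProcessID)
        ok = k32.Process32Next(snap, ctypes.byref(entry))
    k32.CloseHandle(snap)

    chain, pid = [], os.getpid()
    while pid in table:
        name, ppid = table[pid]
        chain.append((pid, name))
        if ppid == pid or ppid not in table:
            break
        pid = ppid
    return chain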


from How do I get the parent shell's path on windows in pure python?

How to avoid duplicate push on top of History Api Stack

I'm facing a problem with duplicate pushes onto the top of the History API stack.

Let's say I have a user interface with a click pattern (pages 1 to 4; the order can be random, as the user chooses).

Question: I want to push all pages in the same order as they are clicked, without duplicates on top of the History API stack, so that when the browser forward/backward button is clicked the URL changes (right now 2 clicks are required).

Here is what my project setup looks like (click the same page 2 times to see the actual issue):

$('li').on('click',function(){
   var baseUrl = location.href;  //'http://www.siliconvalley.com/pages/';//it can be current url also
   var page = $(this).attr('data-page');
   var urlObj = new URL(baseUrl);
   urlObj.searchParams.append('page',page);
   $('#loadedPageContainer').prepend(`<p>${urlObj.href}</p>`);
   try{
       history.pushState({}, null, urlObj.href); //wanted to avoid duplicate on top of history api stack
   }catch(e){}

});

window.addEventListener('popstate', function(e){    
       console.log('page navigated',e);  // called 2 times in my project
      //navigateToCurrentUrlRoute();
});
ul{
  list-style:none;
  display:flex;
  justify-content: space-evenly;
}

ul li{
    border: 1px solid red;
    padding: 50px;
    cursor:pointer;
}

#loadedPageContainer{
    //display: flex;
    width: 500px;
    height: 200px;
    box-shadow: 0 1px 2px 3px #c7c7c7;
      margin: 0 auto;
/*   overflow:auto; */
  overflow-y:auto;
      line-height: 38px;
   padding:15px;
}

#loadedPageContainer p{
      box-shadow: 0 1px 2px 0px #dee5ea;
}

#loadedPageContainer p:first-child {
    box-shadow: 0 4px 2px 1px #de004b;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<ul>
  <li data-page="1">page 1</li>
  <li data-page="2">page 2</li>
  <li data-page="3">page 3</li>
  <li data-page="4">page 4</li>
</ul>
<p>Please click same pages 2 or 3 times to see actual issue below or in url on forward,backward browser button click</p>
<div id="loadedPageContainer">
  
</div>

Note: only the top of the History API stack should not have duplicates.

For a better view, here is a CodePen link: https://codepen.io/eabangalore/pen/KKgOwgN. Please help; thanks in advance!
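
A sketch of one possible guard, assuming only consecutive duplicates on top of the stack need to be prevented (as the note above says): compare the new URL against the one currently shown before pushing.

   try{
       if (urlObj.href !== location.href) {      // top of the stack already shows this URL
           history.pushState({}, null, urlObj.href);
       }
   }catch(e){}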



from How to avoid duplicate push on top of History Api Stack

D3 General update pattern transition not working on pie chart

I have a project where I am using a Pie/Doughnut chart to visualize my data. I have added the general update pattern in order to create a smooth transition when my data changes/updates.

In order to accomplish this I have followed an example which uses the general update pattern on a pie chart: Bl.ocks example.

The problem I am facing is that the chart doesn't update smoothly when updating the data. The chart instantly swaps from one state into the next.

In this and other examples they define an arcTween method where D3 interpolates between the previous angles and the angles from the newly updated data:

    arcTween(a) {
      let i = d3.interpolate(this._current, a);
      this._current = i(0);
      return t => {
        return this.arc(i(t));
      };
    }

I have also added the code where I join, enter and update my data on the pie chart. Here I first create the group element the pie is drawn in, define a transition that uses the arcTween method to transition between states, and lastly set this._current when the pie is created:

      this.g = this.svg
        .selectAll("doughnut")
        .data(data_ready)
        .enter()
        .append("g");

      this.g
        .transition()
        .duration(1500)
        .attrTween("d", this.arcTween);

      this.g
        .append("path")
        .attr("d", d => {
          return this.arc(d);
        })
        .attr("fill", "#206BF3")
        .attr("class", "slice")
        .attr("stroke", "#2D3546")
        .style("stroke-width", "2px")
        .each(d => {
          this._current = d;
        });

Here is my full code, written in Vue.js. I tried to get this to work inside a snippet, but I couldn't get it to work.

The images that are shown on top of the doughnut slices are locally stored:

<template>
  <div class="p-5 flex flex-col h-full">
    <h2 class="mb-3"></h2>
    <div ref="my_dataviz" class="flex justify-center"></div>
    <div class="grid grid-cols-2 gap-7 m-7">
      <div v-for="item in data" :key="item.key" class="flex">
        <img
          :src="require('@/assets/img/doughnut/' + item.icon)"
          alt=""
          class="doughnutIcon mr-4"
        />
        <div class="flex flex-col">
          <h3></h3>
          <p class="opacity-50">
             
          </p>
          <p class="opacity-50"> %</p>
        </div>
      </div>
    </div>
  </div>
</template>
<script>
import { converter } from "@/shared";
import * as d3 from "d3";

export default {
  name: "DoughnutChartItem",
  props: {
    title: {
      type: String,
      required: true
    },
    data: {
      type: Array,
      required: true
    },
    height: {
      type: Number,
      required: true
    },
    width: {
      type: Number,
      required: true
    },
    unit: {
      type: String
    }
  },
  data() {
    return {
      totalAmount: 0,
      svg: undefined,
      arc: undefined,
      radius: undefined,
      g: undefined
    };
  },
  created() {
    let total = 0;
    this.data.forEach(item => {
      total += item.value;
    });
    this.totalAmount = total;
  },
  mounted() {
    // set the dimensions and margins of the graph
    var margin = 1;

    // The radius of the pieplot is half the width or half the height (smallest one). I subtract a bit of margin.
    this.radius = Math.min(this.width, this.height) / 2 - margin;

    // append the svg object to the div called 'my_dataviz'
    this.svg = d3
      .select(this.$refs.my_dataviz)
      .append("svg")
      .attr("width", this.width)
      .attr("height", this.height)
      .append("g")
      .attr(
        "transform",
        "translate(" + this.width / 2 + "," + this.height / 2 + ")"
      );

    // Compute the position of each group on the pie:
    this.pie = d3.pie().value(function(d) {
      return d[1];
    });

    // declare an arc generator function
    this.arc = d3
      .arc()
      .outerRadius(100)
      .innerRadius(50);

    this.setSlicesOnDoughnut(this.data);

    this.addImagesToSlices();
  },
  methods: {
    animateSliceOnHover(radius, path, dir) {
      switch (dir) {
        case 0:
          path
            .transition()
            .duration(500)
            .ease(d3.easeBounce)
            .attr(
              "d",
              d3
                .arc()
                .innerRadius(100)
                .outerRadius(50)
            );
          path.style("fill", "#206BF3");
          break;

        case 1:
          path.transition().attr(
            "d",
            d3
              .arc()
              .innerRadius(50)
              .outerRadius(110)
          );
          path.style("fill", "white");
          break;
      }
    },
    percentageOfTotal(amount) {
      return Math.round((amount / this.totalAmount) * 100);
    },
    formatNumberValue(amount) {
      return converter.formatNumberValue(amount);
    },
    setSlicesOnDoughnut(data) {
      // Build the pie chart: Basically, each part of the pie is a path that we build using the arc function.
      var data_ready = this.pie(
        data.map(function(d) {
          return [d["key"], d["value"], d["icon"], d["hover"]];
        })
      );

      this.g = this.svg
        .selectAll("doughnut")
        .data(data_ready)
        .enter()
        .append("g");

      this.g
        .transition()
        .duration(1500)
        .attrTween("d", this.arcTween);

      this.g
        .append("path")
        .attr("d", d => {
          return this.arc(d);
        })
        .attr("fill", "#206BF3")
        .attr("class", "slice")
        .attr("stroke", "#2D3546")
        .style("stroke-width", "2px")
        .each(d => {
          this._current = d;
        });

      // Add tooltip
      d3.selectAll(".slice")
        .on("mouseover", this.mouseover)
        .on("mousemove", this.mousemove)
        .on("mouseout", this.mouseout);
    },
    addImagesToSlices() {
      var image_width = 20;
      var image_height = 20;

      this.g.selectAll(".logo").remove();

      this.g
        .append("svg:image")
        .attr("transform", d => {
          var x = this.arc.centroid(d)[0] - image_width / 2;
          var y = this.arc.centroid(d)[1] - image_height / 2;
          return "translate(" + x + "," + y + ")";
        })
        .attr("class", "logo")
        .attr("class", function(d) {
          return `${d.data[0]}-logo`;
        })
        .attr("href", function(d) {
          return require("@/assets/img/doughnut/" + d.data[2]);
        })
        .attr("width", image_width)
        .attr("height", image_height);
    },
    mouseover(event, data) {
      //Swap doughnut icon to blue icon
      d3.selectAll("." + data.data[0] + "-logo").attr("href", d => {
        return require("@/assets/img/doughnut/" + d.data[3]);
      });

      this.animateSliceOnHover(this.radius, d3.select(event.currentTarget), 1);

      const tip = d3.select(".tooltip");

      tip
        .style("left", `${event.clientX + 15}px`)
        .style("top", `${event.clientY}px`)
        .transition()
        .style("opacity", 0.98);

      tip.select("h3").html(`${data.data[0]}`);
      tip
        .select("h4")
        .html(`${this.formatNumberValue(data.data[1])} ${this.unit}`);
    },
    mousemove(event) {
      // Move tooltip
      d3.select(".tooltip")
        .style("left", `${event.clientX + 15}px`)
        .style("top", `${event.clientY}px`);
    },
    mouseout(event, data) {
      //Swap doughnut icon to white icon
      d3.selectAll("." + data.data[0] + "-logo").attr("href", function(d) {
        return require("@/assets/img/doughnut/" + d.data[2]);
      });

      // Animate slice
      var thisPath = d3.select(event.currentTarget);
      this.animateSliceOnHover(this.radius, thisPath, 0);

      // if (!thisPath.classed("clicked")) {
      //   this.animateSliceOnHover(this.radius, thisPath, 0);
      // }

      // Hide tooltip
      d3.select(".tooltip")
        .transition()
        .style("opacity", 0);
    },
    arcTween(a) {
      let i = d3.interpolate(this._current, a);
      this._current = i(0);
      return t => {
        return this.arc(i(t));
      };
    }
  },
  watch: {
    data() {
      this.setSlicesOnDoughnut(this.data);

      this.addImagesToSlices();
    }
  }
};
</script>
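
For comparison, a minimal sketch of how the bl.ocks example structures the join, assuming the d attribute and the stored _current angles live on the path elements rather than on the groups or the component; keeping the previous angles per path is usually what makes the transition interpolate instead of snapping:

      const arc = this.arc;

      function arcTween(d) {
        // `this` is the <path> element here, so every slice keeps its own previous angles
        const i = d3.interpolate(this._current, d);
        this._current = i(1);
        return t => arc(i(t));
      }

      const slices = this.svg.selectAll("path.slice").data(data_ready);

      slices.enter()
        .append("path")
        .attr("class", "slice")
        .attr("fill", "#206BF3")
        .attr("stroke", "#2D3546")
        .style("stroke-width", "2px")
        .each(function(d) { this._current = d; })  // remember the starting angles
        .attr("d", arc);

      slices.transition()                          // existing slices tween to the new angles
        .duration(1500)
        .attrTween("d", arcTween);

      slices.exit().remove();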


from D3 General update pattern transition not working on pie chart

ZeroMQ Connector How to check if an order has been closed by SL/TP?

There is little information about the ZeroMQ Connector. I was following the 7 video tutorials on how to interface Python with a MetaTrader 4 Terminal and had no problem running them on an IPython kernel, just as instructed in this video.

My issue is how to check whether an order has been closed by SL/TP. I am thinking one way is to read the order comment, since after a TP or SL the comment contains [tp], for example:

EURUSD_Trader[tp]

Can anyone share some sample code to check whether an order has been closed by SL/TP? I assume others may have the same problem, so I am posting it here.



from ZeroMQ Connector How to check if an order has been closed by SL/TP?

‘kwargs’ is empty in python decorator

I ran the decorator demo below.

def logger(func):
    def inner(*args, **kwargs):
        print(args)
        print(kwargs)
        return func(*args, **kwargs)
    return inner

@logger
def foo1(a, b, c, x=2, y=1):
    print(x * y)

foo1(6,7,8)

output is:

(6, 7, 8)
{}
2

Why is the dict empty? I think it should be {'x':2, 'y':1}
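
For reference, **kwargs only collects keyword arguments that are passed explicitly at the call site; the defaults x=2, y=1 are filled in inside foo1 itself and never travel through the wrapper. A quick check:

foo1(6, 7, 8)            # args = (6, 7, 8), kwargs = {}
foo1(6, 7, 8, y=5)       # args = (6, 7, 8), kwargs = {'y': 5}
foo1(6, 7, 8, x=3, y=5)  # args = (6, 7, 8), kwargs = {'x': 3, 'y': 5}

If the defaults are needed inside the decorator, one option (a sketch) is to bind the call against the wrapped function's signature:

import inspect

def logger(func):
    def inner(*args, **kwargs):
        bound = inspect.signature(func).bind(*args, **kwargs)
        bound.apply_defaults()           # now the defaults show up too
        print(bound.arguments)           # {'a': 6, 'b': 7, 'c': 8, 'x': 2, 'y': 1}
        return func(*args, **kwargs)
    return inner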



from ‘kwargs’ is empty in python decorator

Trouble sending Uint8List from Flutter to Android[Java] using method channels

I have a method channel set up in Flutter to send a Uint8List to Android [Java]. Android is receiving the data, but it looks like the bytes are getting wrapped at 127.

Ex: if I send [254, 100, 32] from flutter, it shows up as [-2, 100, 32] in Android.

Basically, any value in the list over 127 is getting converted to a negative value.

Here is how I have the method channel set up on the Flutter side:

static const MethodChannel platform = MethodChannel(SEND_SERIAL_MESSAGE_TO_ANDROID);

Future<void> sendMessageToAndroid(List<int> message) async {

  final bool result = await platform.invokeMethod('parseMessage', message);
}

And this is how I am receiving it on the Android side

@Override
public void configureFlutterEngine(@NonNull FlutterEngine flutterEngine) {
    super.configureFlutterEngine(flutterEngine);
    new MethodChannel(flutterEngine.getDartExecutor().getBinaryMessenger(), CHANNEL)
            .setMethodCallHandler(
                    (call, result) -> {
                        if (call.method.equals("parseMessage")) {
                            byte[] message = call.arguments();
                            parseMessage(message);
                        } else {
                            result.notImplemented();
                        }
                    }

            );
}

private void parseMessage(byte[] message){
    Log.i(LOG_TAG, message.toString());
}

Question: how do I stop values over 127 in my Uint8List from getting converted to a negative value in Java?

Let me know if you need to see any more of my code.
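
Java's byte type is signed, so the bits arrive intact but values above 127 print as negative numbers. Masking with 0xFF recovers the unsigned value; a sketch on the Android side (Arrays.toString is used because message.toString() only prints the array reference):

// needs: import java.util.Arrays;
private void parseMessage(byte[] message){
    int[] unsigned = new int[message.length];
    for (int i = 0; i < message.length; i++) {
        unsigned[i] = message[i] & 0xFF;   // -2 becomes 254 again
    }
    Log.i(LOG_TAG, Arrays.toString(unsigned));
}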



from Trouble sending Uint8List from Flutter to Android[Java] using method channels

Bound image not displaying in Vue project

I have a component that displays an image:

<template>
    <div>
        <img :src="image" />
    </div>
</template>

<script>
export default {
  name: 'MyComponent',
  props: {
    image: String
  }
}
</script>

When I try to use this, and bind an image path:

<MyComponent
    image="./assets/image-test.png">
</MyComponent>

But it won't display the image.

What have I tried?

I tried replacing the code in the component to directly reference the image. If I do the following then the image does display:

<img src="@/assets/image-test.png" />

(Only with the @ symbol)

I've tried replacing the path that's bound to the component with:

image="@/assets/image-test.png">

But that makes no difference.

I found this answer, and so tried this:

<div>
    <img :src="getImage(image)" />
</div>

  methods: {
    getImage(path) {
      return require(path);
    }
  }

Inside the component. That results in an error at runtime:

Cannot find module './assets/image-test.png'

The full stack trace:

runtime-core.esm-bundler.js?5c40:217 Uncaught Error: Cannot find module './assets/image-test.png'
    at webpackEmptyContext (eval at ./src/components sync recursive (app.js:1080), <anonymous>:2:10)
    at Proxy.getImage (MyApp.vue?059c:17)
    at Proxy.render (MyApp.vue?059c:3)
    at renderComponentRoot (runtime-core.esm-bundler.js?5c40:710)
    at componentEffect (runtime-core.esm-bundler.js?5c40:4193)
    at reactiveEffect (reactivity.esm-bundler.js?a1e9:42)
    at effect (reactivity.esm-bundler.js?a1e9:17)
    at setupRenderEffect (runtime-core.esm-bundler.js?5c40:4176)
    at mountComponent (runtime-core.esm-bundler.js?5c40:4134)
    at processComponent (runtime-core.esm-bundler.js?5c40:4094)

I also tried that with the @ symbol.

Please could someone point me in the right direction on this. I realise that there's some quirk with Vue images (it says so on the linked post), but I'm at a loss as to how to persuade it to bind the image.
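
webpack can only bundle assets whose paths it can resolve at build time, so a fully dynamic require(path) fails. One sketch that keeps the static prefix inside the component and passes only the file name (e.g. image="image-test.png", a hypothetical usage):

  methods: {
    getImage(name) {
      // the '@/assets/' part stays literal, so webpack can include that folder in the bundle
      return require('@/assets/' + name);
    }
  }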



from Bound image not displaying in Vue project

Remove volume from deployment using patch_namespaced_deployment not working

I'm trying to patch a deployment and remove its volumes using patch_namespaced_deployment (https://github.com/kubernetes-client/python) with the following arguments, but it's not working.

patch_namespaced_deployment(
            name=deployment_name,
            namespace='default',
            body={"spec": {"template": {
                "spec": {"volumes": None,
                "containers": [{'name': container_name, 'volumeMounts': None}]
                }
            }
            }
            },
            pretty='true'
        )

How to reproduce it:

Create this file app.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myclaim
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  annotations:
    pv.kubernetes.io/bound-by-controller: "yes"
  finalizers:
  - kubernetes.io/pv-protection
  labels:
    volume: pv0001
  name: pv0001
  resourceVersion: "227035"
  selfLink: /api/v1/persistentvolumes/pv0001
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 5Gi
  claimRef:
    apiVersion: v1
    kind: PersistentVolumeClaim
    name: myclaim
    namespace: default
    resourceVersion: "227033"
  hostPath:
    path: /mnt/pv-data/pv0001
    type: ""
  persistentVolumeReclaimPolicy: Recycle
  volumeMode: Filesystem
status:
  phase: Bound
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pv-deploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mypv
  template:
    metadata:
      labels:
        app: mypv
    spec:
      containers:
      - name: shell
        image: centos:7
        command:
        - "bin/bash"
        - "-c"
        - "sleep 10000"
        volumeMounts:
        - name: mypd
          mountPath: "/tmp/persistent"
      volumes:
      - name: mypd
        persistentVolumeClaim:
          claimName: myclaim
- kubectl apply -f app.yaml

- kubectl describe deployment.apps/pv-deploy (to check the volumeMounts and Volumes)

- kubectl patch deployment.apps/pv-deploy --patch '{"spec": {"template": {"spec": {"volumes": null, "containers": [{"name": "shell", "volumeMounts": null}]}}}}'

- kubectl describe deployment.apps/pv-deploy (to check the volumeMounts and Volumes)

- Delete the application now: kubectl delete -f app.yaml

- kubectl create -f app.yaml

- Patch the deployment using the python library function as stated above. The *VolumeMounts* section is removed but the Volumes still exist.

** EDIT ** Running the kubectl patch command works as expected. But after executing the Python script and running a describe deployment command, the persistentVolumeClaim is replaced with an emptyDir like this

  Volumes:
   mypd:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     
    SizeLimit:  <unset>
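
One alternative worth trying (a sketch, unverified for this exact case): send a JSON Patch (RFC 6902) body with explicit remove operations; my understanding is that the Python client switches to application/json-patch+json when the body is a list, which bypasses the strategic-merge handling of null values entirely.

from kubernetes import client, config

config.load_kube_config()
apps = client.AppsV1Api()

patch = [
    {"op": "remove", "path": "/spec/template/spec/volumes"},
    {"op": "remove", "path": "/spec/template/spec/containers/0/volumeMounts"},
]

apps.patch_namespaced_deployment(name="pv-deploy", namespace="default", body=patch)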


from Remove volume from deployment using patch_namespaced_deployment not working

Android paging and grouping models

I have MVVM-like architecture, data flow is the following:

  • I have data (image src url + linked data) in SQLite

  • I have Repository layer and method like this:

    fun <T> getPhotos(): LiveData<PagedList<PhotoInfo>>

  • I'm using androidx.paging library v.2 and it's PagedListAdapter as adapter for my RecyclerView

Now I want to group data by some field, e.g. by date, and have the ability to make groups either horizontal (with horizontal scrolling) or vertical.

I went through existing answers and solutions for similar issues, and here are the options I found:

Option 1

I can update query to group data and insert 'special' rows as separators. Then in Adapter I can have some branching logic to handle 'regular' rows and 'separators'.

I don't like this option because it involves this undesirable change of SQL query. Also I can't imagine how to implement 'horizontal' group scrolling in this case.

Option 2

I can have nested recycler views, but in this case I don't know how to update my code so that I could have PagedList<PhotoGroup> from 'linear' PagedList<PhotoInfo>.

The only thing I can think:

  • have two methods in repository, which return 'paged' data, for groups and item in a particular group, like:
    • fun <T> getPhotoGroups(): LiveData<PagedList<PhotoGroup>>
    • fun <T> getPhotosForGroup(groupId: Long): LiveData<PagedList<PhotoInfo>>
  • every group has its own PagedList and RecyclerView.

But I'm afraid it could be over-complicated and add overhead. I'm also not sure it will work well if the parent RecyclerView and a child RecyclerView scroll in the same direction (vertical).

I have a strong feeling I'm missing something and over-complicating things.



from Android paging and grouping models

Query segmentation with spell check

Assuming I have a fixed list of multi-word names like:

  • Water
  • Tocopherol (Vitamin E)
  • Vitamin D
  • PEG-60 Hydrogenated Castor Oil

I want the following input/output results:

  1. Water, PEG-60 Hydrogenated Castor Oil -> Water, PEG-60 Hydrogenated Castor Oil
  2. PEG-60 Hydrnated Castor Oil -> PEG-60 Hydrogenated Castor Oil
  3. wter PEG-60 Hydrnated Castor Oil -> Water, PEG-60 Hydrogenated Castor Oil
  4. Vitamin E -> Tocopherol (Vitamin E)

I need it to be performant, and to be able to recognize both when there are too many close matches and when there are no close matches. With 1 it's relatively easy because I can separate by the comma. Most times the input list is separated by commas, so this works 80% of the time, but even this has a small issue. Take for example 4: once separated, 4's ideal match is not returned by most spellcheck libraries (I've tried a number) because the edit distance to Vitamin D is much smaller. There are some websites that do this well, but I'm lost as to how to do it.

The second part of this problem is how to do word segmentation on top. Let's say a given list doesn't have commas; I need to be able to recognize that. The simplest example is that Water Vtamin D should become Water, Vitamin D. I can give a ton of examples, but I think this gives a good idea of the problem.
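
A small sketch of the matching half using only the standard library (difflib), with parenthesised aliases indexed separately so "Vitamin E" can reach "Tocopherol (Vitamin E)", plus a crude ambiguity check; the segmentation half would still need a separate pass:

import difflib

NAMES = ["Water", "Tocopherol (Vitamin E)", "Vitamin D",
         "PEG-60 Hydrogenated Castor Oil"]

# index every full name and every parenthesised alias, lower-cased
CANDIDATES = {}
for name in NAMES:
    CANDIDATES[name.lower()] = name
    if "(" in name:
        alias = name[name.index("(") + 1:name.index(")")]
        CANDIDATES[alias.lower()] = name

def best_match(token, cutoff=0.6, margin=0.05):
    token = token.strip().lower()
    hits = difflib.get_close_matches(token, list(CANDIDATES), n=2, cutoff=cutoff)
    if not hits:
        return None                                   # no close match at all
    if len(hits) > 1:
        ratio = lambda h: difflib.SequenceMatcher(None, token, h).ratio()
        if ratio(hits[0]) - ratio(hits[1]) < margin:
            return None                               # too many equally close matches
    return CANDIDATES[hits[0]]

print(best_match("wter"))        # Water
print(best_match("Vitamin E"))   # Tocopherol (Vitamin E)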



from Query segmentation with spell check

Is there a sample project that illustrates using MediaPipe AAR on an ArFragment (sceneform)?

The idea is to track the user's face and augment it with 3D models. MediaPipe would run on the camera feed from the arFragment to provide the info necessary to place a 3D model in the arFragment. Is there a sample project that combines MediaPipe and ArFragment?



from Is there a sample project that illustrates using MediaPipe AAR on an ArFragment (sceneform)?

Setting ticks on a time scale during zoom

The snippet below creates a single x axis with starting ticks of 10. During zoom I'm updating ticks on the rescaled axis with:

.ticks(startTicks * Math.floor(event.transform.k))

With .scaleExtent([1, 50]) I can get down from years to 3-hourly blocks fairly smoothly (besides a little label overlap here and there).

But, when I request the number of ticks applied on the scale (xScale.ticks().length) I get a different number to the one I just assigned.

Also, when I get the labels (xScale.ticks().map(xScale.tickFormat())) they differ from the ones rendered as I get deeper into the zoom.

Reading here:

An optional count argument requests more or fewer ticks. The number of ticks returned, however, is not necessarily equal to the requested count. Ticks are restricted to nicely-rounded values (multiples of 1, 2, 5 and powers of 10), and the scale’s domain can not always be subdivided in exactly count such intervals. See d3.ticks for more details.

I understand I might not get the number of ticks I request, but it's counter-intuitive that:

  • I request more and more ticks (per k) - between 10 and 500
  • Then the returned tick count fluctuates between 5 and 19.

Why is this? Is there a better or 'standard' way to update ticks whilst zooming for scaleTime or scaleUtc?

var margin = {top: 0, right: 25, bottom: 20, left: 25}
var width = 600 - margin.left - margin.right;
var height = 40 - margin.top - margin.bottom;

// x domain
var x = d3.timeDays(new Date(2020, 00, 01), new Date(2025, 00, 01));

// start with 10 ticks
var startTicks = 10;

// zoom function 
var zoom = d3.zoom()
  .on("zoom", (event) => {
  
    var t = event.transform;

    xScale
      .domain(t.rescaleX(xScale2).domain())
      .range([0, width].map(d => t.applyX(d))); 
      
    var zoomedRangeWidth = xScale.range()[1] - xScale.range()[0];
    var zrw = zoomedRangeWidth.toFixed(4);
    var kAppliedToWidth = kw = t.k * width;
    var kw = kAppliedToWidth.toFixed(4);
    var zoomTicks = zt = startTicks * Math.floor(t.k);
      
    svg.select(".x-axis")
      .call(d3.axisBottom(xScale)
        .ticks(zt) 
    );

    var realTicks = rt = xScale.ticks().length;
    console.log(`zrw: ${zrw}, kw: ${kw}, zt: ${zt}, rt: ${rt}`);
    console.log(`labels: ${xScale.ticks().map(xScale.tickFormat())}`);
    
  })
  .scaleExtent([1, 50]); 


// x scale
var xScale = d3.scaleTime()
  .domain(d3.extent(x))
  .range([0, width]); 

// x scale copy
var xScale2 = xScale.copy();

// svg
var svg = d3.select("#scale")
  .append("svg")
  .attr("width", width + margin.left + margin.right)
  .attr("height", height + margin.top + margin.bottom)
  .call(zoom) 
  .append("g")
  .attr("transform", `translate(${margin.left},${margin.top})`);

// clippath 
svg.append("defs").append("clipPath")
  .attr("id", "clip")
  .append("rect")
  .attr("x", 0)
  .attr("width", width)
  .attr("height", height);
    
// x-axis
svg.append("g")
  .attr("class", "x-axis")
  .attr("clip-path", "url(#clip)") 
  .attr("transform", "translate(0," + height + ")")
  .call(d3.axisBottom(xScale)
    .ticks(startTicks));
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/6.3.1/d3.min.js"></script>
<div id="scale"></div>
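
As the quoted documentation says, a time scale only ever produces "nice" time intervals and their multiples, so a request for hundreds of ticks still collapses to the nearest sensible interval; that is where the fluctuation comes from. One sketch of a more predictable approach is to hand the axis an explicit interval per zoom level instead of a raw count (the breakpoints below are made-up and would need tuning):

function tickInterval(k) {
  if (k < 3)  return d3.timeMonth.every(3);
  if (k < 10) return d3.timeMonth.every(1);
  if (k < 25) return d3.timeWeek.every(1);
  return d3.timeDay.every(1);
}

// inside the zoom handler:
// svg.select(".x-axis").call(d3.axisBottom(xScale).ticks(tickInterval(t.k)));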


from Setting ticks on a time scale during zoom

Python JoinableQueue call task_done in other process need twice

I have implemented a WorkerManager based on multiprocessing.Process and JoinableQueue. I try to handle process exceptions such as timeouts or unhandled exceptions after proc.join(timeout), evaluate proc.exitcode to decide how to handle them, and then call in_queue.task_done() to notify that the job has been dealt with by the exception-handling logic. However, it needs to be invoked twice, and I have no idea why it should be called twice. Could anyone figure out the reason here?

The whole code snippet:

# -*- coding=utf-8 -*-

import time
import threading
from queue import Empty
from multiprocessing import Event, Process, JoinableQueue, cpu_count, current_process

TIMEOUT = 3


class WorkersManager(object):

    def __init__(self, jobs, processes_num):
        self._processes_num = processes_num if processes_num else cpu_count()
        self._workers_num = processes_num
        self._in_queue, self._run_queue, self._out_queue = JoinableQueue(), JoinableQueue(), JoinableQueue()
        self._spawned_procs = []
        self._total = 0
        self._stop_event = Event()
        self._jobs_on_procs = {}

        self._wk_kwargs = dict(
            in_queue=self._in_queue, run_queue=self._run_queue, out_queue=self._out_queue,
            stop_event=self._stop_event
        )

        self._in_stream = [j for j in jobs]
        self._out_stream = []
        self._total = len(self._in_stream)

    def run(self):
        # Spawn Worker
        worker_processes = [
            WorkerProcess(i, **self._wk_kwargs) for i in range(self._processes_num)
        ]
        self._spawned_procs = [
            Process(target=process.run, args=tuple())
            for process in worker_processes
        ]

        for p in self._spawned_procs:
            p.start()

        self._serve()

        monitor = threading.Thread(target=self._monitor, args=tuple())
        monitor.start()

        collector = threading.Thread(target=self._collect, args=tuple())
        collector.start()

        self._join_workers()
        # TODO: Terminiate threads
        monitor.join(TIMEOUT)
        collector.join(TIMEOUT)

        self._in_queue.join()
        self._out_queue.join()
        return self._out_stream

    def _join_workers(self):
        for p in self._spawned_procs:
            p.join(TIMEOUT)

            if p.is_alive():
                p.terminate()
                job = self._jobs_on_procs.get(p.name)
                print('Process TIMEOUT: {0} {1}'.format(p.name, job))
                result = {
                    "status": "failed"
                }

                self._out_queue.put(result)
                for _ in range(2):
                    # NOTE: Call task_done twice
                    # Guessing:
                    # 1st time to swtich process?
                    # 2nd time to notify task has done?
                    # TODO: figure it out why?
                    self._in_queue.task_done()
            else:
                if p.exitcode == 0:
                    print("{} exit with code:{}".format(p, p.exitcode))
                else:
                    job = self._jobs_on_procs.get(p.name)
                    if p.exitcode > 0:
                        print("{} with code:{} {}".format(p, p.exitcode, job))
                    else:
                        print("{} been killed with code:{} {}".format(p, p.exitcode, job))

                    result = {
                        "status": "failed"
                    }

                    self._out_queue.put(result)
                    for _ in range(2):
                        # NOTE: Call task_done twice
                        # Guessing:
                        # 1st time to swtich process?
                        # 2nd time to notify task has done?
                        # TODO: figure it out why?
                        self._in_queue.task_done()

    def _collect(self):
        # TODO: Spawn a collector proc
        while True:
            try:
                r = self._out_queue.get()
                self._out_stream.append(r)
                self._out_queue.task_done()

                if len(self._out_stream) >= self._total:
                    print("Total {} jobs done.".format(len(self._out_stream)))
                    self._stop_event.set()
                    break
            except Empty:
                continue

    def _serve(self):
        for job in self._in_stream:
            self._in_queue.put(job)

        for _ in range(self._workers_num):
            self._in_queue.put(None)

    def _monitor(self):
        running = 0
        while True:
            proc_name, job = self._run_queue.get()
            running += 1
            self._jobs_on_procs.update({proc_name: job})
            self._run_queue.task_done()
            if running == self._total:
                break


class WorkerProcess(object):

    def __init__(self, worker_id, in_queue, run_queue, out_queue, stop_event):
        self._worker_id = worker_id
        self._in_queue = in_queue
        self._run_queue = run_queue
        self._out_queue = out_queue
        self._stop_event = stop_event

    def run(self):
        self._work()
        print('worker - {} quit'.format(self._worker_id))

    def _work(self):
        print("worker - {0} start to work".format(self._worker_id))
        job = {}
        while not self._stop_event.is_set():
            try:
                job = self._in_queue.get(timeout=.01)
            except Empty:
                continue

            if not job:
                self._in_queue.task_done()
                break

            try:
                proc = current_process()
                self._run_queue.put((proc.name, job))
                r = self._run_job(job)
                self._out_queue.put(r)
            except Exception as err:
                print('Unhandle exception: {0}'.format(err), exc_info=True)
                result = {"status": 'failed'}
                self._out_queue.put(result)
            finally:
                self._in_queue.task_done()

    def _run_job(self, job):
        time.sleep(job)
        return {
            'status': 'succeed'
        }


def main():

    jobs = [3, 4, 5, 6, 7]
    procs_num = 3
    m = WorkersManager(jobs, procs_num)
    m.run()


if __name__ == "__main__":
    main()

And the code in question is as follows:

   self._out_queue.put(result)
                    for _ in range(2):
                        # ISSUE HERE !!!
                        # NOTE: Call task_done twice
                        # Guessing:
                        # 1st time to swtich process?
                        # 2nd time to notify task has done?
                        # TODO: figure it out why?
                        self._in_queue.task_done()

I need to invoke self._in_queue.task_done() twice to notify the JoinableQueue that the job has been handled by the exception-handling logic.

I guess the first task_done() call might be to switch process context, or something else; according to my testing, only the second task_done() takes effect.

worker - 0 start to work
worker - 1 start to work
worker - 2 start to work

Process TIMEOUT: Process-1 5
Process TIMEOUT: Process-2 6
Process TIMEOUT: Process-3 7
Total 5 jobs done.

If task_done() is called only once, join() blocks forever and the program never finishes.
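
One plausible accounting (an assumption, illustrated with a minimal sketch rather than taken from this code): the unfinished-task counter of a JoinableQueue is incremented by put() and only decremented by task_done(), so a worker that is terminated mid-job leaves two puts unaccounted for: the job it had already taken off the queue, and the None sentinel that was put for it but never consumed. Both need a task_done() before join() can return.

from multiprocessing import JoinableQueue

q = JoinableQueue()
q.put("job")     # unfinished count: 1
q.put(None)      # sentinel for the worker -> unfinished count: 2
q.get()          # the worker takes the job, then dies before calling task_done()

q.task_done()    # accounts for the in-flight job
q.task_done()    # accounts for the sentinel the dead worker never consumed
q.join()         # returns only now that the counter is back to zero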



from Python JoinableQueue call task_done in other process need twice

Saturday, 30 January 2021

Halide non-contiguous memory layout

Is it possible to use non-C/Fortran ordering in Halide, where, given dimensions x, y, c, x varies the fastest and c varies the second fastest (in numpy the strides would be .strides = (W*C, 1, W))? Our memory layout is a stack of images where the channels of each image are stacked by scanline.

(Sorry if the layout still isn't clear enough, I can try to clarify). Using the python bindings, I always get ValueError: ndarray is not contiguous when trying to pass in my numpy array with .strides set.

I've tried changing the numpy array to use contiguous strides (without changing the memory layout) just to get it into Halide, then setting .set_stride in halide, but no luck. I'm just wanting to make sure I'm not trying to do something that can't/shouldn't be done.

I think this is similar to the line-by-line layout mentioned at https://halide-lang.org/tutorials/tutorial_lesson_16_rgb_generate.html, except more dimensions in C since the images are "stacked" along channel (to produce a W, H, C*image_count tensor)

Any advice would be much appreciated.

Thanks!



from Halide non-contiguous memory layout

How to remove a patterned background from an image and detect the objects?

I have an image that is a frame of a video. As you can see in image 1, the background has a pattern that makes it challenging to detect the Lego objects. With my current code, the object edges are detected incorrectly and get mixed up with the shapes of the background; the result is in image 2. The result with rectangles is in image 3.

(images 1, 2 and 3 omitted)

import cv2
import numpy as np

main_image = cv2.imread('image.jpg', 1)
convert_to_gray = cv2.cvtColor(main_image, cv2.COLOR_BGR2GRAY)
convert_to_blurred = cv2.GaussianBlur(convert_to_gray, (3, 3), 2)
a, b = cv2.threshold(convert_to_blurred, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
canny_result = cv2.Canny(convert_to_blurred, a / 6, b / 3)
k = np.ones((2, 2), np.uint8)
d = cv2.dilate(canny_result, k, iterations=3)

contours_found = cv2.findContours(d, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours_found = contours_found[0] if len(contours_found) == 2 else contours_found[1]

for cont in contours_found:
    x, y, w, h = cv2.boundingRect(cont)
    cv2.rectangle(main_image, (x, y), (x + w, y + h), (0, 0, 255), 3)

cv2.imshow('canny_result', canny_result)
cv2.imshow('main_image', main_image)
cv2.waitKey(0)

What should I do to detect the objects correctly?
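
One alternative worth sketching, under the assumption that the Lego pieces are strongly coloured while the patterned background stays close to grey: threshold the saturation channel instead of running edge detection, then filter small contours left over from the pattern.

import cv2
import numpy as np

img = cv2.imread('image.jpg', 1)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
sat = hsv[:, :, 1]                                   # saturation: high on bricks, low on the pattern

_, mask = cv2.threshold(sat, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8), iterations=2)

contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x
for c in contours:
    if cv2.contourArea(c) > 500:                     # ignore leftover speckles from the pattern
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 3)

cv2.imshow('mask', mask)
cv2.imshow('detections', img)
cv2.waitKey(0)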



from How to remove a patterned background from an image and detect the objects?

Firebase: Send OTP but disable sign in using phone number

I am working on a simple registration. I planned to use email to create an account; the problem is that upon submission of the form, the phone number is also signed in as a new user.

How can I prevent that from happening? All I need to use is the send-OTP feature.

      sendOTP() {
        var self = this
        const phoneNumber = '+' + this.phone;
        const appVerifier = window.recaptchaVerifier;
        firebase.auth().signInWithPhoneNumber(phoneNumber, appVerifier)
          .then((confirmationResult) => {
            window.confirmationResult = confirmationResult;
            self.otpSent = true
          }).catch((error) => {
            self.errorMsg = error.code + ': ' + error.message;
          });
      },
      register() {
        var self = this
        const code = this.otp;
        window.confirmationResult.confirm(code).then((result) => {
          console.log(result)
          firebase.auth().createUserWithEmailAndPassword(this.email, this.password)
            .then((userCredential) => {
              firebase.database().ref('users').set({
                email: this.email,
                name: this.name,
                password: this.password,
                phone: this.phone,
                userCredential: userCredential
              }, (error) => {
                if (error) {
                  // The write failed...
                  self.errorMsg = error.message
                } else {
                  self.$router.push('/dashboard')

                }
              });
            })
            .catch((error) => {
              self.errorMsg = error.message
            });

        }).catch((error) => {
          self.errorMsg = error.message;
        });
      }
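
One direction to explore (a sketch, not verified against this exact flow): create the email account first and then verify the phone with linkWithPhoneNumber on that user, so the OTP confirmation links the number to the existing account instead of signing in a separate phone-only user.

      firebase.auth().createUserWithEmailAndPassword(this.email, this.password)
        .then((userCredential) => {
          // sends the OTP without creating a second (phone-only) account
          return userCredential.user.linkWithPhoneNumber('+' + this.phone, window.recaptchaVerifier);
        })
        .then((confirmationResult) => {
          window.confirmationResult = confirmationResult;   // later: confirmationResult.confirm(otp)
        })
        .catch((error) => {
          this.errorMsg = error.message;
        });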


from Firebase: Send OTP but disable sign in using phone number

JSON serialized object gives error with multiprocessing calls - TypeError: XXX objects not callable error

I am using a JSON serializer helper function for easy access to dictionary objects (basically received as JSON).

jsondict.py

"""Utilities for working with JSON and json-like structures - deeply nested Python dicts and lists

This lets us iterate over child nodes and access elements with a dot-notation.
"""
import sys
isPy3 = sys.version_info[0]==3
if isPy3:
    def __alt_str__(v,enc='utf8'):
        return v if isinstance(v,bytes) else v.encode(enc)
    __strTypes__ = (str,bytes)
else:
    __alt_str__ = unicode
    __strTypes__ = (str,unicode)

class MyLocals(object):
    pass
mylocals = MyLocals()

def setErrorCollect(collect):
    mylocals.error_collect = collect

setErrorCollect(False)

def errorValue(x):
    if isinstance(x,__strTypes__):
         return repr(x) if ' ' in x else x
    return 'None' if x is None else str(x)
def condJSON(v,__name__=''):
    return JSONDict(v,__name__=__name__) if isinstance(v,dict) else JSONList(v,__name__=__name__) if isinstance(v,list) else v

def condJSONSafe(v,__name__=''):
    return JSONDictSafe(v,__name__=__name__) if isinstance(v,dict) else JSONListSafe(v,__name__=__name__) if isinstance(v,list) else v

class JSONListIter(object):
    def __init__(self, lst, conv):
        self.lst = lst
        self.i = -1
        self.conv = conv

    def __iter__(self):
        return self

    def next(self):
        if self.i<len(self.lst)-1:
            self.i += 1         
            return self.conv(self.lst[self.i])
        else:
            raise StopIteration

    if isPy3:
        __next__ = next
        del next

class JSONList(list):
    def __init__(self,v,__name__=''):
        list.__init__(self,v)
        self.__name__ = __name__
    def __getitem__(self,x):
        return condJSON(list.__getitem__(self,x),__name__='%s\t%s'%(self.__name__,errorValue(x)))
    def __iter__(self):
        return JSONListIter(self,condJSON)

class JSONListSafe(JSONList):
    def __getitem__(self,x):
        __name__='%s\t%s'%(self.__name__,errorValue(x))
        try:
            return condJSONSafe(list.__getitem__(self,x),__name__=__name__)
        except:
            if mylocals.error_collect:
                mylocals.error_collect(__name__)
            return JSONStrSafe('')
    def __iter__(self):
        return JSONListIter(self,condJSONSafe)

class JSONStrSafe(str):
    def __getattr__(self, attr):
        return self
    __getitem__ = __getattr__


class JSONDict(dict):
    "Allows dotted access"
    def __new__(cls,*args,**kwds):
        __name__ = kwds.pop('__name__')
        self = dict.__new__(cls,*args,**kwds)
        self.__name__ = __name__
        return self

    def __init__(self,*args,**kwds):
        kwds.pop('__name__','')
        dict.__init__(self,*args,**kwds)

    def __getattr__(self, attr, default=None):
        if attr in self:
            return condJSON(self[attr],__name__='%s\t%s'%(self.__name__,errorValue(attr)))
        elif __alt_str__(attr) in self:
            return condJSON(self[__alt_str__(attr)],__name__='%s\t%s'%(self.__name__,errorValue(attr)))
        elif attr=='__safe__':
            return JSONDictSafe(self,__name__=self.__name__)
        else:
            raise AttributeError("No attribute or key named '%s'" % attr)

    def sorted_items(self,accept=None, reject=lambda i: i[0]=='__name__'):
        if accept or reject:
            if not accept:
                f = lambda i: not reject(i)
            elif not reject:
                f = accept
            else: #both
                f = lambda i: accept(i) and not reject(i)
            return sorted(((k,condJSON(v,__name__==k)) for k,v in self.iteritems() if f((k,v))))
        else:
            return sorted(((k,condJSON(v,__name__==k)) for k,v in self.iteritems()))

    def sorted_keys(self):
        return sorted(self.keys())

class JSONDictSafe(JSONDict):
    "Allows dotted access"
    def __getattr__(self, attr, default=None):
        if attr in self:
            return condJSONSafe(self[attr],__name__='%s\t%s'%(self.__name__,errorValue(attr)))
        elif __alt_str__(attr) in self:
            return condJSONSafe(self[__alt_str__(attr)],__name__='%s\t%s'%(self.__name__,errorValue(attr)))
        elif attr=='__safe__':
            return self
        else:
            return JSONStrSafe('')

    def __getitem__(self,x):
        __name__='%s\t%s'%(self.__name__,errorValue(x))
        try:
            return condJSONSafe(dict.__getitem__(self,x),__name__=__name__)
        except KeyError:
            if mylocals.error_collect:
                mylocals.error_collect(__name__)
            return JSONStrSafe('')

    def sorted_items(self,accept=None, reject=lambda i: i[0]=='__name__'):
        if accept or reject:
            if not accept:
                f = lambda i: not reject(i)
            elif not reject:
                f = accept
            else: #both
                f = lambda i: accept(i) and not reject(i)
            return sorted(((k,condJSONSafe(v,__name__==k)) for k,v in self.iteritems() if f((k,v))))
        else:
            return sorted(((k,condJSONSafe(v,__name__==k)) for k,v in self.iteritems()))

If a JSON object is passed like below:

data = {'name': 'john', 'age': 20, 'address': {'city':'xyz', 'country':'XZ', 'zip': 1223}}

json_obj = condJSONSafe(data)

I am able to access data with dot notation.

print(json_obj.name) --> john
print(json_obj.address.country) --> XZ

It was working well until I implemented multiprocessing in my code to improve performance.

I extracted a certain amount of data from the JSON (after making it dot-notation accessible with the above helper function) and stored it in separate lists, like lists a, b, c.

And then I passed them into a multiprocessing pool,

with mp.Pool(processes=mp.cpu_count()) as pool:
    res = pool.starmap(self.process_records, zip(self.a, self.b, self.c))
pool.join()

and end up with

TypeError: 'JSONStrSafe' object is not callable

I tried this answer, but it does not work for me. Appreciate your help. Thanks in advance.

EDIT: reproduce example:

test.py

import jsondict
import multiprocessing as mp
import itertools

def process_records(data, metadata):
    print(data.name)
    print(metadata)
    #code to requirement


if __name__ == '__main__':
    data = {
        "metadata": "test_data",
        "cust_list": [
            {
                'name': 'john', 
                'age': 20, 
                'address': {
                    'city':'xyz', 
                    'country':'XZ', 
                    'zip': 1223
                }
            },
                {
                'name': 'michal', 
                'age': 25, 
                'address': {
                    'city':'abc', 
                    'country':'CX', 
                    'zip': 3435
                }
            },
                {
                'name': 'david', 
                'age': 30, 
                'address': {
                    'city':'mnl', 
                    'country':'TD', 
                    'zip': 6767
                }
            }
        ]
    }

    json_obj = jsondict.condJSONSafe(data)

    print(json_obj.metadata) #will print 'test_data'
    print(json_obj.cust_list[0].name) #will print 'john'
    print(json_obj.cust_list[2].address.city) #will print 'mnl'


    with mp.Pool(processes=mp.cpu_count()) as pool:
        res = pool.starmap(process_records, zip(json_obj.cust_list, itertools.repeat(json_obj.metadata))) # --> not working
        #res = pool.map(process_records, zip(json_obj.cust_list, itertools.repeat(json_obj.metadata))) --> not working
        #res = [pool.apply_async(process_records, d, json_obj.metadata) for d in json_obj.cust_list] --> not working
        #apply --> not working
    pool.join()

Output:

test_data
john
mnl
Traceback (most recent call last):
  File "c:/Users/mohanlal/Desktop/Mock/json_err/test_app.py", line 53, in <module>
    res = pool.starmap(process_records, zip(json_obj.cust_list, itertools.repeat(json_obj.metadata))) # --> not working
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 268, in starmap
    return self._map_async(func, iterable, starmapstar, chunksize).get()
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 608, in get
    raise self._value
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 385, in _handle_tasks
    put(task)
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\connection.py", line 206, in send
    self._send_bytes(_ForkingPickler.dumps(obj))
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\reduction.py", line 51, in dumps
    cls(buf, protocol).dump(obj)
TypeError: 'JSONStrSafe' object is not callable

I tried starmap, map, apply_async and apply, and got the same error for all of them.

I tried the solution given in the similar question linked above and modified my code as below, but the error is still raised.

import re
dunder_pattern = re.compile("__.*__")
protected_pattern = re.compile("_.*")

class JSONStrSafe(str):
    def __getattr__(self, attr):
        if dunder_pattern.match(attr) or protected_pattern.match(attr):
            return super().__getattr__(attr)
        return self

    def __getstate__(self): return self.__dict__
    def __setstate__(self, d): self.__dict__.update(d)

    __getitem__ = __getattr__

But the issue persists.

As suggested in the comments, I made the change to __getattr__ in all 3 places and tried again. Now I get a different error, as below:

Process SpawnPoolWorker-1:
Traceback (most recent call last):
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 249, in _bootstrap
    self.run()
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 108, in worker
    task = get()
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\queues.py", line 345, in get
    return _ForkingPickler.loads(res)
  File "c:\Users\mohanlal\Desktop\Mock\json_err\jsondict.py", line 89, in __new__
    __name__ = kwds.pop('__name__')
Process SpawnPoolWorker-2:
Process SpawnPoolWorker-4:
Traceback (most recent call last):
Traceback (most recent call last):
KeyError: '__name__'
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 249, in _bootstrap
    self.run()
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 108, in worker
    task = get()
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\queues.py", line 345, in get
    return _ForkingPickler.loads(res)
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 249, in _bootstrap
    self.run()
  File "c:\Users\mohanlal\Desktop\Mock\json_err\jsondict.py", line 89, in __new__
    __name__ = kwds.pop('__name__')
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
KeyError: '__name__'
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\pool.py", line 108, in worker
    task = get()
  File "C:\Users\mohanlal\AppData\Local\Programs\Python\Python36\lib\multiprocessing\queues.py", line 345, in get
    return _ForkingPickler.loads(res)
  File "c:\Users\mohanlal\Desktop\Mock\json_err\jsondict.py", line 89, in __new__
    __name__ = kwds.pop('__name__')
KeyError: '__name__'


from JSON serialized object gives error with multiprocessing calls - TypeError: XXX objects not callable error

newline='' doesn't seem to work for me alone

First off, this is an annoying question, and I request everyone reading it to read it fully. This is an issue faced by me alone, not by anyone else.

I have a CSV file as follows:

1,a
2,b
3,c
4,d

I want to modify this file using Python 3.7.2 on my Windows 7 machine.

import csv
row = [5, 'e']
with open('modify_existing.csv') as readFile:
    reader = csv.reader(readFile)
    lines = list(reader)
    lines[3] = row
with open('modify_existing.csv', 'w', newline='') as writeFile:
    writer = csv.writer(writeFile)
    writer.writerows(lines)
print(lines)

I can assure you that for everyone except me the output is correct; I don't know why my output is wrong. The newline parameter doesn't seem to work at all.

And the annoying thing is that the output I get is not consistent. I get blank lines when executing the first time and three blank lines when executing the second time. Sometimes I don't get blank lines at all; other times I get blank lines the first time but not the second.

I am actually fed up with this error and don't know where else to ask. Sorry if I am asking about something that doesn't happen for you. Even for me, on my other laptop with the same Windows 7 (64-bit) and the same Python 3.7.2, no blank lines appear.

It occurs only on this laptop. I even completely removed Python and installed 3.7.2 again: same result.

Could anyone please help me with this issue?

EDIT: When I print readFile, I get the correct output, even when there are blank lines in my CSV file (whether they appear the first or the second time). So I can assure you that the mistake is in writing the CSV file.



from newline='' doesn't seem to work for me alone

Session cookie not deleted on IOS tablet and smartphone

I am using the Node.js Express library to clear the cookie on logout:

app.post('/logout', (req, res, next) => {
  // cookie-session library used, hence req.session = null is valid
  req.session = null;
  res.clearCookie('auth');
  res.end();
});

It works perfectly on all desktop machines and Android devices, but not in iOS browsers (Firefox, Chrome, Safari, among others). All iOS test devices are on version 14+.

I tried the options available for clearCookie suggested in res.clearCookie function doesn't delete cookies, to no avail.

However, using incognito mode on iOS devices works (the cookie gets cleared), and closing the browser (which clears the session) also works.

When setting the cookie (at login) I use the default:

res.cookie('auth')
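Note that Express only clears a cookie when clearCookie() is called with options identical to those used when the cookie was set (expires and maxAge aside). A minimal sketch of setting and clearing with explicit, matching options — the cookie value, key and option values here are placeholders, not the app's actual configuration:

const express = require('express');
const cookieSession = require('cookie-session');

const app = express();
app.use(cookieSession({ name: 'session', keys: ['placeholder-key'] }));

// Reuse the same options at login and logout: the cookie is only cleared
// when these attributes match the ones it was set with.
const authCookieOptions = { path: '/', httpOnly: true, secure: true, sameSite: 'lax' };

app.post('/login', (req, res) => {
  res.cookie('auth', 'placeholder-token', authCookieOptions);
  res.end();
});

app.post('/logout', (req, res) => {
  req.session = null;                          // cookie-session: drops the session cookie
  res.clearCookie('auth', authCookieOptions);  // same options as when it was set
  res.end();
});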

Does anyone have a clue to what's going on?



from Session cookie not deleted on IOS tablet and smartphone

cross origin for amazon lambda function from localhost in gatsby site

I have the following code, which works when I run it as a local serverless function with netlify dev, but I need it to run cross-origin from a dev server to the hosted server function. I put the function in an AWS Lambda function, but I am getting a cross-origin blocked error on my https://dev.website.com; I thought I had the correct headers in the return object, so I'm not sure why I am getting a cross-origin error.

Any help would be great

const sanityClient = require("@sanity/client");
const client = sanityClient({
  projectId: "random-id",
  dataset: "production",
  useCdn: true,
});

exports.lambdaHandler = async (event, context) => {
  var body = JSON.parse(event.body);

  //console.log(body.price_id)

  try {
    const checkPriceId = async (test) => {
      const query = `*[_type == "products" && price_id == "${body.price_id}"]`;

      const documents = await client.fetch(query, {}); // this could throw

      return documents.map((document) => document.sold);
    };

    var ok = checkPriceId().then((test) => {
      return new Promise(function (resolve, reject) {
        //console.log(test) // this will log the return value from line 7
        console.log(test);
        resolve(test);
      });
    });

    var bools = await ok;
    // prettier-ignore

    return {
      statusCode: 200,
      headers: {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type',
        'Access-Control-Allow-Methods':'GET, POST, OPTION',
      },
      body: JSON.stringify({
        sold: bools,
      }),
    };
  } catch (err) {
    return { statusCode: 500, body: err.toString() };
  }
};

This is my request to the function, if that helps:

 var fetchUrl = "https://random.executue-api.aws.com/prod/sold" // not exact

 var fetchData = async function () {
    const response = await fetch(fetchUrl, {
      method: "post",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        price_id: final,
      }),
    })
      .then(res => {
        return res.json()
      })
      .catch(error => console.log(error))

    return response
  }

Update:

I tried adding CORS the way suggested in the answer below, but it failed (first screenshot), so I tried manually adding the method response (second screenshot).

I still get a cross-domain error, and I have changed the domain so it is now HTTPS as well. I'm really stuck here.

Screenshot: enabling CORS fails

Screenshot: manual CORS addition

I was looking into this more, and it seems that before the actual POST the browser does a CORS preflight check with the OPTIONS method, so I added the same access-control headers there and redeployed, but it did not work. I don't quite get this.
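For what it's worth, with a Lambda proxy integration the function itself has to answer that OPTIONS preflight. A rough sketch of what that could look like — the 204 status and the exact header values are assumptions, not the project's actual setup:

// Sketch: assumes an API Gateway Lambda *proxy* integration, where
// event.httpMethod is available and the function answers the preflight itself.
const CORS_HEADERS = {
  'Access-Control-Allow-Origin': '*',
  'Access-Control-Allow-Headers': 'Content-Type',
  'Access-Control-Allow-Methods': 'GET, POST, OPTIONS',
};

exports.lambdaHandler = async (event) => {
  // Answer the preflight before doing any real work.
  if (event.httpMethod === 'OPTIONS') {
    return { statusCode: 204, headers: CORS_HEADERS, body: '' };
  }

  // ...normal POST handling goes here; every response (including errors)
  // should carry the same CORS headers so the browser accepts it.
  return {
    statusCode: 200,
    headers: CORS_HEADERS,
    body: JSON.stringify({ ok: true }),
  };
};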




from cross origin for amazon lambda function from localhost in gatsby site

Can I use MediaRecorder to record audio with capture silence detection in Android?

I use MediaRecorder to record audio. Is it possible to detect silence and stop recording when the user hasn't spoken for a few seconds, and then automatically start recording again when the user resumes speaking?



from Can I use MediaRecorder to record audio with capture silence detection in Android?

How to convert a "raw" DOM Event to a React SyntheticEvent?

I use two libraries: one emits "raw" DOM events (lib.dom.d.ts), and the other consumes React.SyntheticEvents.

What is the best way to cleanly transform the raw event into a SyntheticEvent?
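React doesn't expose a public constructor for SyntheticEvent, so one hedged approach is a small adapter object that mimics the SyntheticEvent shape the consuming library reads. A sketch — exactly which fields that library needs is an assumption:

// Sketch: build an object with the usual SyntheticEvent fields from a native
// DOM event. This is an adapter, not a real React SyntheticEvent instance.
function toSyntheticLike(nativeEvent) {
  return {
    nativeEvent,
    type: nativeEvent.type,
    target: nativeEvent.target,
    currentTarget: nativeEvent.currentTarget,
    bubbles: nativeEvent.bubbles,
    cancelable: nativeEvent.cancelable,
    timeStamp: nativeEvent.timeStamp,
    defaultPrevented: nativeEvent.defaultPrevented,
    isTrusted: nativeEvent.isTrusted,
    preventDefault: () => nativeEvent.preventDefault(),
    isDefaultPrevented: () => nativeEvent.defaultPrevented,
    stopPropagation: () => nativeEvent.stopPropagation(),
    isPropagationStopped: () => false,
    persist: () => {},
  };
}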



from How to convert a "raw" DOM Event to a React SyntheticEvent?

Friday, 29 January 2021

How have FreeCAD screwed inheritancy from a python class?

I would like to inherit from Vector, a FreeCAD python class.

In the following test code, I either use my own Vector class or import the Vector class from FreeCAD. It works fine with my own Vector class, but when using the FreeCAD Vector it prints a vector of zeroes and then crashes with 'Base.Vector' object has no attribute 'extra', implying the constructor of ExtraVector has been bypassed and ExtraVector is just a Vector.

(I suspect that the FreeCAD vector is a wrapper around C code)

  1. What has FreeCAD done? How do you mess with a Python class to give it this behavior?

  2. How do I get around it? Inheritance is the best solution to my problem. Composition means writing a lot more code, and I don't like WET.

# ---------------------------------------------------

# Vector is either defined in this file or imported
# (comment out to swap over)

#from FreeCAD import Vector  # Probably a wrapper around C code

class Vector :  
  def __init__ ( self, x, y, z ) :
    self.x = x
    self.y = y
    self.z = z

# ---------------------------------------------------

class ExtraVector ( Vector ) :
  def __init__ ( self, x, y, z, extra ) :
    super().__init__ ( x, y, z )
    self.extra = extra

def Test ( ) :

  evect = ExtraVector ( 1, 2, 3, 4 )

  print ( "x = ", evect.x,  " y = ", evect.y, " z = ", evect.z )
  print ( "e = ", evect.extra )

Test()

When my Vector class is used (FreeCAD Vector commented out), the output is:

x =  1  y =  2  z =  3
e =  4

When the FreeCAD Vector is used (my Vector commented out), the output is:

x =  0.0  y =  0.0  z =  0.0
Traceback (most recent call last):
  File "ExtraVector.py", line 28, in <module>
    Test()
  File "ExtraVector.py", line 26, in Test
    print ( "e = ", evect.extra )
<class 'AttributeError'>: 'Base.Vector' object has no attribute 'extra'



from How have FreeCAD screwed inheritancy from a python class?

Replacing destination using NavGraph (transaction when using app:popUpTo)

We need to implement state-machine-driven navigation, where the currently shown screen depends on the current state. As such, our approach so far has been to use global actions and navigate to the right screen whenever the state changes. At first glance everything seemed to work great; however, we've started noticing some weird behaviours when using the app:popUpTo attribute on the actions.

In our concrete case, we have 2 sub nav graphs (let's call them NG1 and NG2), where NG2 is state-machine driven. Let's say we're in screen A in NG1 and then we navigate to NG2, which adds screen B. We now have 2 options:

  1. If we navigate to screen C (in NG2) using an action with app:popUpTo="@+id/NG2" (effectively aiming to replace B with C), then, sometimes, Fragment A briefly starts (onStart) for a few tens of milliseconds and then stops, before starting C. So instead of B -> C, we get B -> A -> C. It seems that an action with a popUpTo is not transactional.
  2. If we navigate to screen C (in NG2) using an action without app:popUpTo="@+id/NG2", screen A does not start, but B is left in the back stack. As such, if screen B needs to be shown again, we end up with a backstack looking like A / B / C / B, which is not what we're aiming for.

Of course, the behaviour we're looking for is the one attempted in 1, where we can move from a screen X to a screen Y to a screen Z, popping Y but without briefly starting X. Is there anything we're doing wrong, or is it a bug in the android-navigation component?


I'm also adding some logs that I've just replicated (the timestamps are actual):

// Here we navigate from A to B
2020-03-09 19:14:14.999 BaseAppNavigator: Navigating to directions: com.example.app:id/toNavGraph2
2020-03-09 19:14:15.006 BaseAppNavigator.OnDestinationChangedListener: Navigation destination changed: com.example.app:id/fragmentB
2020-03-09 19:14:15.007 BaseAppNavigator.OnDestinationChangedListener: Active navigation graph changed to: com.example.app:id/navGraph2
2020-03-09 19:14:15.235 BaseViewModelFragment: OnStart: FragmentB
2020-03-09 19:14:15.247 BaseViewModelFragment: OnStop: FragmentA

// Here we trigger navigate from B to C, with a destination with popUpTo="navGraph2"
2020-03-09 19:14:56.422 BaseAppNavigator: Navigating to directions: com.example.app:id/toFragmentC
2020-03-09 19:14:56.430 BaseAppNavigator.OnDestinationChangedListener: Navigation destination changed: com.example.app:id/fragmentC
2020-03-09 19:14:56.516 BaseViewModelFragment: OnStart: FragmentA
2020-03-09 19:14:56.529 BaseViewModelFragment: OnStop: FragmentB
2020-03-09 19:14:56.537 BaseViewModelFragment: OnDestroy: FragmentB
2020-03-09 19:14:56.580 BaseViewModelFragment: OnStart: FragmentC
2020-03-09 19:14:56.613 BaseViewModelFragment: OnStop: FragmentA

with the following action:

<action
      android:id="@+id/toFragmentC"
      app:destination="@id/fragmentC"
      app:popUpTo="@+id/NavGraph2" />


from Replacing destination using NavGraph (transaction when using app:popUpTo)

react native fetch not getting the same content as post man

I'm having a little problem with my request to get the HTML from https://readnovelfull.com/beauty-and-the-beast-wolf-hubby-xoxo/chapter-1-i-would-not-be-responsible.html, as an example.

I can get all the HTML from the other URLs, e.g. the novel detail, latest updated, etc., but not when I'm getting the details for the chapters.

I tested those URLs on Postman and also on https://codebeautify.org/source-code-viewer, and there was no problem getting the content of the chapter, which exists under the div #chr-content.

So I am a bit lost now; what am I doing wrong?

Here is my fetch call, which works on other novel sites.

  static async getHtml(
    url: string
  ): Promise<HTMLDivElement> {
    console.log(`Sending html request to ${url}`);
    var container = parse('<div>test</div>') as any;
    try {
      let headers = new Headers({
        Accept: '*/*',
        'User-Agent':
          'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'
      });

      var data = await fetch(url, {
        method: 'GET',
        headers: headers,
      });
      if (!data.ok) {
        const message = `An error has occured:${data.status}`;
        console.log(message);
      } else {
        var html = await data.text();
        console.log('Data is ok. proceed to parse it');
        container = parse('<div>' + html + '</div>') as any;
      }
    } catch (e) {
      console.log(e);
    }
    return container as HTMLDivElement;
  }

I should mention that I am not getting any error whatsoever; it's just that the HTML I get is not the same as what Postman and the other site get.

Update

OK, so I did some research on the site and this is what I came up with.

The site needs an X-CSRF-TOKEN, and I was able to extract and find these values:

const csrf = 'x09Q6KGqJOJJx2iHwNQUa_mYfG4neV9EOOMsUBKTItKfNjSc0thQzwf2HvCR7SQCqfIpC2ogPj18jG4dQPgVtQ==';
const id = 774791;

which I need to send in a request to https://readnovelfull.com/ajax/increase-chapter-views with the values above, and this will send back true/false.

Now I tried to include the CSRF token in my fetch call afterwards, but it's still the same as before: no data.

Any idea if I am still doing something wrong?
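For reference, a sketch of attaching that token to the chapter request — the X-CSRF-TOKEN header name comes from what the site appears to expect, and whether the matching session cookie also has to be sent (credentials: 'include') is an assumption:

// Sketch: csrf is the value scraped from the page, as described above.
async function getChapterHtml(url, csrf) {
  const response = await fetch(url, {
    method: 'GET',
    headers: {
      Accept: '*/*',
      'X-CSRF-TOKEN': csrf,
    },
    credentials: 'include', // send the cookies set when the token was issued (assumption)
  });
  return response.text();
}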



from react native fetch not getting the same content as post man

How can I prevent WifiManager.getConnectionInfo from triggering a location check?

When my app has location permissions on Android and I want to get the IP of the phone, calling WifiManager.getConnectionInfo triggers a location event on AppOpsManager. Since Google says my app is using location in the background, I think this is one of the reasons why. My app needs the IP of the phone for communication with other devices, and it starts getting that information in Application.onCreate(). I can probably move the code that gets the IP so it is called later (not in onCreate), but it isn't a simple task and will probably involve a serious rewrite.

I do have other ways of getting the IP but given the variety of devices out there, this is one of the more reliable ways.

Is there a solution for this issue?

Edit: This issue just became a bigger problem for me because a third-party library I require apparently makes a call to WifiManager after the app goes into the background as well.



from How can I prevent WifiManager.getConnectionInfo from triggering a location check?

Token Do Not Work On Fetch Request To PHP File

I have two files: one exposes a session token, the other answers a JavaScript fetch. File one contains:

<?php
session_start();
unset($_SESSION['sessionToken']);
$_SESSION['sessionToken'] = vsprintf('%s%s-%s-%s-%s-%s%s%s', str_split(bin2hex(random_bytes(16)), 4));
?><!DOCTYPE HTML>
<html>
...
    <meta content="<?php echo $_SESSION['sessionToken'] ?? '12345'; ?>" name="csrf-token" />

and further on, in the same file, I make a fetch request like this:

fetch('src/exposeDelivery.php', {      
           //mode: 'same-origin',
           credentials: 'same-origin',     //'same-origin'    'omit'     'include
           method: 'POST',
           body: JSON.stringify( jsonArr ),
           headers: {
            'x-csrf-token':  document.head.querySelector('meta[name="csrf-token"]').content,
            "Content-Type": "application/json",
            "Accept":       "application/json"
           }
      })
        .then(response => {
          if (!response.ok) {
            console.log("response: %s  | %o",response.statusText,response);
            throw new Error('Network response was not ok');
          }
          return response.json();
        })

The fetch request runs on an interval, like let ask = setInterval(makeRequest, 20000);

My second file, where the request goes to, looks like this:

    <?php
    session_start();
    header('Access-Control-Allow-Origin: *');
    header('Access-Control-Allow-Methods: GET, POST');
    header('Access-Control-Allow-Headers: Content-Type, X-Requested-With, x-csrf-token');
    $csrf = isset($_SERVER["HTTP_X_CSRF_TOKEN"])
          ? trim($_SERVER["HTTP_X_CSRF_TOKEN"])
          : 0;
    $response['t_token']= $csrf;
    $response['sessionToken'] = $_SESSION['sessionToken'] ? $_SESSION['sessionToken'] : "noSessionToken";

    header('HTTP/1.0 200 OK');
    header('Content-Type: application/json');

    echo json_encode($response);

Now I would like to check whether $csrf == $_SESSION['sessionToken']. The first time I call the fetch request this is true, but the second time the request is called it is different. What is wrong here? Is file one with the fetch request calling itself on every request? Can I maybe solve it with another request?



from Token Do Not Work On Fetch Request To PHP File

How to remove style attribute added with jquery

I am using a devExpress table with some custom requirements.

(screenshot)

In the screenshot I have certain cells disabled. However, the user wants all cells to look disabled other than the selected row.

Using this

   window
  .$("td")
  .not(document.getElementById(this.state.selection[0]))
  .not(document.getElementsByClassName(this.state.selection[0]))
  .not("td:first-child")
  .not(window.$("td:contains('iPlay')"))
  .not(window.$("td:contains('iLOE')"))
  .not(window.$("td:contains('iInvest')"))
  .not(window.$("td:contains('SPACER')"))
  .not(window.$("td:contains('$MM')"))
  .not(window.$("td:contains('$/BOE')"))
  .attr("style", "color:#868a8f");
window
  .$("td > div > div > div > input")
  .not(document.getElementsByClassName(this.state.selection[0]))
  .attr("style", "color:#868a8f");

I managed to achieve my desired result on page load (second screenshot).

My problem is that when I select a new row, I cannot remove the color I applied to it earlier, when it was not selected. I am trying to use "has" to find the selected row and change the color back to inherit, or to completely remove the style attribute.

    window
  .$("td")
  .has(document.getElementById(this.state.selection[0]))
  .has(document.getElementsByClassName(this.state.selection[0]))
  .not("td:first-child")
  .not(window.$("td:contains('iPlay')"))
  .not(window.$("td:contains('iLOE')"))
  .not(window.$("td:contains('iInvest')"))
  .not(window.$("td:contains('SPACER')"))
  .not(window.$("td:contains('$MM')"))
  .not(window.$("td:contains('$/BOE')"))
  .attr("style", "color:inherit");
window
  .$("td > div > div > div > input")
  .has(document.getElementsByClassName(this.state.selection[0]))
  .attr("style", "color:inherit");

If it helps, I do have the ids of the rows that are NOT selected. I tried to do something with that but did not have any luck:

  const otherRows = ExpensesUtils.ROW_PROPS.filter(x => x !== this.state.selection[0]);
for (let i = 0; i < otherRows.length; i += 1) {
  window
  .$("td")
  .has(document.getElementById(otherRows[i]))
  .has(document.getElementsByClassName(otherRows[i]))
  .attr("style", "color:inherit");
  window
  .$("td > div > div > div > input")
  .has(document.getElementById(otherRows[i]))
  .has(document.getElementsByClassName(otherRows[i]))
  .attr("style", "color:inherit");
}
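A possibly simpler route than rebuilding the style attribute: jQuery's .css() removes an inline property again when it is set to an empty string, so the grey can be dropped everywhere and then re-applied around the current selection. A sketch under the question's assumptions (the id/class usage comes from the code above; the :contains label filters are omitted for brevity):

// Sketch: selectedId is this.state.selection[0] from the component above.
function greyOutAllExcept(selectedId) {
  // Drop the grey everywhere first: an empty string removes that inline property.
  window.$("td").css("color", "");
  window.$("td > div > div > div > input").css("color", "");

  // Re-apply it to everything except the selected row.
  window.$("td")
    .not("#" + selectedId)
    .not("." + selectedId)
    .not("td:first-child")
    .css("color", "#868a8f");
  window.$("td > div > div > div > input")
    .not("." + selectedId)
    .css("color", "#868a8f");
}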

Here is a link to the table's HTML.

this.state.selection[0] is the selected rowId from the list below

I have applied the rowIds as classes in the nested components; I could not figure out another way to access them.

  const ROW_PROPS = [
  "leaseAndWellExpense",
  "leaseAndWellExpenseBoe",
  "iloeLeaseAndWellExpense",
  "iloeLeaseAndWellExpenseBoe",
  "gnaLeaseAndWell",
  "gnaLeaseAndWellBoe",
  "transportation",
  "transportationBoe",
  "divisionGnA",
  "divisionGnABoe",
  "gatheringProcessing",
  "gatheringProcessingBoe",
  "hqGnA",
  "hqGnABoe",
  "interestExpense",
  "interestExpenseBoe",
  "netProdBoe",
  "leaseImpairments",
  "leaseImpairmentsBoe",
  "ddaProducing",
  "ddaProducingBoe",
  "iInvestDdaProducing",
  "iInvestDdaProducingBoe",
  "ddaGatheringProcessing",
  "ddaGatheringProcessingBoe",
  "iInvestDdaGatheringProcessing",
  "iInvestDdaGatheringProcessingBoe",
  "marketingCosts",
  "otherIncomeExpense",
  "otherIncomeExpenseBoe",
  "otherRevenue",
  "incomeTaxProvision",
  "incomeTaxProvisionBoe",
  "severanceTaxes",
  "severanceTaxesPercent",
  "currentTaxes",
  "currentTaxesRate",
  "netWellHeadRevenue",
];


from How to remove style attribute added with jquery

Can I apply CSS to a flex-item when it wraps onto a new row?

.wrapper {
  border: 5px solid pink;
  display: flex;
  flex-wrap: wrap;
  justify-content: center;
}

.a-fc {
  background-color: purple;
  width: 300px;
  /*height: 100px;*/
}

.b-fc {
    background-color: orange;
    display: flex;
    flex-direction: column;
    /*flex-wrap: wrap;*/
    flex-basis:70px;
    flex-grow:1;
}

.b-fc > * {
  flex-grow: 1;
  flex-basis: 100px;
}

.b-fc > *:nth-child(1) {
  background-color: red;
}

.b-fc > *:nth-child(2) {
  background-color: blue;
}

.b-fc > *:nth-child(3) {
  background-color: green;
}
<div class="wrapper">
  <div class="a-fc">
   <div>a1</div>
  </div>
  <div class="b-fc">
  <div>b1</div><div>b2</div><div>b3</div>
  </div>
</div>

FC = flex-container. FI = flex-item.

I am able to place .b-fc onto a new row when the space left for it to exist on the original row goes below 70px.

My task: I want .b-fc's FIs to stack vertically when no new row is created (i.e. .b-fc doesn't wrap), and I want .b-fc's FIs to align horizontally when .b-fc wraps.


Current solution

In the code snippet above, I've tried to achieve my task by writing one set of properties that works for both scenarios, by setting a `flex-basis` on `.b-fc`'s FIs. If the space left for `.b-fc`'s FIs is less than this flex-basis (100px), the FIs stack vertically. The weaknesses: i) if `.b-fc`'s `width` is larger than 300px, its FIs align horizontally; ii) when `.b-fc` wraps onto its own row, its FIs themselves wrap whenever `.b-fc` is narrower than 300px.

Therefore, I'm figuring it'd be more powerful to be able to apply CSS when .b-fc wraps. Is this possible?

Idea 1: CSS variables & JS

Perhaps using CSS variables/SASS I could continually assess whether the FC's width minus .a-fc's width is <= 70px. If true, apply styling to .b-fc (a rough JS sketch of this idea follows).
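A rough sketch of that idea in plain JS, using a ResizeObserver instead of CSS variables — the wrapped-row class name is made up, and the 70px threshold is taken from the flex-basis above:

// Sketch: toggle a class on .b-fc whenever the space left beside .a-fc drops
// below 70px, i.e. roughly when .b-fc would wrap onto its own row.
const wrapper = document.querySelector('.wrapper');
const aFc = document.querySelector('.a-fc');
const bFc = document.querySelector('.b-fc');

const observer = new ResizeObserver(() => {
  const spaceLeft = wrapper.clientWidth - aFc.offsetWidth;
  bFc.classList.toggle('wrapped-row', spaceLeft < 70);
});
observer.observe(wrapper);

The stylesheet would then switch .b-fc between flex-direction: column and row based on that class.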

Idea 2: media-queries

Another option is to work out when the second row is created, capture this with media queries, and apply CSS to .b-fc accordingly.


P.S. A similar question has been asked here before, in 2015. Maybe new techniques have emerged since then.



from Can I apply CSS to a flex-item when it wraps onto a new row?

TypeError: (data || []).forEach is not a function

I'm trying to render the data using an Ant Design table, but it doesn't work. I think it's because of the object key "children" in my response.

When I run my code I get the error: TypeError: (data || []).forEach is not a function

I would also like to note that I have uploaded CSV file data without the "children" column and it works perfectly.

My response:

(screenshot of the response, not reproduced here)

import React, { useState } from "react";
import { parse } from "papaparse";
import _ from "lodash";
import { Upload, message, Button, Table, Input } from "antd";
import { UploadOutlined } from "@ant-design/icons";

export default function Home() {

  const [columns, setColumn] = useState([]);
  const [baseData, setBaseData] = useState([]);
  const [filterTable, setFilterTable] = useState(null);

  const props = {
    name: "file",
    accept: ".txt, .csv",
    headers: {
      authorization: "authorization-text",
    },
    async onChange(info) {
      if (info.file.status !== "uploading") {
        console.log(info.file, info.fileList);
      }
      if (info.file.status === "done") {
        const texts = await info.file.originFileObj.text();
        const results = parse(texts, {
          header: true
        });

        const col = _.keys(results.data[0]);

        const customCol = _.map(col, (value) => ({
          title: value,
          dataIndex: value,
          key: value.toLowerCase(),
        }));

        const data = results.data;
        
        console.log({ customCol });
        console.log({ data });

        setColumn(customCol);
        setBaseData(data);

        message.success(`${info.file.name} file uploaded successfully`);
      } else if (info.file.status === "error") {
        message.error(`${info.file.name} file upload failed.`);
      }
    },
  };

  return (
    <div>
      <main>
        <Upload {...props}>
          <Button icon={<UploadOutlined />}>Click to Upload</Button>
        </Upload>

        <Table pagination={false} columns={columns} dataSource={filterTable == null ? baseData : filterTable} />
        
      </main>
    </div>
  );
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/react/16.6.3/umd/react.production.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/react-dom/16.6.3/umd/react-dom.production.min.js"></script>

Here's the table props:

export interface TableProps<RecordType> extends Omit<RcTableProps<RecordType>, 'transformColumns' | 'internalHooks' | 'internalRefs' | 'data' | 'columns' | 'scroll' | 'emptyText'> {
  dropdownPrefixCls?: string;
  dataSource?: RcTableProps<RecordType>['data'];
  columns?: ColumnsType<RecordType>;
  pagination?: false | TablePaginationConfig;
  loading?: boolean | SpinProps;
  size?: SizeType;
  bordered?: boolean;
  locale?: TableLocale;
  onChange?: (pagination: TablePaginationConfig, filters: Record<string, (Key | boolean)[] | null>, sorter: SorterResult<RecordType> | SorterResult<RecordType>[], extra: TableCurrentDataSource<RecordType>) => void;
  rowSelection?: TableRowSelection<RecordType>;
  getPopupContainer?: GetPopupContainer;
  scroll?: RcTableProps<RecordType>['scroll'] & {
    scrollToFirstRowOnChange?: boolean;
  };
  sortDirections?: SortOrder[];
  showSorterTooltip?: boolean;
}

export interface TableProps<RecordType = unknown> extends LegacyExpandableProps<RecordType> {
  prefixCls?: string;
  className?: string;
  style?: React.CSSProperties;
  children?: React.ReactNode;
  data?: RecordType[];
  columns?: ColumnsType<RecordType>;
  rowKey?: string | GetRowKey<RecordType>;
}

What could be the problem?
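If the collision with the key the Table reserves for nested rows really is the cause, one hedged way to test it is to rename that column before it reaches the table. A sketch that would slot into the onChange handler above — childrenRaw is just an example name:

// Sketch: rename a "children" column coming out of PapaParse so it cannot be
// mistaken for tree-data child rows by the Table.
const safeData = results.data.map(({ children, ...rest }) => ({
  ...rest,
  childrenRaw: children,
}));

setBaseData(safeData);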



from TypeError: (data || []).forEach is not a function